All ABIs, except alpha and sparc, define it as
atomic_full_barrier/__sync_synchronize; since a write barrier only
requires release ordering, it can instead be mapped to
__atomic_thread_fence (__ATOMIC_RELEASE).
For alpha, it uses a 'wmb', which does not map to any of the C11
barriers.
For sparc, it uses the stronger 'membar #LoadStore | #StoreStore',
while the release barrier maps to just 'membar #StoreLoad'; the patch
therefore keeps the sparc definition.
For powerpc, it allows lwsync to be used on additional chips, since
_ARCH_PWR4 does not cover all the chips that support it.
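
As an illustration only (not part of the patch), the ordering the
write barrier provides is the usual store/store publication pattern.
The sketch below uses the compiler builtins the new generic definition
expands to, with a matching acquire fence on the reader side; all
names in it are made up for the example:

  #include <stddef.h>

  /* Hypothetical shared object and publication pointer.  */
  struct node { int payload; };
  static struct node slot;
  static struct node *published;

  void
  producer (int value)
  {
    slot.payload = value;                      /* Initialize the object.  */
    __atomic_thread_fence (__ATOMIC_RELEASE);  /* What the new
                                                  atomic_write_barrier
                                                  expands to.  */
    __atomic_store_n (&published, &slot, __ATOMIC_RELAXED);
  }

  int
  consumer (void)
  {
    struct node *p = __atomic_load_n (&published, __ATOMIC_RELAXED);
    if (p == NULL)
      return -1;
    __atomic_thread_fence (__ATOMIC_ACQUIRE);  /* Pairs with the release
                                                  fence, as
                                                  atomic_read_barrier does.  */
    return p->payload;
  }

On strongly ordered targets such as x86 the release fence compiles to a
plain compiler barrier, matching the removed per-arch definition.
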
Tested on aarch64-linux-gnu.
Co-authored-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
#endif
-#ifndef atomic_write_barrier
-# define atomic_write_barrier() atomic_full_barrier ()
-#endif
-
-
#ifndef atomic_forced_read
# define atomic_forced_read(x) \
({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
# define atomic_read_barrier() atomic_thread_fence_acquire ()
#endif
+#ifndef atomic_write_barrier
+# define atomic_write_barrier() atomic_thread_fence_release ()
+#endif
+
/* ATOMIC_EXCHANGE_USES_CAS is non-zero if atomic_exchange operations
are implemented based on a CAS loop; otherwise, this is zero and we assume
#include <atomic.h>
-#ifndef atomic_write_barrier
-# define atomic_write_barrier() atomic_full_barrier ()
-#endif
-
#ifndef DEFAULT_TOP_PAD
# define DEFAULT_TOP_PAD 131072
#endif
# define MUTEX_HINT_REL
#endif
-#ifdef _ARCH_PWR4
-/*
- * "light weight" sync can also be used for the release barrier.
- */
-# define atomic_write_barrier() __asm ("lwsync" ::: "memory")
-#else
-# define atomic_write_barrier() __asm ("sync" ::: "memory")
-#endif
-
#endif
#define ATOMIC_EXCHANGE_USES_CAS 0
-#define atomic_write_barrier() __asm ("" ::: "memory")
-
#define atomic_spin_nop() __asm ("pause")
#endif /* atomic-machine.h */