All ABIs, except alpha, powerpc, and x86_64, define it to
atomic_full_barrier/__sync_synchronize, which can be mapped to
__atomic_thread_fence (__ATOMIC_SEQ_CST) in most cases, with the
exception of aarch64 (where the acquire fence is generated as
'dmb ishld' instead of 'dmb ish').
For s390x, it defaults to a memory barrier where __sync_synchronize
emits a 'bcr 15,0' (which the manual describes as pipeline
synchronization).
For PowerPC, it allows the use of lwsync for additional chips
(since _ARCH_PWR4 does not cover all chips that support it).
Tested on aarch64-linux-gnu, where the acquire barrier produces a different
instruction than the current code.
Co-authored-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
+# define atomic_read_barrier() __atomic_thread_fence (__ATOMIC_ACQUIRE);
#endif
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define atomic_read_barrier() __asm ("mb" : : : "memory");
#define atomic_write_barrier() __asm ("wmb" : : : "memory");
#include <atomic.h>
-#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
-#endif
-
#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif
#endif
#ifdef _ARCH_PWR4
-/*
- * Newer powerpc64 processors support the new "light weight" sync (lwsync)
- * So if the build is using -mcpu=[power4,power5,power5+,970] we can
- * safely use lwsync.
- */
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
/*
* "light weight" sync can also be used for the release barrier.
*/
# define atomic_write_barrier() __asm ("lwsync" ::: "memory")
#else
-/*
- * Older powerpc32 processors don't support the new "light weight"
- * sync (lwsync). So the only safe option is to use normal sync
- * for all powerpc32 applications.
- */
-# define atomic_read_barrier() __asm ("sync" ::: "memory")
# define atomic_write_barrier() __asm ("sync" ::: "memory")
#endif
#define ATOMIC_EXCHANGE_USES_CAS 0
-#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")
#define atomic_spin_nop() __asm ("pause")