From e6b5ad1b1d9f8dcb80b711747f3abffec29408e3 Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Mon, 8 Sep 2025 14:38:21 +0200
Subject: [PATCH] x86: Define atomic_full_barrier using __sync_synchronize

Define atomic_full_barrier using the __sync_synchronize compiler
builtin instead of open-coded inline assembly, and remove the SP_REG
define that only the removed assembly used.

For x86_64 targets, __sync_synchronize emits a full 64-bit
'LOCK ORQ $0x0,(%rsp)' instead of 'LOCK ORL $0x0,(%rsp)'.  Since the
instruction merely ORs zero into the word at the top of the stack to
obtain the barrier effect, the operand-size difference does not
change behavior.

No functional changes.

Signed-off-by: Uros Bizjak
Cc: Florian Weimer
Cc: Adhemerval Zanella Netto
Cc: Wilco Dijkstra
Cc: Collin Funk
Cc: H.J. Lu
Cc: Carlos O'Donell
Reviewed-by: Adhemerval Zanella
---
 sysdeps/x86/atomic-machine.h | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
index d5b2d49031..c0c2c3437a 100644
--- a/sysdeps/x86/atomic-machine.h
+++ b/sysdeps/x86/atomic-machine.h
@@ -26,15 +26,14 @@
 
 #ifdef __x86_64__
 # define __HAVE_64B_ATOMICS 1
-# define SP_REG "rsp"
 #else
 /* Since the Pentium, i386 CPUs have supported 64-bit atomics, but
    the i386 psABI supplement provides only 4-byte alignment for
    uint64_t inside structs, so it is currently not possible to use
    64-bit atomics on this platform.  */
 # define __HAVE_64B_ATOMICS 0
-# define SP_REG "esp"
 #endif
+
 #define ATOMIC_EXCHANGE_USES_CAS 0
 
 #define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
@@ -74,10 +73,7 @@
 #define catomic_exchange_and_add(mem, value) \
   __atomic_fetch_add (mem, value, __ATOMIC_ACQUIRE)
 
-/* We don't use mfence because it is supposedly slower due to having to
-   provide stronger guarantees (e.g., regarding self-modifying code).  */
-#define atomic_full_barrier() \
-  __asm __volatile (LOCK_PREFIX "orl $0, (%%" SP_REG ")" ::: "memory")
+#define atomic_full_barrier() __sync_synchronize ()
 
 #define atomic_read_barrier() __asm ("" ::: "memory")
 #define atomic_write_barrier() __asm ("" ::: "memory")
-- 
2.47.3
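
P.S. For reviewers who want to double-check the codegen, below is a
minimal standalone sketch (not part of the patch; the file name and
function are only illustrative).  Compiling it with "gcc -O2 -S" and
looking at the assembly for publish should show the fence instruction
the compiler picks for __sync_synchronize; on x86_64 this is expected
to be 'LOCK ORQ $0x0,(%rsp)' with recent GCC, while older compilers
may emit 'mfence' instead.

/* barrier-demo.c: standalone demonstration of the builtin now used
   by atomic_full_barrier.  */

int data;
int flag;

void
publish (int v)
{
  data = v;               /* Plain store.  */
  __sync_synchronize ();  /* Full barrier: orders the store to data
                             before the store to flag below.  */
  flag = 1;
}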