git.ipfire.org Git - thirdparty/glibc.git/commitdiff
x86: Define atomic_full_barrier using __sync_synchronize
authorUros Bizjak <ubizjak@gmail.com>
Mon, 8 Sep 2025 12:38:21 +0000 (14:38 +0200)
committerH.J. Lu <hjl.tools@gmail.com>
Tue, 9 Sep 2025 14:44:41 +0000 (07:44 -0700)
For x86_64 targets, __sync_synchronize emits a 64-bit
'LOCK ORQ $0x0,(%rsp)' instead of the 32-bit 'LOCK ORL $0x0,(%rsp)'.

No functional changes.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Adhemerval Zanella Netto <adhemerval.zanella@linaro.org>
Cc: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Cc: Collin Funk <collin.funk1@gmail.com>
Cc: H.J. Lu <hjl.tools@gmail.com>
Cc: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
sysdeps/x86/atomic-machine.h

index d5b2d49031fc5f3bc6da0547e7c9a721bd9657da..c0c2c3437a6815d9d2cf8f1e8cbd997103c561ec 100644 (file)
 
 #ifdef __x86_64__
 # define __HAVE_64B_ATOMICS            1
-# define SP_REG                                "rsp"
 #else
 /* Since the Pentium, i386 CPUs have supported 64-bit atomics, but the
    i386 psABI supplement provides only 4-byte alignment for uint64_t
    inside structs, so it is currently not possible to use 64-bit
    atomics on this platform.  */
 # define __HAVE_64B_ATOMICS            0
-# define SP_REG                                "esp"
 #endif
+
 #define ATOMIC_EXCHANGE_USES_CAS       0
 
 #define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
 #define catomic_exchange_and_add(mem, value) \
   __atomic_fetch_add (mem, value, __ATOMIC_ACQUIRE)
 
-/* We don't use mfence because it is supposedly slower due to having to
-   provide stronger guarantees (e.g., regarding self-modifying code).  */
-#define atomic_full_barrier() \
-    __asm __volatile (LOCK_PREFIX "orl $0, (%%" SP_REG ")" ::: "memory")
+#define atomic_full_barrier() __sync_synchronize ()
 #define atomic_read_barrier() __asm ("" ::: "memory")
 #define atomic_write_barrier() __asm ("" ::: "memory")