From: Adhemerval Zanella
Date: Thu, 11 Sep 2025 13:49:44 +0000 (-0300)
Subject: atomic: Consolidate atomic_full_barrier implementation
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=70ee250fb8b1ea870d5d7e2e7fdf4ea7850efa11;p=thirdparty%2Fglibc.git

atomic: Consolidate atomic_full_barrier implementation

All ABIs save for sparcv9 and s390 define it as __sync_synchronize,
which can be mapped to __atomic_thread_fence (__ATOMIC_SEQ_CST).

For SPARC, it uses a stricter membar #StoreStore|#LoadStore|#StoreLoad|#LoadLoad
instead of the membar #StoreLoad generated by __sync_synchronize.  For
s390x, it defaults to a compiler memory barrier, whereas
__sync_synchronize emits a 'bcr 15,0' (which the manual describes as
pipeline synchronization).  The barrier is used in only one place
(pthread_mutex_setprioceiling), and using a stricter barrier for s390
is acceptable performance-wise.

Co-authored-by: Wilco Dijkstra
Reviewed-by: Wilco Dijkstra
---

diff --git a/include/atomic.h b/include/atomic.h
index 12c439632c..227c4cdf27 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -103,7 +103,7 @@
 
 #ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
+# define atomic_full_barrier() __atomic_thread_fence (__ATOMIC_SEQ_CST)
 #endif
 
diff --git a/sysdeps/aarch64/atomic-machine.h b/sysdeps/aarch64/atomic-machine.h
index d210c62bff..f00c4607f3 100644
--- a/sysdeps/aarch64/atomic-machine.h
+++ b/sysdeps/aarch64/atomic-machine.h
@@ -22,7 +22,4 @@
 #define __HAVE_64B_ATOMICS 1
 #define ATOMIC_EXCHANGE_USES_CAS 0
 
-/* Barrier macro. */
-#define atomic_full_barrier() __sync_synchronize()
-
 #endif
diff --git a/sysdeps/alpha/atomic-machine.h b/sysdeps/alpha/atomic-machine.h
index e8ed69dff9..a1d74a930e 100644
--- a/sysdeps/alpha/atomic-machine.h
+++ b/sysdeps/alpha/atomic-machine.h
@@ -22,6 +22,5 @@
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-#define atomic_full_barrier() __asm ("mb" : : : "memory");
 #define atomic_read_barrier() __asm ("mb" : : : "memory");
 #define atomic_write_barrier() __asm ("wmb" : : : "memory");
diff --git a/sysdeps/arc/atomic-machine.h b/sysdeps/arc/atomic-machine.h
index 4d14e41696..096035840a 100644
--- a/sysdeps/arc/atomic-machine.h
+++ b/sysdeps/arc/atomic-machine.h
@@ -25,6 +25,4 @@
    is not as optimal as LLOCK/SCOND specially for SMP.  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-#define atomic_full_barrier() ({ asm volatile ("dmb 3":::"memory"); })
-
 #endif /* _ARC_BITS_ATOMIC_H */
diff --git a/sysdeps/arm/atomic-machine.h b/sysdeps/arm/atomic-machine.h
index 1ba328c546..f728de4ba5 100644
--- a/sysdeps/arm/atomic-machine.h
+++ b/sysdeps/arm/atomic-machine.h
@@ -18,5 +18,3 @@
 
 #define __HAVE_64B_ATOMICS 0
 #define ATOMIC_EXCHANGE_USES_CAS 1
-
-#define atomic_full_barrier() __sync_synchronize ()
diff --git a/sysdeps/generic/malloc-machine.h b/sysdeps/generic/malloc-machine.h
index 1bbe03bf5d..195fd8c5e6 100644
--- a/sysdeps/generic/malloc-machine.h
+++ b/sysdeps/generic/malloc-machine.h
@@ -22,10 +22,6 @@
 
 #include <atomic.h>
 
-#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
-#endif
-
 #ifndef atomic_read_barrier
 # define atomic_read_barrier() atomic_full_barrier ()
 #endif
diff --git a/sysdeps/hppa/atomic-machine.h b/sysdeps/hppa/atomic-machine.h
index 5647631cb6..839b8df596 100644
--- a/sysdeps/hppa/atomic-machine.h
+++ b/sysdeps/hppa/atomic-machine.h
@@ -18,8 +18,6 @@
 #ifndef _ATOMIC_MACHINE_H
 #define _ATOMIC_MACHINE_H 1
 
-#define atomic_full_barrier() __sync_synchronize ()
-
 #define __HAVE_64B_ATOMICS 0
 
 /* XXX Is this actually correct?  */
diff --git a/sysdeps/loongarch/atomic-machine.h b/sysdeps/loongarch/atomic-machine.h
index 5ac91cf538..7e10309932 100644
--- a/sysdeps/loongarch/atomic-machine.h
+++ b/sysdeps/loongarch/atomic-machine.h
@@ -19,8 +19,6 @@
 #ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
 #define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
 
-#define atomic_full_barrier() __sync_synchronize ()
-
 #define __HAVE_64B_ATOMICS (__loongarch_grlen >= 64)
 #define ATOMIC_EXCHANGE_USES_CAS 0
diff --git a/sysdeps/mips/atomic-machine.h b/sysdeps/mips/atomic-machine.h
index 850536af46..f19310006b 100644
--- a/sysdeps/mips/atomic-machine.h
+++ b/sysdeps/mips/atomic-machine.h
@@ -33,15 +33,6 @@
 #define __HAVE_64B_ATOMICS 1
 #endif
 
-/* See the comments in <sys/asm.h> about the use of the sync instruction.  */
-#ifndef MIPS_SYNC
-# define MIPS_SYNC sync
-#endif
-
-#define MIPS_SYNC_STR_2(X) #X
-#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X)
-#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC)
-
 /* MIPS is an LL/SC machine.  However, XLP has a direct atomic exchange
    instruction which will be used by __atomic_exchange_n.  */
 #ifdef _MIPS_ARCH_XLP
@@ -50,15 +41,4 @@
 # define ATOMIC_EXCHANGE_USES_CAS 1
 #endif
 
-#ifdef __mips16
-# define atomic_full_barrier() __sync_synchronize ()
-
-#else /* !__mips16 */
-# define atomic_full_barrier() \
-  __asm__ __volatile__ (".set push\n\t" \
-			MIPS_PUSH_MIPS2 \
-			MIPS_SYNC_STR "\n\t" \
-			".set pop" : : : "memory")
-#endif /* !__mips16 */
-
 #endif /* atomic-machine.h */
diff --git a/sysdeps/mips/sys/asm.h b/sysdeps/mips/sys/asm.h
index d40ca751e4..50e95a0b89 100644
--- a/sysdeps/mips/sys/asm.h
+++ b/sysdeps/mips/sys/asm.h
@@ -478,20 +478,4 @@ symbol = value
 # define MTC0 dmtc0
 #endif
 
-/* The MIPS architectures do not have a uniform memory model.  Particular
-   platforms may provide additional guarantees - for instance, the R4000
-   LL and SC instructions implicitly perform a SYNC, and the 4K promises
-   strong ordering.
-
-   However, in the absence of those guarantees, we must assume weak ordering
-   and SYNC explicitly where necessary.
-
-   Some obsolete MIPS processors may not support the SYNC instruction.  This
-   applies to "true" MIPS I processors; most of the processors which compile
-   using MIPS I implement parts of MIPS II.  */
-
-#ifndef MIPS_SYNC
-# define MIPS_SYNC sync
-#endif
-
 #endif /* sys/asm.h */
diff --git a/sysdeps/or1k/atomic-machine.h b/sysdeps/or1k/atomic-machine.h
index ab9dc870d4..8dac0e4ced 100644
--- a/sysdeps/or1k/atomic-machine.h
+++ b/sysdeps/or1k/atomic-machine.h
@@ -22,6 +22,4 @@
 #define __HAVE_64B_ATOMICS 0
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-#define atomic_full_barrier() ({ asm volatile ("l.msync" ::: "memory"); })
-
 #endif /* atomic-machine.h */
diff --git a/sysdeps/powerpc/atomic-machine.h b/sysdeps/powerpc/atomic-machine.h
index 7291facf47..65c774a064 100644
--- a/sysdeps/powerpc/atomic-machine.h
+++ b/sysdeps/powerpc/atomic-machine.h
@@ -57,6 +57,4 @@
 # define atomic_write_barrier() __asm ("sync" ::: "memory")
 #endif
 
-#define atomic_full_barrier() __asm ("sync" ::: "memory")
-
 #endif
diff --git a/sysdeps/riscv/atomic-machine.h b/sysdeps/riscv/atomic-machine.h
index 2c39d4e5dc..c5d39c1be7 100644
--- a/sysdeps/riscv/atomic-machine.h
+++ b/sysdeps/riscv/atomic-machine.h
@@ -19,8 +19,6 @@
 #ifndef _LINUX_RISCV_BITS_ATOMIC_H
 #define _LINUX_RISCV_BITS_ATOMIC_H 1
 
-#define atomic_full_barrier() __sync_synchronize ()
-
 #ifdef __riscv_atomic
 
 # define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
index 1a963fea2f..97d9c99fa6 100644
--- a/sysdeps/x86/atomic-machine.h
+++ b/sysdeps/x86/atomic-machine.h
@@ -31,7 +31,6 @@
 
 #define ATOMIC_EXCHANGE_USES_CAS 0
 
-#define atomic_full_barrier() __sync_synchronize ()
 #define atomic_read_barrier() __asm ("" ::: "memory")
 #define atomic_write_barrier() __asm ("" ::: "memory")
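
For context, the effect of the patch is that every port without its own
definition now picks up the same GCC builtin from include/atomic.h.  The
following is a minimal sketch only, not part of the patch; the function
and variable names are made up for illustration, and the single real user
of the macro remains pthread_mutex_setprioceiling:

/* Consolidated fallback, as in include/atomic.h after this change:
   a port may still define atomic_full_barrier before this is reached.  */
#ifndef atomic_full_barrier
# define atomic_full_barrier() __atomic_thread_fence (__ATOMIC_SEQ_CST)
#endif

static int ceiling_requested;
static int ceiling_changed;

static void
update_ceiling (void)
{
  ceiling_requested = 1;
  /* Sequentially consistent fence: neither the compiler nor the CPU may
     reorder memory accesses across this point.  */
  atomic_full_barrier ();
  ceiling_changed = 1;
}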