git.ipfire.org Git - thirdparty/glibc.git/commitdiff
x86: Define atomic_exchange_acq using __atomic_exchange_n
authorUros Bizjak <ubizjak@gmail.com>
Mon, 8 Sep 2025 12:38:22 +0000 (14:38 +0200)
committerH.J. Lu <hjl.tools@gmail.com>
Tue, 9 Sep 2025 14:51:41 +0000 (07:51 -0700)
The resulting libc.so is identical on both x86_64 and
i386 targets compared to unpatched builds:

$ sha1sum libc-x86_64-old.so libc-x86_64-new.so
74eca1b87f2ecc9757a984c089a582b7615d93e7  libc-x86_64-old.so
74eca1b87f2ecc9757a984c089a582b7615d93e7  libc-x86_64-new.so
$ sha1sum libc-i386-old.so libc-i386-new.so
882bbab8324f79f4fbc85224c4c914fc6822ece7  libc-i386-old.so
882bbab8324f79f4fbc85224c4c914fc6822ece7  libc-i386-new.so

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Adhemerval Zanella Netto <adhemerval.zanella@linaro.org>
Cc: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Cc: Collin Funk <collin.funk1@gmail.com>
Cc: H.J.Lu <hjl.tools@gmail.com>
Cc: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
sysdeps/x86/atomic-machine.h

index c0c2c3437a6815d9d2cf8f1e8cbd997103c561ec..ebe8e978e50956591e0f46d5a5f7ebfe0d49ab68 100644 (file)
@@ -19,9 +19,6 @@
 #ifndef _X86_ATOMIC_MACHINE_H
 #define _X86_ATOMIC_MACHINE_H 1
 
-#include <stdint.h>
-#include <libc-pointer-arith.h>                /* For cast_to_integer.  */
-
 #define USE_ATOMIC_COMPILER_BUILTINS   1
 
 #ifdef __x86_64__
 #define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
   (! __sync_bool_compare_and_swap (mem, oldval, newval))
 
-/* Note that we need no lock prefix.  */
 #define atomic_exchange_acq(mem, newvalue) \
-  ({ __typeof (*mem) result;                                                 \
-     if (sizeof (*mem) == 1)                                                 \
-       __asm __volatile ("xchgb %b0, %1"                                     \
-                        : "=q" (result), "=m" (*mem)                         \
-                        : "0" (newvalue), "m" (*mem));                       \
-     else if (sizeof (*mem) == 2)                                            \
-       __asm __volatile ("xchgw %w0, %1"                                     \
-                        : "=r" (result), "=m" (*mem)                         \
-                        : "0" (newvalue), "m" (*mem));                       \
-     else if (sizeof (*mem) == 4)                                            \
-       __asm __volatile ("xchgl %0, %1"                                              \
-                        : "=r" (result), "=m" (*mem)                         \
-                        : "0" (newvalue), "m" (*mem));                       \
-     else if (__HAVE_64B_ATOMICS)                                            \
-       __asm __volatile ("xchgq %q0, %1"                                     \
-                        : "=r" (result), "=m" (*mem)                         \
-                        : "0" ((int64_t) cast_to_integer (newvalue)),        \
-                          "m" (*mem));                                       \
-     else                                                                    \
-       {                                                                     \
-        result = 0;                                                          \
-        __atomic_link_error ();                                              \
-       }                                                                     \
-     result; })
+  __atomic_exchange_n (mem, newvalue, __ATOMIC_ACQUIRE)
 
 /* ??? Remove when catomic_exchange_and_add
    fallback uses __atomic_fetch_add.  */