powerpc: Fix missing barriers in atomic_exchange_and_add_{acq,rel}
author     Adhemerval Zanella <azanella@linux.vnet.ibm.com>
           Tue, 25 Nov 2014 19:32:54 +0000 (14:32 -0500)
committer  Adhemerval Zanella <azanella@linux.vnet.ibm.com>
           Wed, 14 Jan 2015 11:04:53 +0000 (06:04 -0500)
On powerpc, atomic_exchange_and_add is implemented without any
barriers.  This patch adds the missing instructions and memory barriers
for acquire and release semantics.
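
A rough user-level analogue of the intended semantics, using the GCC
__atomic builtins rather than the glibc-internal macros (which are not a
public API); the counter/payload names below are purely illustrative:

/* Illustration only: the acquire/release fetch-and-add semantics mapped
   onto the GCC __atomic builtins.  On powerpc the release form emits a
   sync/lwsync before the lwarx/stwcx. loop and the acquire form emits an
   isync after it.  */
#include <stdio.h>

static int counter;
static int payload;

static void
producer (void)
{
  payload = 42;   /* plain store */
  /* Release: the payload store cannot be reordered past the add.  */
  __atomic_fetch_add (&counter, 1, __ATOMIC_RELEASE);
}

static void
consumer (void)
{
  /* Acquire: the payload load cannot be reordered before the add.  */
  if (__atomic_fetch_add (&counter, 0, __ATOMIC_ACQUIRE) != 0)
    printf ("payload = %d\n", payload);
}

int
main (void)
{
  producer ();
  consumer ();
  return 0;
}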

ChangeLog
csu/tst-atomic.c
sysdeps/powerpc/bits/atomic.h
sysdeps/powerpc/powerpc32/bits/atomic.h
sysdeps/powerpc/powerpc64/bits/atomic.h

index 103f1edf52c26951cdd413b6ac9a0106a16f960c..814486eb8655a0bf0d1c7516779c7371c7dc0788 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+2014-11-26  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>
+
+       * csu/tst-atomic.c (do_test): Add atomic_exchange_and_add_{acq,rel}
+       tests.
+       * sysdeps/powerpc/bits/atomic.h
+       (__arch_atomic_exchange_and_add_32_acq): Add definition.
+       (__arch_atomic_exchange_and_add_32_rel): Likewise.
+       (atomic_exchange_and_add_acq): Likewise.
+       (atomic_exchange_and_add_rel): Likewise.
+       * sysdeps/powerpc/powerpc32/bits/atomic.h
+       (__arch_atomic_exchange_and_add_64_acq): Add definition.
+       (__arch_atomic_exchange_and_add_64_rel): Likewise.
+       * sysdeps/powerpc/powerpc64/bits/atomic.h
+       (__arch_atomic_exchange_and_add_64_acq): Add definition.
+       (__arch_atomic_exchange_and_add_64_rel): Likewise.
+
 2014-11-25  Anton Blanchard <anton@samba.org>
 
        * sysdeps/powerpc/bits/atomic.h
index d16c66dc31d4150f0e651549e6ad7393d6d76119..ab6db453072a6f27602257d024ff7b5558bc577c 100644
--- a/csu/tst-atomic.c
+++ b/csu/tst-atomic.c
@@ -113,6 +113,22 @@ do_test (void)
       ret = 1;
     }
 
+  mem = 2;
+  if (atomic_exchange_and_add_acq (&mem, 11) != 2
+      || mem != 13)
+    {
+      puts ("atomic_exchange_and_add_acq test failed");
+      ret = 1;
+    }
+
+  mem = 2;
+  if (atomic_exchange_and_add_rel (&mem, 11) != 2
+      || mem != 13)
+    {
+      puts ("atomic_exchange_and_add_rel test failed");
+      ret = 1;
+    }
+
   mem = -21;
   atomic_add (&mem, 22);
   if (mem != 1)
index f312676b45b73f289beabc1868ffaa39ca8b002d..b05b0f7aa07a90653a5d1fc0ce2bc440436200ba 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -152,6 +152,34 @@ typedef uintmax_t uatomic_max_t;
     __val;                                                                   \
   })
 
+#define __arch_atomic_exchange_and_add_32_acq(mem, value) \
+  ({                                                                         \
+    __typeof (*mem) __val, __tmp;                                            \
+    __asm __volatile ("1:      lwarx   %0,0,%3" MUTEX_HINT_ACQ "\n"          \
+                     "         add     %1,%0,%4\n"                           \
+                     "         stwcx.  %1,0,%3\n"                            \
+                     "         bne-    1b\n"                                 \
+                     __ARCH_ACQ_INSTR                                        \
+                     : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)             \
+                     : "b" (mem), "r" (value), "m" (*mem)                    \
+                     : "cr0", "memory");                                     \
+    __val;                                                                   \
+  })
+
+#define __arch_atomic_exchange_and_add_32_rel(mem, value) \
+  ({                                                                         \
+    __typeof (*mem) __val, __tmp;                                            \
+    __asm __volatile (__ARCH_REL_INSTR "\n"                                  \
+                     "1:       lwarx   %0,0,%3" MUTEX_HINT_REL "\n"          \
+                     "         add     %1,%0,%4\n"                           \
+                     "         stwcx.  %1,0,%3\n"                            \
+                     "         bne-    1b"                                   \
+                     : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)             \
+                     : "b" (mem), "r" (value), "m" (*mem)                    \
+                     : "cr0", "memory");                                     \
+    __val;                                                                   \
+  })
+
 #define __arch_atomic_increment_val_32(mem) \
   ({                                                                         \
     __typeof (*(mem)) __val;                                                 \
@@ -252,6 +280,28 @@ typedef uintmax_t uatomic_max_t;
        abort ();                                                             \
     __result;                                                                \
   })
+#define atomic_exchange_and_add_acq(mem, value) \
+  ({                                                                         \
+    __typeof (*(mem)) __result;                                              \
+    if (sizeof (*mem) == 4)                                                  \
+      __result = __arch_atomic_exchange_and_add_32_acq (mem, value);         \
+    else if (sizeof (*mem) == 8)                                             \
+      __result = __arch_atomic_exchange_and_add_64_acq (mem, value);         \
+    else                                                                     \
+       abort ();                                                             \
+    __result;                                                                \
+  })
+#define atomic_exchange_and_add_rel(mem, value) \
+  ({                                                                         \
+    __typeof (*(mem)) __result;                                              \
+    if (sizeof (*mem) == 4)                                                  \
+      __result = __arch_atomic_exchange_and_add_32_rel (mem, value);         \
+    else if (sizeof (*mem) == 8)                                             \
+      __result = __arch_atomic_exchange_and_add_64_rel (mem, value);         \
+    else                                                                     \
+       abort ();                                                             \
+    __result;                                                                \
+  })
 
 #define atomic_increment_val(mem) \
   ({                                                                         \
index a3dd09cd9a626ee4ab268be3ef48568f8ccdba4f..7422262dc1d50977cbaa823a6ab4cda489aa8d53 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
 #define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ abort (); (*mem) = (value); })
 
+#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
 #define __arch_atomic_increment_val_64(mem) \
     ({ abort (); (*mem)++; })
 
index 9cab0a2ffee44d02941e29a4d3dbcacb9917ca23..e64cb9fa546e9521caba65eb0d5f021ef4c37cbe 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
       __val;                                                                 \
     })
 
+#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
+    ({                                                                       \
+      __typeof (*mem) __val, __tmp;                                          \
+      __asm __volatile ("1:    ldarx   %0,0,%3" MUTEX_HINT_ACQ "\n"          \
+                       "       add     %1,%0,%4\n"                           \
+                       "       stdcx.  %1,0,%3\n"                            \
+                       "       bne-    1b\n"                                 \
+                       __ARCH_ACQ_INSTR                                      \
+                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
+                       : "b" (mem), "r" (value), "m" (*mem)                  \
+                       : "cr0", "memory");                                   \
+      __val;                                                                 \
+    })
+
+#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
+    ({                                                                       \
+      __typeof (*mem) __val, __tmp;                                          \
+      __asm __volatile (__ARCH_REL_INSTR "\n"                                \
+                       "1:     ldarx   %0,0,%3" MUTEX_HINT_REL "\n"          \
+                       "       add     %1,%0,%4\n"                           \
+                       "       stdcx.  %1,0,%3\n"                            \
+                       "       bne-    1b"                                   \
+                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
+                       : "b" (mem), "r" (value), "m" (*mem)                  \
+                       : "cr0", "memory");                                   \
+      __val;                                                                 \
+    })
+
 #define __arch_atomic_increment_val_64(mem) \
     ({                                                                       \
       __typeof (*(mem)) __val;                                               \
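
All of the added macros share the same reservation-loop shape: lwarx/ldarx
loads the value and sets a reservation, add computes the sum, stwcx./stdcx.
stores only if the reservation still holds, and bne- 1b retries otherwise;
the barrier (isync after the loop for acquire, sync/lwsync before it for
release) supplies the ordering.  A rough C analogue of that retry loop,
sketched with the GCC compare-and-swap builtin purely for illustration:

/* Illustration only: the hardware loop relies on the reservation, not a
   CAS, but the retry structure and the returned value are the same.  */
static inline int
fetch_add_acq_sketch (int *mem, int value)
{
  int old = __atomic_load_n (mem, __ATOMIC_RELAXED);
  /* Retry while another thread modified *mem in between, mirroring the
     stwcx. failure branch (bne- 1b).  */
  while (!__atomic_compare_exchange_n (mem, &old, old + value,
                                       1 /* weak */,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    ;
  return old;  /* like __val: the value observed before the addition */
}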