powerpc: Enforce compiler barriers on hardware transactions
author Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Mon, 28 Dec 2015 14:24:43 +0000 (12:24 -0200)
committer Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Mon, 22 Feb 2016 19:07:08 +0000 (16:07 -0300)
Work around a GCC behavior with hardware transactional memory built-ins.
GCC doesn't treat the PowerPC transactional built-ins as compiler
barriers, moving instructions past the transaction boundaries and
altering their atomicity.
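
A minimal sketch of the hazard (the names shared_flag and critical_update
are illustrative, not part of the patch), assuming GCC with -mhtm on
powerpc64: when the built-in is not treated as a compiler barrier, the
store below may be moved out of the transaction; the empty asm statements
with a "memory" clobber, as used by htm.h in this patch, prevent that.

    #include <htmintrin.h>

    static int shared_flag;

    static void
    critical_update (void)
    {
      /* On affected GCC releases the store to shared_flag may be
         reordered across the tbegin/tend instructions, so it is no
         longer covered by the transaction.  */
      if (__builtin_tbegin (0))
        {
          shared_flag = 1;
          __builtin_tend (0);
        }
    }

    /* The workaround pattern: empty asm statements clobbering memory on
       both sides of the built-in keep memory accesses on the intended
       side of the transaction boundary.  */
    #define tbegin_barrier(tdb)                            \
      ({ __asm__ volatile ("" ::: "memory");               \
         unsigned int __ret = __builtin_tbegin (tdb);      \
         __asm__ volatile ("" ::: "memory");               \
         __ret;                                            \
      })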

(cherry picked from commit 42bf1c897170ff951c7fd0ee9da25f97ff787396)

Conflicts:
sysdeps/unix/sysv/linux/powerpc/elision-trylock.c

ChangeLog
sysdeps/powerpc/nptl/elide.h
sysdeps/powerpc/sysdep.h
sysdeps/unix/sysv/linux/powerpc/elision-lock.c
sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
sysdeps/unix/sysv/linux/powerpc/htm.h

index 758521d0c63534acf1ec91fceb19633c4ca3a355..5887ea183c94db53db8a6fd5fa8110c01f395d7c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2016-02-22  Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/htm.h (__libc_tbegin,
+       __libc_tabort, __libc_tend): New wrappers that enforce compiler
+       barriers to their respective compiler built-ins.
+       * sysdeps/powerpc/nptl/elide.h (__get_new_count, ELIDE_LOCK,
+       ELIDE_TRYLOCK, __elide_unlock): Use the new wrappers.
+       * sysdeps/powerpc/sysdep.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/elision-lock.c: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/elision-trylock.c: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/elision-unlock.c: Likewise.
+
 2016-02-15  Carlos O'Donell  <carlos@redhat.com>
 
        [BZ #18665]
index 2e1e4432789ef858be07d63391dc247a202f2577..02f8f3b6d9894ee7ca305280d6055824714266a2 100644
--- a/sysdeps/powerpc/nptl/elide.h
+++ b/sysdeps/powerpc/nptl/elide.h
@@ -68,14 +68,14 @@ __get_new_count (uint8_t *adapt_count, int attempt)
     else                                                               \
       for (int i = __elision_aconf.try_tbegin; i > 0; i--)             \
        {                                                               \
-         if (__builtin_tbegin (0))                                     \
+         if (__libc_tbegin (0))                                        \
            {                                                           \
              if (is_lock_free)                                         \
                {                                                       \
                  ret = 1;                                              \
                  break;                                                \
                }                                                       \
-             __builtin_tabort (_ABORT_LOCK_BUSY);                      \
+             __libc_tabort (_ABORT_LOCK_BUSY);                         \
            }                                                           \
          else                                                          \
            if (!__get_new_count (&adapt_count,i))                      \
@@ -90,7 +90,7 @@ __get_new_count (uint8_t *adapt_count, int attempt)
     if (__elision_aconf.try_tbegin > 0)                                \
       {                                                                \
        if (write)                                              \
-         __builtin_tabort (_ABORT_NESTED_TRYLOCK);             \
+         __libc_tabort (_ABORT_NESTED_TRYLOCK);                \
        ret = ELIDE_LOCK (adapt_count, is_lock_free);           \
       }                                                                \
     ret;                                                       \
@@ -102,7 +102,7 @@ __elide_unlock (int is_lock_free)
 {
   if (is_lock_free)
     {
-      __builtin_tend (0);
+      __libc_tend (0);
       return true;
     }
   return false;
index e32168e8f4fca8e253d038ac2ad5a99bb0ab1de1..f424fe45658beeab1b4c39342d3c317c2c0baf2a 100644
--- a/sysdeps/powerpc/sysdep.h
+++ b/sysdeps/powerpc/sysdep.h
 # define ABORT_TRANSACTION \
   ({                                           \
     if (THREAD_GET_TM_CAPABLE ())              \
-      __builtin_tabort (_ABORT_SYSCALL);       \
+      __libc_tabort (_ABORT_SYSCALL);  \
   })
 #else
 # define ABORT_TRANSACTION
index c6731ca6a4b07bf567c624d285a89e4a2692377e..e11ad1dc21ece4ef82c4795a32fd7bf38b52e01e 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -74,12 +74,12 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 
   for (int i = aconf.try_tbegin; i > 0; i--)
     {
-      if (__builtin_tbegin (0))
+      if (__libc_tbegin (0))
        {
          if (*lock == 0)
            return 0;
          /* Lock was busy.  Fall back to normal locking.  */
-         __builtin_tabort (_ABORT_LOCK_BUSY);
+         __libc_tabort (_ABORT_LOCK_BUSY);
        }
       else
        {
index 7b6d1b9b98320888ed958022cd14f96d9783a4fe..edec155058d1b43e60fc6737f57ca8734f2962b4 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -31,7 +31,7 @@ int
 __lll_trylock_elision (int *futex, short *adapt_count)
 {
  /* Implement POSIX semantics by forbidding nesting elided trylocks.  */
-  __builtin_tabort (_ABORT_NESTED_TRYLOCK);
+  __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
   if (*adapt_count > 0)
@@ -40,13 +40,13 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       goto use_lock;
     }
 
-  if (__builtin_tbegin (0))
+  if (__libc_tbegin (0))
     {
       if (*futex == 0)
        return 0;
 
       /* Lock was busy.  Fall back to normal locking.  */
-      __builtin_tabort (_ABORT_LOCK_BUSY);
+      __libc_tabort (_ABORT_LOCK_BUSY);
     }
   else
     {
index f04c3393831b75d9c6c0f88c09428b39a1d0097b..7234db6e2d36fe83873ecea63d5c6ed1294c75ac 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -25,7 +25,7 @@ __lll_unlock_elision(int *lock, int pshared)
 {
   /* When the lock was free we're in a transaction.  */
   if (*lock == 0)
-    __builtin_tend (0);
+    __libc_tend (0);
   else
     lll_unlock ((*lock), pshared);
   return 0;
index 57d5cd62620340ccd932dd913b8d0621ef8c5863..f9a25874ff06bbe4c2f43912bb3dbcf227770e8b 100644
--- a/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
      __ret;                            \
   })
 
-#define __builtin_tbegin(tdb)       _tbegin ()
-#define __builtin_tend(nested)      _tend ()
-#define __builtin_tabort(abortcode) _tabort (abortcode)
-#define __builtin_get_texasru()     _texasru ()
+#define __libc_tbegin(tdb)       _tbegin ()
+#define __libc_tend(nested)      _tend ()
+#define __libc_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru()  _texasru ()
 
 #else
 # include <htmintrin.h>
+
+# ifdef __TM_FENCE__
+   /* New GCC behavior.  */
+#  define __libc_tbegin(R)  __builtin_tbegin (R)
+#  define __libc_tend(R)    __builtin_tend (R)
+#  define __libc_tabort(R)  __builtin_tabort (R)
+# else
+   /* Work around an old GCC behavior.  Earlier releases of GCC 4.9 and
+      5.0 did not treat __builtin_tbegin, __builtin_tend and
+      __builtin_tabort as compiler barriers, moving instructions into
+      and out of the transaction.
+      Remove this when glibc drops support for GCC 5.0.  */
+#  define __libc_tbegin(R)                     \
+   ({ __asm__ volatile("" ::: "memory");       \
+     unsigned int __ret = __builtin_tbegin (R);        \
+     __asm__ volatile("" ::: "memory");                \
+     __ret;                                    \
+   })
+#  define __libc_tabort(R)                     \
+  ({ __asm__ volatile("" ::: "memory");                \
+    unsigned int __ret = __builtin_tabort (R); \
+    __asm__ volatile("" ::: "memory");         \
+    __ret;                                     \
+  })
+#  define __libc_tend(R)                       \
+   ({ __asm__ volatile("" ::: "memory");       \
+     unsigned int __ret = __builtin_tend (R);  \
+     __asm__ volatile("" ::: "memory");                \
+     __ret;                                    \
+   })
+# endif /* __TM_FENCE__  */
 #endif /* __HTM__  */
 
 #endif /* __ASSEMBLER__ */
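
For reference, a short usage sketch in the style of the elision code above
(try_elide and the lock pointer are illustrative only; it assumes the
__libc_* wrappers from htm.h and glibc's internal _ABORT_LOCK_BUSY abort
code shown in elision-lock.c):

    /* Try to run the critical section transactionally.  Returns 0 when
       elision succeeded, 1 when the caller must take the real lock.  */
    static int
    try_elide (int *lock)
    {
      if (__libc_tbegin (0))
        {
          if (*lock == 0)
            return 0;           /* Now running inside the transaction.  */
          /* Lock was busy: abort so the caller falls back to locking.  */
          __libc_tabort (_ABORT_LOCK_BUSY);
        }
      return 1;
    }

With the barrier-enforcing definitions, the load of *lock is guaranteed to
stay inside the transaction even on GCC releases that do not define
__TM_FENCE__.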