PowerPC: Fix a race condition when eliding a lock
author     Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
           Wed, 22 Jul 2015 12:26:02 +0000 (09:26 -0300)
committer  Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
           Tue, 20 Oct 2015 15:45:16 +0000 (13:45 -0200)
The previous code evaluated the macro argument is_lock_free into a
variable before starting a transaction.  This can cause an error if
another thread acquires the lock (without using a transaction) between
that evaluation and the beginning of the transaction.

This bug can be triggered with the following order of events (see the
sketch after this list):
1. The lock accessed by is_lock_free is free.
2. Thread T1 evaluates is_lock_free and stores into register R1 that the
   lock is free.
3. Thread T2 acquires the same lock used in is_lock_free.
4. T1 begins the transaction, creating a memory barrier: is_lock_free
   is now false, but R1 still holds true.
5. T1 reads R1 and doesn't abort the transaction.
6. T1 calls ELIDE_UNLOCK, which reads false from is_lock_free and decides
   to unlock a lock acquired by T2, leading to undefined behavior.
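
To make the window concrete, here is a minimal C sketch of the buggy
shape.  It is simplified and hypothetical (the function name and the
abort code are invented for illustration; it assumes GCC's HTM builtins
with -mhtm on POWER), not the actual glibc code:

  #include <stdbool.h>

  #define ABORT_LOCK_BUSY 0xf2    /* hypothetical abort code */

  /* Buggy shape: is_lock_free was already evaluated at the call site,
     so the transaction only tests a stale copy (R1 above).  */
  static inline bool
  elide_lock_buggy (int is_lock_free)
  {
    if (__builtin_tbegin (0))
      {
        if (is_lock_free)         /* stale register value, not a load */
          return true;            /* transaction stays open: "elided" */
        __builtin_tabort (ABORT_LOCK_BUSY);
      }
    return false;                 /* fall back to normal locking */
  }

  /* Call site: the lock word is loaded HERE, before tbegin runs,
     opening the window in which T2 can acquire the lock.  */
  /* elided = elide_lock_buggy (lock->owner == 0);  */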

This patch delays the evaluation of is_lock_free until inside the
transaction by moving that part of the code into the macro ELIDE_LOCK.
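
The corrected shape, again as a simplified, hypothetical sketch
mirroring the statement-expression macro in the diff below (and reusing
the invented ABORT_LOCK_BUSY from the sketch above), re-evaluates
is_lock_free transactionally:

  /* Fixed shape: is_lock_free is a macro argument, so its loads are
     re-executed inside the transaction and join its read set.  */
  #define ELIDE_LOCK_SKETCH(is_lock_free)                   \
    ({                                                      \
      int ret = 0;                                          \
      if (__builtin_tbegin (0))                             \
        {                                                   \
          if (is_lock_free)  /* fresh, transactional load */\
            ret = 1;                                        \
          else                                              \
            __builtin_tabort (ABORT_LOCK_BUSY);             \
        }                                                   \
      ret;                                                  \
    })

With this ordering, a store by T2 to the lock word after the evaluation
conflicts with the transaction's read set, so the hardware aborts T1
instead of letting it proceed on stale state.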

[BZ #18743]
* sysdeps/powerpc/nptl/elide.h (__elide_lock): Move most of this
code to...
(ELIDE_LOCK): ...here.
(__get_new_count): New function with part of the code from
__elide_lock that updates the value of adapt_count after a
transaction abort.
(__elided_trylock): Moved this code to...
(ELIDE_TRYLOCK): ...here.

(cherry picked from commit 6ec52bf634b7650b57ff67b5f5053bce8992d549)

ChangeLog
NEWS
sysdeps/powerpc/nptl/elide.h

diff --git a/ChangeLog b/ChangeLog
index 391ffca191e22a1af60ffb272abff23d75fec616..b32cd090a66599f6fd1b2b04564953e8047d18c9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-10-20  Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
+
+       [BZ #18743]
+       * sysdeps/powerpc/nptl/elide.h (__elide_lock): Move most of this
+       code to...
+       (ELIDE_LOCK): ...here.
+       (__get_new_count): New function with part of the code from
+       __elide_lock that updates the value of adapt_count after a
+       transaction abort.
+       (__elided_trylock): Moved this code to...
+       (ELIDE_TRYLOCK): ...here.
+
 2015-10-06  Florian Weimer  <fweimer@redhat.com>
 
        [BZ #19018]
diff --git a/NEWS b/NEWS
index cb503d13b01d1db12e332bfb1238fda7a20d138f..92037cee0880cd55fbfb7dd33d2f86cff1674370 100644
--- a/NEWS
+++ b/NEWS
@@ -9,8 +9,8 @@ Version 2.22.1
 
 * The following bugs are resolved with this release:
 
-  18589, 18778, 18781, 18787, 18796, 18870, 18887, 18921, 18928, 18969,
-  19018.
+  18589, 18743, 18778, 18781, 18787, 18796, 18870, 18887, 18921, 18928,
+  18969, 19018.
 
 * The LD_POINTER_GUARD environment variable can no longer be used to
   disable the pointer guard feature.  It is always enabled.
diff --git a/sysdeps/powerpc/nptl/elide.h b/sysdeps/powerpc/nptl/elide.h
index 389f5a5e9ee8449c2e0de15b92ba10607a676b9f..12171f45dc1758f5d9a4f5ae07d57516fae53459 100644
--- a/sysdeps/powerpc/nptl/elide.h
+++ b/sysdeps/powerpc/nptl/elide.h
 # include <htm.h>
 # include <elision-conf.h>
 
-/* Returns true if the lock defined by is_lock_free as elided.
-   ADAPT_COUNT is a pointer to per-lock state variable. */
-
+/* Get the new value of adapt_count according to the elision
+   configurations.  Returns true if the system should retry again or false
+   otherwise.  */
 static inline bool
-__elide_lock (uint8_t *adapt_count, int is_lock_free)
+__get_new_count (uint8_t *adapt_count)
 {
-  if (*adapt_count > 0)
+  /* A persistent failure indicates that a retry will probably
+     result in another failure.  Use normal locking now and
+     for the next couple of calls.  */
+  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
     {
-      (*adapt_count)--;
+      if (__elision_aconf.skip_lock_internal_abort > 0)
+       *adapt_count = __elision_aconf.skip_lock_internal_abort;
       return false;
     }
-
-  for (int i = __elision_aconf.try_tbegin; i > 0; i--)
-    {
-      if (__builtin_tbegin (0))
-       {
-         if (is_lock_free)
-           return true;
-         /* Lock was busy.  */
-         __builtin_tabort (_ABORT_LOCK_BUSY);
-       }
-      else
-       {
-         /* A persistent failure indicates that a retry will probably
-            result in another failure.  Use normal locking now and
-            for the next couple of calls.  */
-         if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
-           {
-             if (__elision_aconf.skip_lock_internal_abort > 0)
-               *adapt_count = __elision_aconf.skip_lock_internal_abort;
-             break;
-           }
-         /* Same logic as above, but for a number of temporary failures in a
-            a row.  */
-         else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
-                  && __elision_aconf.try_tbegin > 0)
-           *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
-       }
-     }
-
-  return false;
+  /* Same logic as above, but for a number of temporary failures in a
+     a row.  */
+  else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
+          && __elision_aconf.try_tbegin > 0)
+    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
+  return true;
 }
 
-# define ELIDE_LOCK(adapt_count, is_lock_free) \
-  __elide_lock (&(adapt_count), is_lock_free)
-
-
-static inline bool
-__elide_trylock (uint8_t *adapt_count, int is_lock_free, int write)
-{
-  if (__elision_aconf.try_tbegin > 0)
-    {
-      if (write)
-       __builtin_tabort (_ABORT_NESTED_TRYLOCK);
-      return __elide_lock (adapt_count, is_lock_free);
-    }
-  return false;
-}
+/* CONCURRENCY NOTES:
+
+   The evaluation of the macro expression is_lock_free encompasses one or
+   more loads from memory locations that are concurrently modified by other
+   threads.  For lock elision to work, this evaluation and the rest of the
+   critical section protected by the lock must be atomic because an
+   execution with lock elision must be equivalent to an execution in which
+   the lock would have been actually acquired and released.  Therefore, we
+   evaluate is_lock_free inside of the transaction that represents the
+   critical section for which we want to use lock elision, which ensures
+   the atomicity that we require.  */
+
+/* Returns 0 if the lock defined by is_lock_free was elided.
+   ADAPT_COUNT is a per-lock state variable.  */
+# define ELIDE_LOCK(adapt_count, is_lock_free)                         \
+  ({                                                                   \
+    int ret = 0;                                                       \
+    if (adapt_count > 0)                                               \
+      (adapt_count)--;                                                 \
+    else                                                               \
+      for (int i = __elision_aconf.try_tbegin; i > 0; i--)             \
+       {                                                               \
+         if (__builtin_tbegin (0))                                     \
+           {                                                           \
+             if (is_lock_free)                                         \
+               {                                                       \
+                 ret = 1;                                              \
+                 break;                                                \
+               }                                                       \
+             __builtin_tabort (_ABORT_LOCK_BUSY);                      \
+           }                                                           \
+         else                                                          \
+           if (!__get_new_count(&adapt_count))                         \
+             break;                                                    \
+       }                                                               \
+    ret;                                                               \
+  })
 
 # define ELIDE_TRYLOCK(adapt_count, is_lock_free, write)       \
-  __elide_trylock (&(adapt_count), is_lock_free, write)
+  ({                                                           \
+    int ret = 0;                                               \
+    if (__elision_aconf.try_tbegin > 0)                                \
+      {                                                                \
+       if (write)                                              \
+         __builtin_tabort (_ABORT_NESTED_TRYLOCK);             \
+       ret = ELIDE_LOCK (adapt_count, is_lock_free);           \
+      }                                                                \
+    ret;                                                       \
+  })
 
 
 static inline bool