Add compiler barriers around modifications of the robust mutex list for pthread_mutex_trylock.
author     Stefan Liebler <stli@linux.ibm.com>
           Thu, 7 Feb 2019 14:18:36 +0000 (15:18 +0100)
committer  Stefan Liebler <stli@linux.ibm.com>
           Thu, 7 Feb 2019 14:18:36 +0000 (15:18 +0100)
While debugging a kernel warning, Thomas Gleixner, Sebastian Sewior and
Heiko Carstens found a bug in pthread_mutex_trylock due to misordered
instructions:
140:   a5 1b 00 01             oill    %r1,1
144:   e5 48 a0 f0 00 00       mvghi   240(%r10),0   <--- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
14a:   e3 10 a0 e0 00 24       stg     %r1,224(%r10) <--- last THREAD_SETMEM of ENQUEUE_MUTEX_PI

vs (with compiler barriers):
140:   a5 1b 00 01             oill    %r1,1
144:   e3 10 a0 e0 00 24       stg     %r1,224(%r10)
14a:   e5 48 a0 f0 00 00       mvghi   240(%r10),0
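
At the C level the fix is a plain compiler barrier between the two stores.
A minimal standalone sketch of the effect (the struct, its fields and the
function name here are illustrative only, not the glibc internals):

/* Without the barrier the compiler is free to reorder the two
   independent stores, which is exactly the miscompilation shown
   above.  An empty asm with a "memory" clobber forbids that.  */
struct robust_head { void *list; void *list_op_pending; };

void
enqueue_then_clear (struct robust_head *rh, void *node)
{
  rh->list = node;            /* analogue of the ENQUEUE_MUTEX_PI store   */
  __asm ("" ::: "memory");    /* compiler barrier: keep the store order   */
  rh->list_op_pending = NULL; /* analogue of clearing list_op_pending     */
}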

Please have a look at the discussion:
"Re: WARN_ON_ONCE(!new_owner) within wake_futex_pi() triggerede"
(https://lore.kernel.org/lkml/20190202112006.GB3381@osiris/)

This patch introduces the same compiler barriers and comments for
pthread_mutex_trylock as were introduced for pthread_mutex_lock and
pthread_mutex_timedlock by commit 8f9450a0b7a9e78267e8ae1ab1000ebca08e473e
("Add compiler barriers around modifications of the robust mutex list").

ChangeLog:

[BZ #24180]
* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
Add compiler barriers and comments.

ChangeLog
nptl/pthread_mutex_trylock.c

diff --git a/ChangeLog b/ChangeLog
index 327066298344708ee0f2c96db4617919f6e1c4df..29f4ac31e1eac7345e0a3bcd7c1fd9943237be88 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2019-02-07  Stefan Liebler  <stli@linux.ibm.com>
+
+       [BZ #24180]
+       * nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
+       Add compiler barriers and comments.
+
 2019-02-07  Florian Weimer  <fweimer@redhat.com>
 
        * include/array_length.h (array_length): Do not use a statement
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 8fe43b8f0f464f9f1183efc99c36b4940e3d82dd..bf2869eca23b7b67aa41327a0c17bf056740a0f1 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -94,6 +94,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
+      /* We need to set op_pending before starting the operation.  Also
+        see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
 
       oldval = mutex->__data.__lock;
       do
@@ -119,7 +122,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+             /* We must not enqueue the mutex before we have acquired it.
+                Also see comments at ENQUEUE_MUTEX.  */
+             __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
+             /* We need to clear op_pending after we enqueue the mutex.  */
+             __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
              /* Note that we deliberately exist here.  If we fall
@@ -135,6 +143,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
+                 /* We do not need to ensure ordering wrt another memory
+                    access.  Also see comments at ENQUEUE_MUTEX. */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
@@ -142,6 +152,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 
              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
+                 /* We do not need to ensure ordering wrt another memory
+                    access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
 
@@ -160,6 +172,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
+             /* We haven't acquired the lock as it is already acquired by
+                another owner.  We do not need to ensure ordering wrt another
+                memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
              return EBUSY;
@@ -173,13 +188,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+             /* FIXME This violates the mutex destruction requirements.  See
+                __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
       while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      /* We must not enqueue the mutex before we have acquired it.
+        Also see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       ENQUEUE_MUTEX (mutex);
+      /* We need to clear op_pending after we enqueue the mutex.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
       mutex->__data.__owner = id;
@@ -211,10 +233,15 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
        }
 
        if (robust)
-         /* Note: robust PI futexes are signaled by setting bit 0.  */
-         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
-                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
-                                  | 1));
+         {
+           /* Note: robust PI futexes are signaled by setting bit 0.  */
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                          (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                    | 1));
+           /* We need to set op_pending before starting the operation.  Also
+              see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
+         }
 
        oldval = mutex->__data.__lock;
 
@@ -223,12 +250,16 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
+               /* We do not need to ensure ordering wrt another memory
+                  access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }
 
            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
+               /* We do not need to ensure ordering wrt another memory
+                  access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
                /* Just bump the counter.  */
@@ -250,6 +281,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
+               /* We haven't acquired the lock as it is already acquired by
+                  another owner.  We do not need to ensure ordering wrt another
+                  memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
                return EBUSY;
@@ -270,6 +304,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
+               /* The kernel has not yet finished the mutex owner death.
+                  We do not need to ensure ordering wrt another memory
+                  access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
                return EBUSY;
@@ -287,7 +324,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+           /* We must not enqueue the mutex before we have acquired it.
+              Also see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
+           /* We need to clear op_pending after we enqueue the mutex.  */
+           __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
            /* Note that we deliberately exit here.  If we fall
@@ -310,13 +352,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
 
+           /* To the kernel, this will be visible after the kernel has
+              acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
 
        if (robust)
          {
+           /* We must not enqueue the mutex before we have acquired it.
+              Also see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
+           /* We need to clear op_pending after we enqueue the mutex.  */
+           __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }