Add compiler barriers around modifications of the robust mutex list.
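The added __asm ("" ::: "memory") statements are plain GCC compiler barriers:
they emit no instructions, but they keep the compiler from reordering memory
accesses across them, so the stores to robust_head.list_op_pending cannot be
moved relative to the lock acquisition or the robust list update.  A minimal
sketch of the resulting pattern; sketch_mutex, sketch_robust_head and
sketch_robust_acquire are made-up stand-ins for the real thread descriptor
fields and ENQUEUE_MUTEX, not glibc declarations:

/* Sketch only; compile with GCC or a compatible compiler.  */
struct sketch_mutex { struct sketch_mutex *next; };

static __thread struct
{
  void *list_op_pending;
  struct sketch_mutex *list;
} sketch_robust_head;

static void
sketch_robust_acquire (struct sketch_mutex *m)
{
  /* 1. Announce the pending operation before starting it.  */
  sketch_robust_head.list_op_pending = &m->next;
  __asm ("" ::: "memory");	/* compiler barrier only, no CPU fence */

  /* 2. ... acquire the lock here ...  */

  /* 3. Enqueue only after the lock has really been acquired.  */
  __asm ("" ::: "memory");
  m->next = sketch_robust_head.list;
  sketch_robust_head.list = m;

  /* 4. Clear the pending marker only after the enqueue has been done.  */
  __asm ("" ::: "memory");
  sketch_robust_head.list_op_pending = NULL;
}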
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index bdfa529f639bda7eb59b620c042545f268f5a056..dc9ca4c4764be2654141493330bd8a91a989f601 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #define lll_trylock_elision(a,t) lll_trylock(a)
 #endif
 
+/* Some of the following definitions differ when pthread_mutex_cond_lock.c
+   includes this file.  */
 #ifndef LLL_MUTEX_LOCK
 # define LLL_MUTEX_LOCK(mutex) \
   lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
 # define LLL_MUTEX_TRYLOCK(mutex) \
   lll_trylock ((mutex)->__data.__lock)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
-  lll_robust_lock ((mutex)->__data.__lock, id, \
-                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
+# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
 # define LLL_MUTEX_LOCK_ELISION(mutex) \
   lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                   PTHREAD_MUTEX_PSHARED (mutex))
@@ -180,19 +180,39 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
+      /* We need to set op_pending before starting the operation.  Also
+        see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
 
       oldval = mutex->__data.__lock;
-      do
+      /* This is set to FUTEX_WAITERS iff we might have shared the
+        FUTEX_WAITERS flag with other threads, and therefore need to keep it
+        set to avoid lost wake-ups.  We have the same requirement in the
+        simple mutex algorithm.
+        We start with value zero for a normal mutex, and FUTEX_WAITERS if we
+        are building the special case mutexes for use from within condition
+        variables.  */
+      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
+      while (1)
        {
-       again:
+         /* Try to acquire the lock through a CAS from 0 (not acquired) to
+            our TID | assume_other_futex_waiters.  */
+         if (__glibc_likely ((oldval == 0)
+                             && (atomic_compare_and_exchange_bool_acq
+                                 (&mutex->__data.__lock,
+                                  id | assume_other_futex_waiters, 0) == 0)))
+             break;
+
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
 #ifdef NO_INCR
+             /* We are not taking assume_other_futex_waiters into account
+                here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
 #else
-             newval |= (oldval & FUTEX_WAITERS);
+             newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
 #endif
 
              newval
@@ -202,7 +222,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
              if (newval != oldval)
                {
                  oldval = newval;
-                 goto again;
+                 continue;
                }
 
              /* We got the mutex.  */
@@ -210,7 +230,12 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+             /* We must not enqueue the mutex before we have acquired it.
+                Also see comments at ENQUEUE_MUTEX.  */
+             __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
+             /* We need to clear op_pending after we enqueue the mutex.  */
+             __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
              /* Note that we deliberately exit here.  If we fall
@@ -232,6 +257,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
+                 /* We do not need to ensure ordering wrt another memory
+                    access.  Also see comments at ENQUEUE_MUTEX. */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
@@ -239,6 +266,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 
              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
+                 /* We do not need to ensure ordering wrt another memory
+                    access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
 
@@ -253,23 +282,57 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
                }
            }
 
-         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
-
-         if (__builtin_expect (mutex->__data.__owner
-                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+         /* We cannot acquire the mutex nor has its owner died.  Thus, try
+            to block using futexes.  Set FUTEX_WAITERS if necessary so that
+            other threads are aware that there are potentially threads
+            blocked on the futex.  Restart if oldval changed in the
+            meantime.  */
+         if ((oldval & FUTEX_WAITERS) == 0)
            {
-             /* This mutex is now not recoverable.  */
-             mutex->__data.__count = 0;
-             lll_unlock (mutex->__data.__lock,
-                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
-             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
-             return ENOTRECOVERABLE;
+             if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
+                                                       oldval | FUTEX_WAITERS,
+                                                       oldval)
+                 != 0)
+               {
+                 oldval = mutex->__data.__lock;
+                 continue;
+               }
+             oldval |= FUTEX_WAITERS;
            }
+
+         /* It is now possible that we share the FUTEX_WAITERS flag with
+            another thread; therefore, update assume_other_futex_waiters so
+            that we do not forget about this when handling other cases
+            above and thus do not cause lost wake-ups.  */
+         assume_other_futex_waiters |= FUTEX_WAITERS;
+
+         /* Block using the futex and reload current lock value.  */
+         lll_futex_wait (&mutex->__data.__lock, oldval,
+                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+         oldval = mutex->__data.__lock;
+       }
+
+      /* We have acquired the mutex; check if it is still consistent.  */
+      if (__builtin_expect (mutex->__data.__owner
+                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+       {
+         /* This mutex is now not recoverable.  */
+         mutex->__data.__count = 0;
+         int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
+         lll_unlock (mutex->__data.__lock, private);
+         /* FIXME This violates the mutex destruction requirements.  See
+            __pthread_mutex_unlock_full.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+         return ENOTRECOVERABLE;
        }
-      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
       mutex->__data.__count = 1;
+      /* We must not enqueue the mutex before we have acquired it.
+        Also see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       ENQUEUE_MUTEX (mutex);
+      /* We need to clear op_pending after we enqueue the mutex.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
@@ -290,10 +353,15 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
 
        if (robust)
-         /* Note: robust PI futexes are signaled by setting bit 0.  */
-         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
-                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
-                                  | 1));
+         {
+           /* Note: robust PI futexes are signaled by setting bit 0.  */
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                          (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                    | 1));
+           /* We need to set op_pending before starting the operation.  Also
+              see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
+         }
 
        oldval = mutex->__data.__lock;
 
@@ -302,12 +370,16 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
+               /* We do not need to ensure ordering wrt another memory
+                  access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }
 
            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
+               /* We do not need to ensure ordering wrt another memory
+                  access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
                /* Just bump the counter.  */
@@ -370,7 +442,12 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+           /* We must not enqueue the mutex before we have acquired it.
+              Also see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
+           /* We need to clear op_pending after we enqueue the mutex.  */
+           __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
            /* Note that we deliberately exit here.  If we fall
@@ -398,6 +475,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
 
+           /* To the kernel, this will be visible after the kernel has
+              acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
@@ -405,7 +484,12 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
        mutex->__data.__count = 1;
        if (robust)
          {
+           /* We must not enqueue the mutex before we have acquired it.
+              Also see comments at ENQUEUE_MUTEX.  */
+           __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
+           /* We need to clear op_pending after we enqueue the mutex.  */
+           __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
       }
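The largest change above replaces the single lll_robust_lock call with an
explicit acquisition loop so that the code can track, via
assume_other_futex_waiters, whether the FUTEX_WAITERS flag may already be
shared with other threads.  A rough, self-contained sketch of that loop;
cas_acq and futex_wait_stub are hypothetical helpers standing in for
atomic_compare_and_exchange_bool_acq and the real futex syscall, and the
FUTEX_OWNER_DIED recovery branch is omitted:

#include <stdatomic.h>
#include <sched.h>

/* Kernel futex flag value (see <linux/futex.h>).  */
#define FUTEX_WAITERS 0x80000000u

/* Stand-in for atomic_compare_and_exchange_bool_acq; note it returns
   nonzero on success, unlike the glibc macro which returns 0.  */
static int
cas_acq (_Atomic unsigned int *lock, unsigned int oldv, unsigned int newv)
{
  return atomic_compare_exchange_strong_explicit (lock, &oldv, newv,
						  memory_order_acquire,
						  memory_order_relaxed);
}

/* Stand-in for FUTEX_WAIT: just yield instead of sleeping in the kernel.  */
static void
futex_wait_stub (_Atomic unsigned int *lock, unsigned int expected)
{
  if (atomic_load_explicit (lock, memory_order_relaxed) == expected)
    sched_yield ();
}

/* ID is the caller's TID; ASSUME_OTHER_FUTEX_WAITERS is 0 for a plain
   robust mutex and FUTEX_WAITERS for the condvar-internal variant.  */
static void
robust_lock_sketch (_Atomic unsigned int *lock, unsigned int id,
		    unsigned int assume_other_futex_waiters)
{
  unsigned int oldval = atomic_load_explicit (lock, memory_order_relaxed);

  while (1)
    {
      /* Fast path: the futex word is 0, try to acquire it in one CAS.  */
      if (oldval == 0 && cas_acq (lock, 0, id | assume_other_futex_waiters))
	break;

      /* (The FUTEX_OWNER_DIED recovery branch is omitted here.)
	 Set FUTEX_WAITERS before blocking so the owner knows it must
	 wake us on unlock; restart if the word changed in the meantime.  */
      if ((oldval & FUTEX_WAITERS) == 0)
	{
	  if (!cas_acq (lock, oldval, oldval | FUTEX_WAITERS))
	    {
	      oldval = atomic_load_explicit (lock, memory_order_relaxed);
	      continue;
	    }
	  oldval |= FUTEX_WAITERS;
	}

      /* We may now share FUTEX_WAITERS with other threads, so keep it
	 set in all later acquisition attempts to avoid lost wake-ups.  */
      assume_other_futex_waiters |= FUTEX_WAITERS;

      /* Block until the futex word changes, then reload it.  */
      futex_wait_stub (lock, oldval);
      oldval = atomic_load_explicit (lock, memory_order_relaxed);
    }
}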