diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 1c3ee4fe25555d7815a3cd4aa4d8c72a8191ddcd..96075129b8df840f76127e614871efb3c2d5795f 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Lesser General Public License for more details.
 
    You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
 
 #include <assert.h>
 #include <errno.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <sys/param.h>
 #include <not-cancel.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
+#include <stap-probe.h>
 
+#ifndef lll_lock_elision
+#define lll_lock_elision(lock, try_lock, private)      ({ \
+      lll_lock (lock, private); 0; })
+#endif
+
+#ifndef lll_trylock_elision
+#define lll_trylock_elision(a,t) lll_trylock(a)
+#endif
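
[Note: the fallback above relies on a GCC statement expression: ({ lll_lock (lock, private); 0; }) runs the plain lock and then evaluates to 0, the same result an elision-capable build reports when the lock is acquired without being elided, so callers need not care whether elision exists. A minimal standalone sketch of the pattern, with toy names standing in for the lll_* internals:

#include <stdio.h>

/* Toy stand-in for lll_lock: "acquire" by setting the lock word.  */
#define TOY_LOCK(l) ((l) = 1)

/* GCC statement expression: take the lock, then yield 0 ("not elided").  */
#define TOY_LOCK_ELISION(l) ({ TOY_LOCK (l); 0; })

int
main (void)
{
  int lock = 0;
  int elided = TOY_LOCK_ELISION (lock);
  printf ("lock=%d elided=%d\n", lock, elided);   /* prints lock=1 elided=0 */
  return 0;
}]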
 
 #ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
+# define LLL_MUTEX_LOCK(mutex) \
+  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_trylock ((mutex)->__data.__lock)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+  lll_robust_lock ((mutex)->__data.__lock, id, \
+                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_LOCK_ELISION(mutex) \
+  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
+                  PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
+  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
+                  PTHREAD_MUTEX_PSHARED (mutex))
+#endif
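
[Note: the rewritten macros take the whole mutex rather than just the lock word so that PTHREAD_MUTEX_PSHARED and PTHREAD_ROBUST_MUTEX_PSHARED can select private or process-shared futex operations per mutex. On the application side, the shared variant is requested through the standard attribute API; a minimal sketch (the mutex itself must then live in memory mapped into every participating process):

#include <pthread.h>

int
make_shared_mutex (pthread_mutex_t *m)
{
  pthread_mutexattr_t a;
  int e = pthread_mutexattr_init (&a);
  if (e != 0)
    return e;
  e = pthread_mutexattr_setpshared (&a, PTHREAD_PROCESS_SHARED);
  if (e == 0)
    e = pthread_mutex_init (m, &a);
  pthread_mutexattr_destroy (&a);
  return e;
}]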
+
+#ifndef FORCE_ELISION
+#define FORCE_ELISION(m, s)
 #endif
 
+static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
+     __attribute_noinline__;
 
 int
 __pthread_mutex_lock (mutex)
@@ -39,19 +64,45 @@ __pthread_mutex_lock (mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
-  int oldval;
-  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+
+  LIBC_PROBE (mutex_entry, 1, mutex);
+
+  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
+                                | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
+    return __pthread_mutex_lock_full (mutex);
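
[Note: the single mask test above keeps the hot path small: any type bit outside the basic kind mask and the elision hints (robust, PI, PP) diverts to the cold, noinline __pthread_mutex_lock_full. An illustrative sketch of the routing idea; the constants are stand-ins for the PTHREAD_MUTEX_* flags defined in pthreadP.h:

#include <stdio.h>

enum
{
  KIND_MASK     = 3,       /* timed/recursive/errorcheck/adaptive */
  ELISION_FLAGS = 0x300,   /* elision on/off hints */
  ROBUST_FLAG   = 0x010    /* any other bit means "slow path" */
};

static const char *
route (unsigned int type)
{
  return (type & ~(KIND_MASK | ELISION_FLAGS)) ? "slow" : "fast";
}

int
main (void)
{
  printf ("timed mutex:  %s path\n", route (0));
  printf ("robust mutex: %s path\n", route (ROBUST_FLAG));
  return 0;
}]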
 
-  int retval = 0;
-  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
+  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
+    {
+      FORCE_ELISION (mutex, goto elision);
+    simple:
+      /* Normal mutex.  */
+      LLL_MUTEX_LOCK (mutex);
+      assert (mutex->__data.__owner == 0);
+    }
+#ifdef HAVE_ELISION
+  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
+    {
+  elision: __attribute__((unused))
+      /* This case can never happen on a system without elision,
+         as the mutex type initialization functions will not
+         allow the elision flags to be set.  */
+      /* Don't record the owner or the user count for the elision
+         case.  This is a tail call.  */
+      return LLL_MUTEX_LOCK_ELISION (mutex);
+    }
+#endif
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+                            == PTHREAD_MUTEX_RECURSIVE_NP, 1))
     {
       /* Recursive mutex.  */
-    case PTHREAD_MUTEX_RECURSIVE_NP:
+      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+         if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;
 
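[Note: the overflow guard works because __count is an unsigned counter: at its maximum value one more increment would wrap to zero and silently lose the nesting depth, so the lock refuses with EAGAIN first. The same test in isolation, assuming an unsigned int counter as in glibc's struct __pthread_mutex_s:

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  unsigned int count = UINT_MAX;   /* nesting depth already at its limit */
  if (count + 1 == 0)              /* unsigned wrap: UINT_MAX + 1 == 0 */
    puts ("would overflow: refuse with EAGAIN");
  return 0;
}]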
@@ -61,32 +112,18 @@ __pthread_mutex_lock (mutex)
        }
 
       /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+      LLL_MUTEX_LOCK (mutex);
 
       assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
-      break;
-
-      /* Error checking mutex.  */
-    case PTHREAD_MUTEX_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (mutex->__data.__owner == id, 0))
-       return EDEADLK;
-
-      /* FALLTHROUGH */
-
-    case PTHREAD_MUTEX_TIMED_NP:
-    simple:
-      /* Normal mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
-      assert (mutex->__data.__owner == 0);
-      break;
-
-    case PTHREAD_MUTEX_ADAPTIVE_NP:
+    }
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+                         == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+    {
       if (! __is_smp)
        goto simple;
 
-      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
+      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -95,7 +132,7 @@ __pthread_mutex_lock (mutex)
            {
              if (cnt++ >= max_cnt)
                {
-                 LLL_MUTEX_LOCK (mutex->__data.__lock);
+                 LLL_MUTEX_LOCK (mutex);
                  break;
                }
 
@@ -103,13 +140,43 @@ __pthread_mutex_lock (mutex)
              BUSY_WAIT_NOP;
 #endif
            }
-         while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);
+         while (LLL_MUTEX_TRYLOCK (mutex) != 0);
 
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
       assert (mutex->__data.__owner == 0);
-      break;
+    }
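
[Note: the __spins update just above is an exponentially weighted moving average with weight 1/8: each contended acquisition moves the stored spin budget one eighth of the way toward the count just observed, so the budget only approaches MAX_ADAPTIVE_COUNT under persistent contention and decays again when contention fades. The same update in isolation:

#include <stdio.h>

/* EWMA as in the __spins update above: move 1/8 of the way from the
   current estimate toward the newly observed spin count.  */
static int
adapt_spins (int spins, int observed)
{
  return spins + (observed - spins) / 8;
}

int
main (void)
{
  int spins = 0;
  for (int i = 0; i < 5; i++)
    {
      spins = adapt_spins (spins, 100);   /* repeated heavy contention */
      printf ("after sample %d: spins=%d\n", i + 1, spins);
    }
  return 0;
}]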
+  else
+    {
+      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
+      /* Check whether we already hold the mutex.  */
+      if (__glibc_unlikely (mutex->__data.__owner == id))
+       return EDEADLK;
+      goto simple;
+    }
+
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+  /* Record the ownership.  */
+  mutex->__data.__owner = id;
+#ifndef NO_INCR
+  ++mutex->__data.__nusers;
+#endif
+
+  LIBC_PROBE (mutex_acquired, 1, mutex);
 
+  return 0;
+}
+
+static int
+__pthread_mutex_lock_full (pthread_mutex_t *mutex)
+{
+  int oldval;
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+  switch (PTHREAD_MUTEX_TYPE (mutex))
+    {
     case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
     case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
     case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
@@ -163,24 +230,23 @@ __pthread_mutex_lock (mutex)
            }
 
          /* Check whether we already hold the mutex.  */
-         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+         if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
-             if (mutex->__data.__kind
-                 == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+             int kind = PTHREAD_MUTEX_TYPE (mutex);
+             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }
 
-             if (mutex->__data.__kind
-                 == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
 
                  /* Just bump the counter.  */
-                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;
 
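[Note: a robust mutex stores the owner's kernel TID in the low bits of the lock word, so masking with FUTEX_TID_MASK and comparing against the caller's own TID is what detects the relock-by-owner cases above: EDEADLK for the error-checking kind, a counter bump for the recursive kind. A standalone sketch of that test (FUTEX_TID_MASK comes from <linux/futex.h>):

#include <linux/futex.h>     /* FUTEX_TID_MASK */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>

int
main (void)
{
  pid_t tid = (pid_t) syscall (SYS_gettid);

  /* Pretend this thread already owns the lock: the lock word holds its
     TID, possibly ORed with FUTEX_WAITERS or FUTEX_OWNER_DIED.  */
  unsigned int lockword = (unsigned int) tid;

  if ((lockword & FUTEX_TID_MASK) == (unsigned int) tid)
    puts ("relock by the current owner detected");
  return 0;
}]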
@@ -190,14 +256,15 @@ __pthread_mutex_lock (mutex)
                }
            }
 
-         oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
+         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
 
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
-             lll_mutex_unlock (mutex->__data.__lock);
+             lll_unlock (mutex->__data.__lock,
+                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
@@ -209,6 +276,10 @@ __pthread_mutex_lock (mutex)
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
+    /* The PI support requires the Linux futex system call.  If that's not
+       available, pthread_mutex_init should never have allowed the type to
+       be set, so such a type ends up in the default case for an invalid
+       type.  */
+#ifdef __NR_futex
     case PTHREAD_MUTEX_PI_RECURSIVE_NP:
     case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
     case PTHREAD_MUTEX_PI_NORMAL_NP:
@@ -230,7 +301,7 @@ __pthread_mutex_lock (mutex)
        oldval = mutex->__data.__lock;
 
        /* Check whether we already hold the mutex.  */
-       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+       if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
@@ -243,7 +314,7 @@ __pthread_mutex_lock (mutex)
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
                /* Just bump the counter.  */
-               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+               if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;
 
@@ -254,9 +325,9 @@ __pthread_mutex_lock (mutex)
          }
 
        int newval = id;
-#ifdef NO_INCR
+# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
-#endif
+# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);
 
@@ -264,9 +335,13 @@ __pthread_mutex_lock (mutex)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
+           int private = (robust
+                          ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                          : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                                     FUTEX_LOCK_PI, 1, 0);
+                                     __lll_private_flag (FUTEX_LOCK_PI,
+                                                         private), 1, 0);
 
            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
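[Note: the private flag chosen above is the usual futex optimization: __lll_private_flag ORs FUTEX_PRIVATE_FLAG into the operation when the mutex is process-private, letting the kernel skip the cross-process futex hash lookup, while process-shared (and shared robust) mutexes must keep the plain op. The flag arithmetic in isolation, with constants from <linux/futex.h>:

#include <linux/futex.h>   /* FUTEX_LOCK_PI, FUTEX_PRIVATE_FLAG */
#include <stdio.h>

int
main (void)
{
  int shared_op  = FUTEX_LOCK_PI;                       /* pshared mutex */
  int private_op = FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG;  /* private mutex */
  printf ("FUTEX_LOCK_PI: shared=%#x private=%#x\n", shared_op, private_op);
  return 0;
}]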
@@ -289,7 +364,7 @@ __pthread_mutex_lock (mutex)
            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
 
-       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+       if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
 
@@ -306,9 +381,9 @@ __pthread_mutex_lock (mutex)
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
-#ifdef NO_INCR
+# ifdef NO_INCR
            --mutex->__data.__nusers;
-#endif
+# endif
 
            return EOWNERDEAD;
          }
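
[Note: EOWNERDEAD is a successful acquisition with a warning: the caller now holds the mutex, but the previous owner died, so the protected state must be repaired and the mutex marked consistent before it is unlocked, or it becomes permanently unusable. The standard calling pattern, using only the portable POSIX API (recover_state is a placeholder for application-level repair):

#include <pthread.h>
#include <errno.h>

static void recover_state (void) { /* application-specific repair */ }

static int
lock_robust (pthread_mutex_t *m)
{
  int e = pthread_mutex_lock (m);
  if (e == EOWNERDEAD)
    {
      recover_state ();
      /* Skipping this makes the next unlock mark the mutex unrecoverable;
         later lock attempts then fail with ENOTRECOVERABLE.  */
      pthread_mutex_consistent (m);
      e = 0;
    }
  return e;
}]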
@@ -322,7 +397,9 @@ __pthread_mutex_lock (mutex)
 
            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                             FUTEX_UNLOCK_PI, 0, 0);
+                             __lll_private_flag (FUTEX_UNLOCK_PI,
+                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+                             0, 0);
 
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
@@ -336,6 +413,7 @@ __pthread_mutex_lock (mutex)
          }
       }
       break;
+#endif  /* __NR_futex.  */
 
     case PTHREAD_MUTEX_PP_RECURSIVE_NP:
     case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
@@ -355,7 +433,7 @@ __pthread_mutex_lock (mutex)
            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
-               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+               if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;
 
@@ -378,7 +456,7 @@ __pthread_mutex_lock (mutex)
                return EINVAL;
              }
 
-           retval = __pthread_tpp_change_priority (oldprio, ceiling);
+           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;
 
@@ -408,7 +486,8 @@ __pthread_mutex_lock (mutex)
                  break;
 
                if (oldval != ceilval)
-                 lll_futex_wait (&mutex->__data.__lock, ceilval | 2);
+                 lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
+                                 PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
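[Note: the PP cases implement the POSIX priority-protection protocol: before the lock word is touched, __pthread_tpp_change_priority raises the thread to the mutex's ceiling priority, and the ceiling itself is encoded in the upper bits of the lock word (the low two bits hold the lock state). Applications configure such a mutex with the standard attribute calls; a minimal sketch:

#include <pthread.h>

int
make_pp_mutex (pthread_mutex_t *m, int ceiling)
{
  pthread_mutexattr_t a;
  int e = pthread_mutexattr_init (&a);
  if (e != 0)
    return e;
  /* PTHREAD_PRIO_PROTECT selects the priority-ceiling protocol.  */
  e = pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_PROTECT);
  if (e == 0)
    e = pthread_mutexattr_setprioceiling (&a, ceiling);
  if (e == 0)
    e = pthread_mutex_init (m, &a);
  pthread_mutexattr_destroy (&a);
  return e;
}]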
@@ -432,9 +511,30 @@ __pthread_mutex_lock (mutex)
   ++mutex->__data.__nusers;
 #endif
 
-  return retval;
+  LIBC_PROBE (mutex_acquired, 1, mutex);
+
+  return 0;
 }
 #ifndef __pthread_mutex_lock
 strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
-strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
+hidden_def (__pthread_mutex_lock)
+#endif
+
+
+#ifdef NO_INCR
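+/* Fix up user-space bookkeeping after the kernel has already acquired a
+   PI mutex on this thread's behalf (the NO_INCR build used by the condvar
+   code, e.g. after a requeue-PI wakeup): only the owner, and for the
+   recursive kind the count, still need recording here.  */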
+void
+__pthread_mutex_cond_lock_adjust (mutex)
+     pthread_mutex_t *mutex;
+{
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+
+  /* Record the ownership.  */
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+  mutex->__data.__owner = id;
+
+  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+    ++mutex->__data.__count;
+}
 #endif