3.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2014 00:54:10 +0000 (17:54 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2014 00:54:10 +0000 (17:54 -0700)
added patches:
rtmutex-handle-deadlock-detection-smarter.patch
rtmutex-plug-slow-unlock-race.patch

queue-3.4/rtmutex-handle-deadlock-detection-smarter.patch [new file with mode: 0644]
queue-3.4/rtmutex-plug-slow-unlock-race.patch [new file with mode: 0644]
queue-3.4/series

diff --git a/queue-3.4/rtmutex-handle-deadlock-detection-smarter.patch b/queue-3.4/rtmutex-handle-deadlock-detection-smarter.patch
new file mode 100644
index 0000000..5800288
--- /dev/null
@@ -0,0 +1,136 @@
+From 3d5c9340d1949733eb37616abd15db36aef9a57c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 5 Jun 2014 12:34:23 +0200
+Subject: rtmutex: Handle deadlock detection smarter
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 3d5c9340d1949733eb37616abd15db36aef9a57c upstream.
+
+Even when deadlock detection is not requested by the caller, we can
+still detect deadlocks. Right now the code stops the lock chain walk
+and keeps the waiter enqueued, even on itself. It is silly not to
+yell when such a scenario is detected and to keep the waiter enqueued.
+
+Return -EDEADLK unconditionally and handle it at the call sites.
+
+The futex calls return -EDEADLK. The non-futex ones dequeue the
+waiter, throw a warning and put the task into a schedule loop. (A
+user-space analogue of the -EDEADLK behaviour is sketched after this
+patch.)
+
+Tagged for stable as it makes the code more robust.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Brad Mouring <bmouring@ni.com>
+Link: http://lkml.kernel.org/r/20140605152801.836501969@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/rtmutex-debug.h |    5 +++++
+ kernel/rtmutex.c       |   33 ++++++++++++++++++++++++++++-----
+ kernel/rtmutex.h       |    5 +++++
+ 3 files changed, 38 insertions(+), 5 deletions(-)
+
+--- a/kernel/rtmutex-debug.h
++++ b/kernel/rtmutex-debug.h
+@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_
+ {
+       return (waiter != NULL);
+ }
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++      debug_rt_mutex_print_deadlock(w);
++}
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -188,7 +188,7 @@ static int rt_mutex_adjust_prio_chain(st
+               }
+               put_task_struct(task);
+-              return deadlock_detect ? -EDEADLK : 0;
++              return -EDEADLK;
+       }
+  retry:
+       /*
+@@ -263,7 +263,7 @@ static int rt_mutex_adjust_prio_chain(st
+       if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+               debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+               raw_spin_unlock(&lock->wait_lock);
+-              ret = deadlock_detect ? -EDEADLK : 0;
++              ret = -EDEADLK;
+               goto out_unlock_pi;
+       }
+@@ -453,7 +453,7 @@ static int task_blocks_on_rt_mutex(struc
+        * which is wrong, as the other waiter is not in a deadlock
+        * situation.
+        */
+-      if (detect_deadlock && owner == task)
++      if (owner == task)
+               return -EDEADLK;
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -680,6 +680,26 @@ __rt_mutex_slowlock(struct rt_mutex *loc
+       return ret;
+ }
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++                                   struct rt_mutex_waiter *w)
++{
++      /*
++       * If the result is not -EDEADLOCK or the caller requested
++       * deadlock detection, nothing to do here.
++       */
++      if (res != -EDEADLOCK || detect_deadlock)
++              return;
++
++      /*
+       * Yell loudly and stop the task right here.
++       */
++      rt_mutex_print_deadlock(w);
++      while (1) {
++              set_current_state(TASK_INTERRUPTIBLE);
++              schedule();
++      }
++}
++
+ /*
+  * Slow path lock function:
+  */
+@@ -717,8 +737,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+       set_current_state(TASK_RUNNING);
+-      if (unlikely(ret))
++      if (unlikely(ret)) {
+               remove_waiter(lock, &waiter);
++              rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
++      }
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit
+@@ -1026,7 +1048,8 @@ int rt_mutex_start_proxy_lock(struct rt_
+               return 1;
+       }
+-      ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
++      /* We enforce deadlock detection for futexes */
++      ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+       if (ret && !rt_mutex_owner(lock)) {
+               /*
+--- a/kernel/rtmutex.h
++++ b/kernel/rtmutex.h
+@@ -24,3 +24,8 @@
+ #define debug_rt_mutex_print_deadlock(w)              do { } while (0)
+ #define debug_rt_mutex_detect_deadlock(w,d)           (d)
+ #define debug_rt_mutex_reset_waiter(w)                        do { } while (0)
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++      WARN(1, "rtmutex deadlock detected\n");
++}
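
To make the new behaviour concrete, the same semantics can be observed
from user space: an error-checking POSIX mutex refuses a recursive lock
attempt with EDEADLK instead of silently queueing the caller behind
itself. The sketch below is an illustrative analogue of the patch's
-EDEADLK behaviour, not the rtmutex code itself; it is user-space code
written for demonstration only.

/* Build with: gcc -pthread relock.c
 * PTHREAD_MUTEX_ERRORCHECK makes a relock by the owner fail with
 * EDEADLK instead of blocking the thread on itself, which mirrors
 * the unconditional -EDEADLK now returned by task_blocks_on_rt_mutex(). */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int ret;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        pthread_mutex_lock(&m);

        /* Second lock by the owner: detected, not silently enqueued. */
        ret = pthread_mutex_lock(&m);
        printf("relock: %s\n", strerror(ret)); /* Resource deadlock avoided */

        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&attr);
        return 0;
}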
diff --git a/queue-3.4/rtmutex-plug-slow-unlock-race.patch b/queue-3.4/rtmutex-plug-slow-unlock-race.patch
new file mode 100644
index 0000000..090de28
--- /dev/null
@@ -0,0 +1,238 @@
+From 27e35715df54cbc4f2d044f681802ae30479e7fb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 11 Jun 2014 18:44:04 +0000
+Subject: rtmutex: Plug slow unlock race
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 27e35715df54cbc4f2d044f681802ae30479e7fb upstream.
+
+When the rtmutex fast path is enabled the slow unlock function can
+create the following situation:
+
+spin_lock(foo->m->wait_lock);
+foo->m->owner = NULL;
+                               rt_mutex_lock(foo->m); <-- fast path
+                               free = atomic_dec_and_test(foo->refcnt);
+                               rt_mutex_unlock(foo->m); <-- fast path
+                               if (free)
+                                  kfree(foo);
+
+spin_unlock(foo->m->wait_lock); <--- Use after free.
+
+Plug the race by changing the slow unlock to the following scheme:
+
+     while (!rt_mutex_has_waiters(m)) {
+           /* Clear the waiters bit in m->owner */
+           clear_rt_mutex_waiters(m);
+           owner = rt_mutex_owner(m);
+           spin_unlock(m->wait_lock);
+           if (cmpxchg(m->owner, owner, 0) == owner)
+              return;
+           spin_lock(m->wait_lock);
+     }
+
+So in case a new waiter comes in while the owner takes the slow
+path unlock, we have two situations:
+
+ unlock(wait_lock);
+                                       lock(wait_lock);
+ cmpxchg(p, owner, 0) == owner
+                                       mark_rt_mutex_waiters(lock);
+                                       acquire(lock);
+
+Or:
+
+ unlock(wait_lock);
+                                       lock(wait_lock);
+                                       mark_rt_mutex_waiters(lock);
+ cmpxchg(p, owner, 0) != owner
+                                       enqueue_waiter();
+                                       unlock(wait_lock);
+ lock(wait_lock);
+ wakeup_next_waiter();
+ unlock(wait_lock);
+                                       lock(wait_lock);
+                                       acquire(lock);
+
+If the fast path is disabled, then the simple
+
+   m->owner = NULL;
+   unlock(m->wait_lock);
+
+is sufficient, as all access to m->owner is serialized via
+m->wait_lock. (A minimal user-space sketch of the fastpath-aware
+scheme follows this patch.)
+
+Also document and clarify the wakeup_next_waiter function as suggested
+by Oleg Nesterov.
+
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20140611183852.937945560@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/rtmutex.c |  115 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 109 insertions(+), 6 deletions(-)
+
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -81,6 +81,47 @@ static inline void mark_rt_mutex_waiters
+               owner = *p;
+       } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+ }
++
++/*
++ * Safe fastpath aware unlock:
++ * 1) Clear the waiters bit
++ * 2) Drop lock->wait_lock
++ * 3) Try to unlock the lock with cmpxchg
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++      __releases(lock->wait_lock)
++{
++      struct task_struct *owner = rt_mutex_owner(lock);
++
++      clear_rt_mutex_waiters(lock);
++      raw_spin_unlock(&lock->wait_lock);
++      /*
++       * If a new waiter comes in between the unlock and the cmpxchg
++       * we have two situations:
++       *
++       * unlock(wait_lock);
++       *                                      lock(wait_lock);
++       * cmpxchg(p, owner, 0) == owner
++       *                                      mark_rt_mutex_waiters(lock);
++       *                                      acquire(lock);
++       * or:
++       *
++       * unlock(wait_lock);
++       *                                      lock(wait_lock);
++       *                                      mark_rt_mutex_waiters(lock);
++       *
++       * cmpxchg(p, owner, 0) != owner
++       *                                      enqueue_waiter();
++       *                                      unlock(wait_lock);
++       * lock(wait_lock);
++       * wake waiter();
++       * unlock(wait_lock);
++       *                                      lock(wait_lock);
++       *                                      acquire(lock);
++       */
++      return rt_mutex_cmpxchg(lock, owner, NULL);
++}
++
+ #else
+ # define rt_mutex_cmpxchg(l,c,n)      (0)
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -88,6 +129,17 @@ static inline void mark_rt_mutex_waiters
+       lock->owner = (struct task_struct *)
+                       ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+ }
++
++/*
++ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++      __releases(lock->wait_lock)
++{
++      lock->owner = NULL;
++      raw_spin_unlock(&lock->wait_lock);
++      return true;
++}
+ #endif
+ /*
+@@ -519,7 +571,8 @@ static int task_blocks_on_rt_mutex(struc
+ /*
+  * Wake up the next waiter on the lock.
+  *
+- * Remove the top waiter from the current tasks waiter list and wake it up.
++ * Remove the top waiter from the current task's pi waiter list and
++ * wake it up.
+  *
+  * Called with lock->wait_lock held.
+  */
+@@ -540,10 +593,23 @@ static void wakeup_next_waiter(struct rt
+        */
+       plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+-      rt_mutex_set_owner(lock, NULL);
++      /*
++       * As we are waking up the top waiter, and the waiter stays
++       * queued on the lock until it gets the lock, this lock
++       * obviously has waiters. Just set the bit here and this has
++       * the added benefit of forcing all new tasks into the
++       * slow path making sure no task of lower priority than
++       * the top waiter can steal this lock.
++       */
++      lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
++      /*
++       * It's safe to dereference waiter as it cannot go away as
++       * long as we hold lock->wait_lock. The waiter task needs to
++       * acquire it in order to dequeue the waiter.
++       */
+       wake_up_process(waiter->task);
+ }
+@@ -796,12 +862,49 @@ rt_mutex_slowunlock(struct rt_mutex *loc
+       rt_mutex_deadlock_account_unlock(current);
+-      if (!rt_mutex_has_waiters(lock)) {
+-              lock->owner = NULL;
+-              raw_spin_unlock(&lock->wait_lock);
+-              return;
++      /*
++       * We must be careful here if the fast path is enabled. If we
++       * have no waiters queued we cannot set owner to NULL here
++       * because of:
++       *
++       * foo->lock->owner = NULL;
++       *                      rtmutex_lock(foo->lock);   <- fast path
++       *                      free = atomic_dec_and_test(foo->refcnt);
++       *                      rtmutex_unlock(foo->lock); <- fast path
++       *                      if (free)
++       *                              kfree(foo);
++       * raw_spin_unlock(foo->lock->wait_lock);
++       *
++       * So for the fastpath enabled kernel:
++       *
++       * Nothing can set the waiters bit as long as we hold
++       * lock->wait_lock. So we do the following sequence:
++       *
++       *      owner = rt_mutex_owner(lock);
++       *      clear_rt_mutex_waiters(lock);
++       *      raw_spin_unlock(&lock->wait_lock);
++       *      if (cmpxchg(&lock->owner, owner, 0) == owner)
++       *              return;
++       *      goto retry;
++       *
++       * The fastpath disabled variant is simple as all access to
++       * lock->owner is serialized by lock->wait_lock:
++       *
++       *      lock->owner = NULL;
++       *      raw_spin_unlock(&lock->wait_lock);
++       */
++      while (!rt_mutex_has_waiters(lock)) {
++              /* Drops lock->wait_lock ! */
++              if (unlock_rt_mutex_safe(lock) == true)
++                      return;
++              /* Relock the rtmutex and try again */
++              raw_spin_lock(&lock->wait_lock);
+       }
++      /*
++       * The wakeup next waiter path does not suffer from the above
++       * race. See the comments there.
++       */
+       wakeup_next_waiter(lock);
+       raw_spin_unlock(&lock->wait_lock);
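
The retry loop above can be modelled in miniature with C11 atomics.
In the sketch below, toy_rtmutex, unlock_toy_safe() and HAS_WAITERS
are hypothetical names invented for illustration; the real code uses
rt_mutex_cmpxchg() and lock->wait_lock exactly as shown in the hunks
above, and a pthread mutex stands in for the raw wait_lock.

/* Build with: gcc -std=c11 -pthread toy_unlock.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

struct toy_rtmutex {
        _Atomic uintptr_t owner;  /* owner token | HAS_WAITERS bit */
        pthread_mutex_t wait_lock;
        int nr_waiters;           /* protected by wait_lock */
};

/* Called with wait_lock held and no waiters queued; drops wait_lock.
 * Returns true if the lock was released, false if a new waiter set
 * the waiters bit between the unlock and the cmpxchg, in which case
 * the caller must relock wait_lock and retry. */
static bool unlock_toy_safe(struct toy_rtmutex *m, uintptr_t self)
{
        uintptr_t owner = self;

        /* 1) Clear the waiters bit, still serialized by wait_lock. */
        atomic_store(&m->owner, self & ~HAS_WAITERS);
        /* 2) Drop wait_lock. */
        pthread_mutex_unlock(&m->wait_lock);
        /* 3) Release only if no waiter sneaked in meanwhile. */
        return atomic_compare_exchange_strong(&m->owner, &owner, 0);
}

static void toy_unlock(struct toy_rtmutex *m, uintptr_t self)
{
        pthread_mutex_lock(&m->wait_lock);
        while (m->nr_waiters == 0) {
                if (unlock_toy_safe(m, self))  /* drops wait_lock */
                        return;
                /* Lost the race: relock and try again. */
                pthread_mutex_lock(&m->wait_lock);
        }
        /* A wakeup_next_waiter() equivalent would run here. */
        pthread_mutex_unlock(&m->wait_lock);
}

int main(void)
{
        struct toy_rtmutex m = { .wait_lock = PTHREAD_MUTEX_INITIALIZER };
        uintptr_t self = (uintptr_t)&m;  /* any non-zero owner token */

        atomic_store(&m.owner, self);    /* pretend we hold the lock */
        toy_unlock(&m, self);            /* no waiters: cmpxchg path */
        return 0;
}

If a waiter bumps nr_waiters and sets HAS_WAITERS between steps 2 and
3, the cmpxchg fails and toy_unlock() relocks wait_lock, which is
exactly the retry the patch introduces in rt_mutex_slowunlock().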
diff --git a/queue-3.4/series b/queue-3.4/series
index 80e782580faf1018c2970d73f48bf90f722cda82..c0161731ba683fea0417d1822b9628addfe444f0 100644
@@ -11,3 +11,5 @@ tracing-remove-ftrace_stop-start-from-reading-the-trace-file.patch
 x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
 rtmutex-fix-deadlock-detector-for-real.patch
 rtmutex-detect-changes-in-the-pi-lock-chain.patch
+rtmutex-handle-deadlock-detection-smarter.patch
+rtmutex-plug-slow-unlock-race.patch