commit d33d26036a0274b472299d7dcdaa5fb34329f91b upstream.

rt_mutex_handle_deadlock() is called with rt_mutex::wait_lock held. In the
good case it returns with the lock held and in the deadlock case it emits a
warning and goes into an endless scheduling loop with the lock held, which
triggers the 'scheduling in atomic' warning.

Unlock rt_mutex::wait_lock in the deadlock case before issuing the warning
and dropping into the schedule-forever loop.
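
To make the 'scheduling in atomic' problem concrete, here is a simplified
sketch of the deadlock path with the fix applied. It is modeled on the
pre-5.15 rtmutex code that this backport targets and is not the verbatim
upstream function; in particular, mainline emits the warning via WARN()
while older stable trees use their own deadlock printout:

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                      struct rt_mutex *lock,
                                      struct rt_mutex_waiter *w)
{
        /*
         * Nothing to do unless this is an undetected deadlock:
         * return with wait_lock still held, as before.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Drop wait_lock before parking the task forever, otherwise
         * every schedule() below runs with the raw spinlock held.
         */
        raw_spin_unlock_irq(&lock->wait_lock);

        WARN(1, "rtmutex deadlock detected\n");
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

Without the raw_spin_unlock_irq(), the loop schedules with wait_lock held
and interrupts disabled, which is exactly what triggers the 'scheduling in
atomic' warning described above.
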
[ tglx: Moved unlock before the WARN(), removed the pointless comment,
  massaged changelog, added Fixes tag ]

Fixes: 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter")
Signed-off-by: Roland Xu <mu001999@outlook.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/all/ME0P300MB063599BEF0743B8FA339C2CECC802@ME0P300MB0635.AUSP300.PROD.OUTLOOK.COM
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ ... @@
 }
 
 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                     struct rt_mutex *lock,
                                      struct rt_mutex_waiter *w)
 {
 	/*
@@ ... @@
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;
 
+	raw_spin_unlock_irq(&lock->wait_lock);
+
 	/*
 	 * Yell lowdly and stop the task right here.
 	 */
@@ ... @@
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
 	}
 
 	/*