From 7c2102e56a3f7d85b5d8f33efbd7aecc1f36fdd8 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Date: Mon, 18 Sep 2017 08:54:40 -0700
Subject: sched: Make resched_cpu() unconditional

From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

commit 7c2102e56a3f7d85b5d8f33efbd7aecc1f36fdd8 upstream.
The current implementation of synchronize_sched_expedited() incorrectly
assumes that resched_cpu() is unconditional, which it is not.  This means
that synchronize_sched_expedited() can hang when resched_cpu()'s trylock
fails as follows (analysis by Neeraj Upadhyay):

o	CPU1 is waiting for expedited wait to complete:

	sync_rcu_exp_select_cpus
	     rdp->exp_dynticks_snap & 0x1   // returns 1 for CPU5

	synchronize_sched_expedited_wait
	     ret = swait_event_timeout(rsp->expedited_wq,
				       sync_rcu_preempt_exp_done(rnp_root),

	     expmask = 0x20, CPU 5 in idle path (in cpuidle_enter())

o	CPU5 handles IPI and fails to acquire rq lock.

	sync_sched_exp_handler
	     returns while failing to try lock acquire rq->lock
	     need_resched is not set

o	CPU5 calls rcu_idle_enter() and as need_resched is not set, goes to
	idle (schedule() is not called).

o	CPU 1 reports RCU stall.
Given that resched_cpu() is now used only by RCU, this commit fixes the
assumption by making resched_cpu() unconditional.

Reported-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Suggested-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/sched/core.c |    3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -600,8 +600,7 @@ void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-		return;
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	resched_curr(rq);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);