From: Greg Kroah-Hartman
Date: Sun, 15 Oct 2023 17:55:32 +0000 (+0200)
Subject: 4.19-stable patches
X-Git-Tag: v5.15.136~36
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=0a1ebb68934578899c9ca2e26f40a3bff47a4b5f;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	sched-idle-rcu-push-rcu_idle-deeper-into-the-idle-path.patch
---

diff --git a/queue-4.19/sched-idle-rcu-push-rcu_idle-deeper-into-the-idle-path.patch b/queue-4.19/sched-idle-rcu-push-rcu_idle-deeper-into-the-idle-path.patch
new file mode 100644
index 00000000000..7bee06e9a08
--- /dev/null
+++ b/queue-4.19/sched-idle-rcu-push-rcu_idle-deeper-into-the-idle-path.patch
@@ -0,0 +1,164 @@
+From 1098582a0f6c4e8fd28da0a6305f9233d02c9c1d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 7 Aug 2020 20:50:19 +0200
+Subject: sched,idle,rcu: Push rcu_idle deeper into the idle path
+
+From: Peter Zijlstra
+
+commit 1098582a0f6c4e8fd28da0a6305f9233d02c9c1d upstream.
+
+Lots of things take locks, due to a wee bug, rcu_lockdep didn't notice
+that the locking tracepoints were using RCU.
+
+Push rcu_idle_{enter,exit}() as deep as possible into the idle paths,
+this also resolves a lot of _rcuidle()/RCU_NONIDLE() usage.
+
+Specifically, sched_clock_idle_wakeup_event() will use ktime which
+will use seqlocks which will tickle lockdep, and
+stop_critical_timings() uses lock.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Reviewed-by: Steven Rostedt (VMware)
+Reviewed-by: Thomas Gleixner
+Acked-by: Rafael J. Wysocki
+Tested-by: Marco Elver
+Link: https://lkml.kernel.org/r/20200821085348.310943801@infradead.org
+Tested-by: Linux Kernel Functional Testing
+Tested-by: Naresh Kamboju
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/cpuidle/cpuidle.c |   12 ++++++++----
+ kernel/sched/idle.c       |   22 ++++++++--------------
+ 2 files changed, 16 insertions(+), 18 deletions(-)
+
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -140,13 +140,14 @@ static void enter_s2idle_proper(struct c
+ 	 * executing it contains RCU usage regarded as invalid in the idle
+ 	 * context, so tell RCU about that.
+ 	 */
+-	RCU_NONIDLE(tick_freeze());
++	tick_freeze();
+ 	/*
+ 	 * The state used here cannot be a "coupled" one, because the "coupled"
+ 	 * cpuidle mechanism enables interrupts and doing that with timekeeping
+ 	 * suspended is generally unsafe.
+ 	 */
+ 	stop_critical_timings();
++	rcu_idle_enter();
+ 	drv->states[index].enter_s2idle(dev, drv, index);
+ 	if (WARN_ON_ONCE(!irqs_disabled()))
+ 		local_irq_disable();
+@@ -155,7 +156,8 @@ static void enter_s2idle_proper(struct c
+ 	 * first CPU executing it calls functions containing RCU read-side
+ 	 * critical sections, so tell RCU about that.
+ 	 */
+-	RCU_NONIDLE(tick_unfreeze());
++	rcu_idle_exit();
++	tick_unfreeze();
+ 	start_critical_timings();
+ 
+ 	time_end = ns_to_ktime(local_clock());
+@@ -224,16 +226,18 @@ int cpuidle_enter_state(struct cpuidle_d
+ 	/* Take note of the planned idle state. */
+ 	sched_idle_set_state(target_state);
+ 
+-	trace_cpu_idle_rcuidle(index, dev->cpu);
++	trace_cpu_idle(index, dev->cpu);
+ 	time_start = ns_to_ktime(local_clock());
+ 
+ 	stop_critical_timings();
++	rcu_idle_enter();
+ 	entered_state = target_state->enter(dev, drv, index);
++	rcu_idle_exit();
+ 	start_critical_timings();
+ 
+ 	sched_clock_idle_wakeup_event();
+ 	time_end = ns_to_ktime(local_clock());
+-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
++	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+ 
+ 	/* The cpu is no longer idle or about to enter idle. */
+ 	sched_idle_set_state(NULL);
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -53,17 +53,18 @@ __setup("hlt", cpu_idle_nopoll_setup);
+ 
+ static noinline int __cpuidle cpu_idle_poll(void)
+ {
++	trace_cpu_idle(0, smp_processor_id());
++	stop_critical_timings();
+ 	rcu_idle_enter();
+-	trace_cpu_idle_rcuidle(0, smp_processor_id());
+ 	local_irq_enable();
+-	stop_critical_timings();
+ 
+ 	while (!tif_need_resched() &&
+-	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
++	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
+ 		cpu_relax();
+-	start_critical_timings();
+-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
++
+ 	rcu_idle_exit();
++	start_critical_timings();
++	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+ 
+ 	return 1;
+ }
+@@ -90,7 +91,9 @@ void __cpuidle default_idle_call(void)
+ 		local_irq_enable();
+ 	} else {
+ 		stop_critical_timings();
++		rcu_idle_enter();
+ 		arch_cpu_idle();
++		rcu_idle_exit();
+ 		start_critical_timings();
+ 	}
+ }
+@@ -148,7 +151,6 @@ static void cpuidle_idle_call(void)
+ 
+ 	if (cpuidle_not_available(drv, dev)) {
+ 		tick_nohz_idle_stop_tick();
+-		rcu_idle_enter();
+ 
+ 		default_idle_call();
+ 		goto exit_idle;
+@@ -166,19 +168,15 @@ static void cpuidle_idle_call(void)
+ 
+ 	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+ 		if (idle_should_enter_s2idle()) {
+-			rcu_idle_enter();
+ 
+ 			entered_state = cpuidle_enter_s2idle(drv, dev);
+ 			if (entered_state > 0) {
+ 				local_irq_enable();
+ 				goto exit_idle;
+ 			}
+-
+-			rcu_idle_exit();
+ 		}
+ 
+ 		tick_nohz_idle_stop_tick();
+-		rcu_idle_enter();
+ 
+ 		next_state = cpuidle_find_deepest_state(drv, dev);
+ 		call_cpuidle(drv, dev, next_state);
+@@ -195,8 +193,6 @@ static void cpuidle_idle_call(void)
+ 	else
+ 		tick_nohz_idle_retain_tick();
+ 
+-	rcu_idle_enter();
+-
+ 	entered_state = call_cpuidle(drv, dev, next_state);
+ 	/*
+ 	 * Give the governor an opportunity to reflect on the outcome
+@@ -212,8 +208,6 @@ exit_idle:
+ 	 */
+ 	if (WARN_ON_ONCE(irqs_disabled()))
+ 		local_irq_enable();
+-
+-	rcu_idle_exit();
+ }
+ 
+ /*
diff --git a/queue-4.19/series b/queue-4.19/series
index 94d35664672..0d1a6443a2e 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -18,3 +18,4 @@ net-nfc-fix-races-in-nfc_llcp_sock_get-and-nfc_llcp_.patch
 nfc-nci-assert-requested-protocol-is-valid.patch
 workqueue-override-implicit-ordered-attribute-in-wor.patch
 perf-inject-fix-gen_elf_text_offset-for-jit.patch
+sched-idle-rcu-push-rcu_idle-deeper-into-the-idle-path.patch
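
The invariant behind this backport: anything that may use RCU (tracepoints,
lockdep-tracked locks, the seqlocks behind ktime) must run while RCU is still
watching the CPU, i.e. before rcu_idle_enter() and after rcu_idle_exit().
What follows is a minimal user-space sketch of that invariant, not kernel
code: the rcu_watching flag and the stub trace_cpu_idle()/arch_cpu_idle()
below are hypothetical stand-ins, modelled with assert().

/*
 * Sketch only: user-space model of the ordering the patch enforces.
 * All names are stand-ins for the kernel APIs, not the real things.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool rcu_watching = true;	/* models RCU's per-CPU "watching" state */

static void rcu_idle_enter(void) { rcu_watching = false; }
static void rcu_idle_exit(void)  { rcu_watching = true; }

/* Tracepoints use RCU, so RCU must still be watching when they fire. */
static void trace_cpu_idle(int state, int cpu)
{
	assert(rcu_watching);	/* in the kernel this is a lockdep/RCU splat */
	printf("cpu_idle: state=%d cpu=%d\n", state, cpu);
}

static void arch_cpu_idle(void) { /* stands in for the low-power wait */ }

int main(void)
{
	/* Post-patch ordering: trace first, only then stop RCU watching. */
	trace_cpu_idle(0, 0);
	rcu_idle_enter();
	arch_cpu_idle();	/* RCU not watching only across the wait itself */
	rcu_idle_exit();
	trace_cpu_idle(-1, 0);	/* exit event fires with RCU watching again */
	return 0;
}

Moving the first trace_cpu_idle() call after rcu_idle_enter() in main() trips
the assert, the user-space analogue of the splat this patch resolves.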