rcu: Fix callbacks processing time limit retaining cond_resched()
Author:     Frederic Weisbecker <frederic@kernel.org>
AuthorDate: Tue, 19 Oct 2021 00:08:14 +0000 (02:08 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 12 May 2022 10:25:45 +0000 (12:25 +0200)
commit 3e61e95e2d095e308616cba4ffb640f95a480e01 upstream.

The callbacks processing time limit makes sure we do not exceed a
given amount of time executing the queue.

However, its "continue" clause bypasses the cond_resched() call on
rcuc and NOCB kthreads, delaying it until the limit is reached, which
can take a very long time...

Make sure the scheduler has a higher priority than the time limit.

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
[UR: backport to 5.10-stable + commit update]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
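
To make the new ordering concrete, below is a minimal, stand-alone C sketch of
the loop's control flow after this change. It is not the kernel code: every
*_stub() helper is a hypothetical stand-in for the real primitive
(in_serving_softirq(), need_resched(), cond_resched_tasks_rcu_qs(),
local_clock()), and the local_bh_enable()/local_bh_disable() and lockdep
bookkeeping are omitted. Only the ordering mirrors the patch: the softirq path
keeps its batch-limit break, while a kthread-context caller (rcuc or NOCB) now
yields on every iteration, before the time-limit check, instead of only once
the limit has been hit.

/*
 * Minimal userspace model of the reordered callback loop.
 * All *_stub() helpers are hypothetical stand-ins, not kernel APIs;
 * only the control-flow ordering mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool in_serving_softirq_stub(void) { return false; } /* pretend rcuc/NOCB kthread */
static bool need_resched_stub(void) { return false; }
static void cond_resched_stub(void) { printf("  yield to scheduler\n"); }
static unsigned long long local_clock_stub(void)
{
	static unsigned long long now;
	return now += 1000;            /* fake monotonic clock, in "ns" */
}

int main(void)
{
	long processed = 0;                      /* callbacks handled so far */
	const long bl = 10;                      /* batch limit */
	const unsigned long long tlimit = 3000;  /* time limit; 0 would mean "none" */

	for (int cb = 0; cb < 1000; cb++) {
		printf("process callback %d\n", cb);
		processed++;

		if (in_serving_softirq_stub()) {
			/* Softirq: stop once the batch limit is hit and a
			 * reschedule is due (the real test is broader). */
			if (processed >= bl && need_resched_stub())
				break;
		} else {
			/* Kthread: give the scheduler a chance on every
			 * iteration, not only after the time limit fires. */
			cond_resched_stub();
		}

		/* Time limit comes second; the clock is consulted only
		 * every 32 callbacks, as in the patch. */
		if (tlimit && !(processed & 31) && local_clock_stub() >= tlimit)
			break;
	}

	printf("left after %ld callbacks\n", processed);
	return 0;
}
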
kernel/rcu/tree.c

index 844c35803739e992c832398cd14c09c37ade7459..f340df6ebd86cf79e8ec8c7df1474c27266f41d8 100644
@@ -2490,10 +2490,22 @@ static void rcu_do_batch(struct rcu_data *rdp)
                 * Stop only if limit reached and CPU has something to do.
                 * Note: The rcl structure counts down from zero.
                 */
-               if (-rcl.len >= bl && !offloaded &&
-                   (need_resched() ||
-                    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
-                       break;
+               if (in_serving_softirq()) {
+                       if (-rcl.len >= bl && (need_resched() ||
+                                       (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+                               break;
+               } else {
+                       local_bh_enable();
+                       lockdep_assert_irqs_enabled();
+                       cond_resched_tasks_rcu_qs();
+                       lockdep_assert_irqs_enabled();
+                       local_bh_disable();
+               }
+
+               /*
+                * Make sure we don't spend too much time here and deprive other
+                * softirq vectors of CPU cycles.
+                */
                if (unlikely(tlimit)) {
                        /* only call local_clock() every 32 callbacks */
                        if (likely((-rcl.len & 31) || local_clock() < tlimit))
@@ -2501,14 +2513,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
                        /* Exceeded the time limit, so leave. */
                        break;
                }
-               if (offloaded) {
-                       WARN_ON_ONCE(in_serving_softirq());
-                       local_bh_enable();
-                       lockdep_assert_irqs_enabled();
-                       cond_resched_tasks_rcu_qs();
-                       lockdep_assert_irqs_enabled();
-                       local_bh_disable();
-               }
        }
 
        local_irq_save(flags);