git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 May 2022 11:05:01 +0000 (13:05 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 May 2022 11:05:01 +0000 (13:05 +0200)
added patches:
rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch
rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch

queue-5.15/rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch [new file with mode: 0644]
queue-5.15/rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch b/queue-5.15/rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch
new file mode 100644
index 0000000..4745543
--- /dev/null
+++ b/queue-5.15/rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch
@@ -0,0 +1,91 @@
+From a554ba288845fd3f6f12311fd76a51694233458a Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Tue, 19 Oct 2021 02:08:15 +0200
+Subject: rcu: Apply callbacks processing time limit only on softirq
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit a554ba288845fd3f6f12311fd76a51694233458a upstream.
+
+Time limit only makes sense when callbacks are serviced in softirq mode
+because:
+
+_ In case we need to get back to the scheduler,
+  cond_resched_tasks_rcu_qs() is called after each callback.
+
+_ In case some other softirq vector needs the CPU, the call to
+  local_bh_enable() before cond_resched_tasks_rcu_qs() takes care about
+  them via a call to do_softirq().
+
+Therefore, make sure the time limit only applies to softirq mode.
+
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Cc: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Josh Triplett <josh@joshtriplett.org>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
+Cc: Uladzislau Rezki <urezki@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+[UR: backport to 5.15-stable]
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c |   26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2476,7 +2476,7 @@ static void rcu_do_batch(struct rcu_data
+       div = READ_ONCE(rcu_divisor);
+       div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
+       bl = max(rdp->blimit, pending >> div);
+-      if (unlikely(bl > 100)) {
++      if (in_serving_softirq() && unlikely(bl > 100)) {
+               long rrn = READ_ONCE(rcu_resched_ns);
+               rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
+@@ -2517,6 +2517,18 @@ static void rcu_do_batch(struct rcu_data
+                       if (count >= bl && (need_resched() ||
+                                       (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+                               break;
++
++                      /*
++                       * Make sure we don't spend too much time here and deprive other
++                       * softirq vectors of CPU cycles.
++                       */
++                      if (unlikely(tlimit)) {
++                              /* only call local_clock() every 32 callbacks */
++                              if (likely((count & 31) || local_clock() < tlimit))
++                                      continue;
++                              /* Exceeded the time limit, so leave. */
++                              break;
++                      }
+               } else {
+                       local_bh_enable();
+                       lockdep_assert_irqs_enabled();
+@@ -2524,18 +2536,6 @@ static void rcu_do_batch(struct rcu_data
+                       lockdep_assert_irqs_enabled();
+                       local_bh_disable();
+               }
+-
+-              /*
+-               * Make sure we don't spend too much time here and deprive other
+-               * softirq vectors of CPU cycles.
+-               */
+-              if (unlikely(tlimit)) {
+-                      /* only call local_clock() every 32 callbacks */
+-                      if (likely((count & 31) || local_clock() < tlimit))
+-                              continue;
+-                      /* Exceeded the time limit, so leave. */
+-                      break;
+-              }
+       }
+       local_irq_save(flags);
diff --git a/queue-5.15/rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch b/queue-5.15/rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch
new file mode 100644
index 0000000..78f5df8
--- /dev/null
+++ b/queue-5.15/rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch
@@ -0,0 +1,82 @@
+From 3e61e95e2d095e308616cba4ffb640f95a480e01 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Tue, 19 Oct 2021 02:08:14 +0200
+Subject: rcu: Fix callbacks processing time limit retaining cond_resched()
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 3e61e95e2d095e308616cba4ffb640f95a480e01 upstream.
+
+The callbacks processing time limit makes sure we are not exceeding a
+given amount of time executing the queue.
+
+However its "continue" clause bypasses the cond_resched() call on
+rcuc and NOCB kthreads, delaying it until we reach the limit, which can
+be very long...
+
+Make sure the scheduler has a higher priority than the time limit.
+
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Valentin Schneider <valentin.schneider@arm.com>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Cc: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Josh Triplett <josh@joshtriplett.org>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
+Cc: Uladzislau Rezki <urezki@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+[UR: backport to 5.15-stable + commit update]
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c |   27 ++++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2513,10 +2513,22 @@ static void rcu_do_batch(struct rcu_data
+               /*
+                * Stop only if limit reached and CPU has something to do.
+                */
+-              if (count >= bl && !offloaded &&
+-                  (need_resched() ||
+-                   (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+-                      break;
++              if (in_serving_softirq()) {
++                      if (count >= bl && (need_resched() ||
++                                      (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
++                              break;
++              } else {
++                      local_bh_enable();
++                      lockdep_assert_irqs_enabled();
++                      cond_resched_tasks_rcu_qs();
++                      lockdep_assert_irqs_enabled();
++                      local_bh_disable();
++              }
++
++              /*
++               * Make sure we don't spend too much time here and deprive other
++               * softirq vectors of CPU cycles.
++               */
+               if (unlikely(tlimit)) {
+                       /* only call local_clock() every 32 callbacks */
+                       if (likely((count & 31) || local_clock() < tlimit))
+@@ -2524,13 +2536,6 @@ static void rcu_do_batch(struct rcu_data
+                       /* Exceeded the time limit, so leave. */
+                       break;
+               }
+-              if (!in_serving_softirq()) {
+-                      local_bh_enable();
+-                      lockdep_assert_irqs_enabled();
+-                      cond_resched_tasks_rcu_qs();
+-                      lockdep_assert_irqs_enabled();
+-                      local_bh_disable();
+-              }
+       }
+       local_irq_save(flags);
diff --git a/queue-5.15/series b/queue-5.15/series
index 44ed2665c7c9d0a8ea0aa4ccd9e4bdb3a4048c00..14f12b3df2a4a3aa1650866dd7bedc081b108b3e 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -102,3 +102,5 @@ selftest-vm-verify-mmap-addr-in-mremap_test.patch
 selftest-vm-verify-remap-destination-address-in-mrem.patch
 mmc-rtsx-add-74-clocks-in-power-on-flow.patch
 revert-parisc-mark-sched_clock-unstable-only-if-clocks-are-not-syncronized.patch
+rcu-fix-callbacks-processing-time-limit-retaining-cond_resched.patch
+rcu-apply-callbacks-processing-time-limit-only-on-softirq.patch
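
For readers skimming the queue, the mechanism both patches adjust is visible in the hunks above: rcu_do_batch() invokes callbacks in a loop, consults the clock only every 32 callbacks to keep overhead low, and, when running from softirq, stops once the time budget (tlimit) is spent; when running from an rcuc/NOCB kthread it instead re-enables softirqs and calls cond_resched_tasks_rcu_qs() after each callback, so no time limit applies there. What follows is a minimal userspace sketch of that pattern, assuming only POSIX clock_gettime() and sched_yield(); process_one(), invoke_batch(), now_ns() and the 1 ms budget are made-up illustrations, not kernel code.

/*
 * Illustrative userspace analogue of the rcu_do_batch() loop after both
 * patches: in "softirq mode" the loop checks a time budget, but only
 * consults the clock every 32 callbacks; in "kthread mode" it yields
 * after each callback and no time budget applies.
 * Hypothetical sketch -- not kernel code.
 */
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Stand-in for invoking one RCU callback. */
static void process_one(long n)
{
        (void)n;
}

static long invoke_batch(long pending, bool softirq_mode, uint64_t budget_ns)
{
        /* Like tlimit in the patch: set only when a budget is requested. */
        uint64_t tlimit = (softirq_mode && budget_ns) ? now_ns() + budget_ns : 0;
        long count = 0;

        while (count < pending) {
                process_one(count);
                count++;

                if (softirq_mode) {
                        /* Only consult the clock every 32 callbacks. */
                        if ((count & 31) || !tlimit || now_ns() < tlimit)
                                continue;
                        break;  /* Exceeded the time limit, so leave. */
                }
                /* Kthread mode: let the scheduler run after each callback,
                 * an analogue of the cond_resched_tasks_rcu_qs() path. */
                sched_yield();
        }
        return count;
}

int main(void)
{
        printf("softirq mode: %ld callbacks invoked within a 1 ms budget\n",
               invoke_batch(1000000, true, 1000000));
        printf("kthread mode: %ld callbacks invoked, no time limit\n",
               invoke_batch(10000, false, 0));
        return 0;
}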