--- /dev/null
+++ b/queue-5.10/rcu-tasks-mark-trc_reader_nesting-data-races.patch
+From stable-owner@vger.kernel.org Sat Jul 15 02:47:26 2023
+From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Date: Sat, 15 Jul 2023 00:47:09 +0000
+Subject: rcu-tasks: Mark ->trc_reader_nesting data races
+To: stable@vger.kernel.org
+Cc: "Joel Fernandes (Google)" <joel@joelfernandes.org>, "Paul E . McKenney" <paulmck@kernel.org>
+Message-ID: <20230715004711.2938489-2-joel@joelfernandes.org>
+
+From: "Paul E. McKenney" <paulmck@kernel.org>
+
+[ Upstream commit bdb0cca0d11060fce8a8a44588ac1470c25d62bc ]
+
+There are several ->trc_reader_nesting data races that are too
+low-probability for KCSAN to notice, but which will happen sooner or
+later. This commit therefore marks these accesses, and comments one
+that cannot race.
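+
+As a schematic example (assumed variable names, not the exact kernel
+code), the marking turns a plain C-language load, which the compiler
+is free to tear, fuse, or refetch, into a single volatile load:
+
+	/* Before: plain load; racy against a concurrent marked writer. */
+	bool idle = !t->trc_reader_nesting;
+
+	/* After: one marked load; KCSAN treats the race as intentional. */
+	bool idle = !READ_ONCE(t->trc_reader_nesting);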
+
+Cc: <stable@vger.kernel.org> # 5.10.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tasks.h | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -848,7 +848,7 @@ static void trc_read_check_handler(void
+
+ // If the task is not in a read-side critical section, and
+ // if this is the last reader, awaken the grace-period kthread.
+- if (likely(!t->trc_reader_nesting)) {
++ if (likely(!READ_ONCE(t->trc_reader_nesting))) {
+ if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+ wake_up(&trc_wait);
+ // Mark as checked after decrement to avoid false
+@@ -857,7 +857,7 @@ static void trc_read_check_handler(void
+ goto reset_ipi;
+ }
+ // If we are racing with an rcu_read_unlock_trace(), try again later.
+- if (unlikely(t->trc_reader_nesting < 0)) {
++ if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
+ if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+ wake_up(&trc_wait);
+ goto reset_ipi;
+@@ -904,6 +904,7 @@ static bool trc_inspect_reader(struct ta
+ n_heavy_reader_ofl_updates++;
+ in_qs = true;
+ } else {
++ // The task is not running, so C-language access is safe.
+ in_qs = likely(!t->trc_reader_nesting);
+ }
+
+@@ -936,7 +937,7 @@ static void trc_wait_for_one_reader(stru
+ // The current task had better be in a quiescent state.
+ if (t == current) {
+ t->trc_reader_checked = true;
+- WARN_ON_ONCE(t->trc_reader_nesting);
++ WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
+ return;
+ }
+
+@@ -1046,7 +1047,7 @@ static void show_stalled_task_trace(stru
+ ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
+ ".i"[is_idle_task(t)],
+ ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
+- t->trc_reader_nesting,
++ READ_ONCE(t->trc_reader_nesting),
+ " N"[!!t->trc_reader_special.b.need_qs],
+ cpu);
+ sched_show_task(t);
+@@ -1141,7 +1142,7 @@ static void rcu_tasks_trace_postgp(struc
+ static void exit_tasks_rcu_finish_trace(struct task_struct *t)
+ {
+ WRITE_ONCE(t->trc_reader_checked, true);
+- WARN_ON_ONCE(t->trc_reader_nesting);
++ WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
+ WRITE_ONCE(t->trc_reader_nesting, 0);
+ if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
+ rcu_read_unlock_trace_special(t, 0);
--- /dev/null
+++ b/queue-5.10/rcu-tasks-mark-trc_reader_special.b.need_qs-data-races.patch
+From stable-owner@vger.kernel.org Sat Jul 15 02:47:26 2023
+From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Date: Sat, 15 Jul 2023 00:47:10 +0000
+Subject: rcu-tasks: Mark ->trc_reader_special.b.need_qs data races
+To: stable@vger.kernel.org
+Cc: "Joel Fernandes (Google)" <joel@joelfernandes.org>, "Paul E . McKenney" <paulmck@kernel.org>
+Message-ID: <20230715004711.2938489-3-joel@joelfernandes.org>
+
+From: "Paul E. McKenney" <paulmck@kernel.org>
+
+[ Upstream commit f8ab3fad80dddf3f2cecb53983063c4431058ca1 ]
+
+There are several ->trc_reader_special.b.need_qs data races that are
+too low-probability for KCSAN to notice, but which will happen sooner
+or later. This commit therefore marks these accesses.
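+
+Note that the corresponding writes were already marked, for example
+WRITE_ONCE(t->trc_reader_special.b.need_qs, true), but a data race
+needs only one plain access.  Schematically (not the exact code):
+
+	/* CPU 0: marked store, as already present in the kernel. */
+	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
+
+	/* CPU 1, before: a plain load still races with that store. */
+	int nq = t->trc_reader_special.b.need_qs;
+
+	/* CPU 1, after: both sides marked, so the race is intentional. */
+	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);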
+
+Cc: <stable@vger.kernel.org> # 5.10.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tasks.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -801,7 +801,7 @@ static DEFINE_IRQ_WORK(rcu_tasks_trace_i
+ /* If we are the last reader, wake up the grace-period kthread. */
+ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
+ {
+- int nq = t->trc_reader_special.b.need_qs;
++ int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
+
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
+ t->trc_reader_special.b.need_mb)
+@@ -867,7 +867,7 @@ static void trc_read_check_handler(void
+ // Get here if the task is in a read-side critical section. Set
+ // its state so that it will awaken the grace-period kthread upon
+ // exit from that critical section.
+- WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
++ WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
+
+ reset_ipi:
+@@ -919,7 +919,7 @@ static bool trc_inspect_reader(struct ta
+ // state so that it will awaken the grace-period kthread upon exit
+ // from that critical section.
+ atomic_inc(&trc_n_readers_need_end); // One more to wait on.
+- WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
++ WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
+ return true;
+ }
+@@ -1048,7 +1048,7 @@ static void show_stalled_task_trace(stru
+ ".i"[is_idle_task(t)],
+ ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
+ READ_ONCE(t->trc_reader_nesting),
+- " N"[!!t->trc_reader_special.b.need_qs],
++ " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
+ cpu);
+ sched_show_task(t);
+ }
--- /dev/null
+++ b/queue-5.10/rcu-tasks-simplify-trc_read_check_handler-atomic-operations.patch
+From stable-owner@vger.kernel.org Sat Jul 15 02:47:26 2023
+From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Date: Sat, 15 Jul 2023 00:47:11 +0000
+Subject: rcu-tasks: Simplify trc_read_check_handler() atomic operations
+To: stable@vger.kernel.org
+Cc: "Joel Fernandes (Google)" <joel@joelfernandes.org>, "Paul E . McKenney" <paulmck@kernel.org>
+Message-ID: <20230715004711.2938489-4-joel@joelfernandes.org>
+
+From: "Paul E. McKenney" <paulmck@kernel.org>
+
+[ Upstream commit 96017bf9039763a2e02dcc6adaa18592cd73a39d ]
+
+Currently, trc_wait_for_one_reader() atomically increments
+the trc_n_readers_need_end counter before sending the IPI
+invoking trc_read_check_handler(). All failure paths out of
+trc_read_check_handler() and also from the smp_call_function_single()
+within trc_wait_for_one_reader() must each carefully decrement this
+counter atomically. This is more complex than it needs to be.
+
+This commit therefore simplifies things and saves a few lines of
+code by dispensing with the atomic decrements in favor of having
+trc_read_check_handler() do the atomic increment only in the success case.
+In theory, this represents no change in functionality.
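+
+Schematically (simplified, not the exact control flow):
+
+	/* Before: increment up front on the IPI path... */
+	atomic_inc(&trc_n_readers_need_end);
+	smp_call_function_single(cpu, trc_read_check_handler, t, 0);
+	/* ...so every failure path must atomically decrement, and
+	   possibly awaken the grace-period kthread. */
+
+	/* After: no up-front increment; trc_read_check_handler()
+	   increments only upon finding the task in a read-side
+	   critical section, so failure paths simply return. */
+	smp_call_function_single(cpu, trc_read_check_handler, t, 0);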
+
+Cc: <stable@vger.kernel.org> # 5.10.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tasks.h | 20 +++-----------------
+ 1 file changed, 3 insertions(+), 17 deletions(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -841,32 +841,24 @@ static void trc_read_check_handler(void
+
+ // If the task is no longer running on this CPU, leave.
+ if (unlikely(texp != t)) {
+- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+- wake_up(&trc_wait);
+ goto reset_ipi; // Already on holdout list, so will check later.
+ }
+
+ // If the task is not in a read-side critical section, and
+ // if this is the last reader, awaken the grace-period kthread.
+ if (likely(!READ_ONCE(t->trc_reader_nesting))) {
+- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+- wake_up(&trc_wait);
+- // Mark as checked after decrement to avoid false
+- // positives on the above WARN_ON_ONCE().
+ WRITE_ONCE(t->trc_reader_checked, true);
+ goto reset_ipi;
+ }
+ // If we are racing with an rcu_read_unlock_trace(), try again later.
+- if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
+- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+- wake_up(&trc_wait);
++ if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
+ goto reset_ipi;
+- }
+ WRITE_ONCE(t->trc_reader_checked, true);
+
+ // Get here if the task is in a read-side critical section. Set
+ // its state so that it will awaken the grace-period kthread upon
+ // exit from that critical section.
++ atomic_inc(&trc_n_readers_need_end); // One more to wait on.
+ WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
+
+@@ -960,21 +952,15 @@ static void trc_wait_for_one_reader(stru
+ if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
+ return;
+
+- atomic_inc(&trc_n_readers_need_end);
+ per_cpu(trc_ipi_to_cpu, cpu) = true;
+ t->trc_ipi_to_cpu = cpu;
+ rcu_tasks_trace.n_ipis++;
+- if (smp_call_function_single(cpu,
+- trc_read_check_handler, t, 0)) {
++ if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
+ // Just in case there is some other reason for
+ // failure than the target CPU being offline.
+ rcu_tasks_trace.n_ipis_fails++;
+ per_cpu(trc_ipi_to_cpu, cpu) = false;
+ t->trc_ipi_to_cpu = cpu;
+- if (atomic_dec_and_test(&trc_n_readers_need_end)) {
+- WARN_ON_ONCE(1);
+- wake_up(&trc_wait);
+- }
+ }
+ }
+ }
wireguard-queueing-use-saner-cpu-selection-wrapping.patch
wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch
tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch
+rcu-tasks-mark-trc_reader_nesting-data-races.patch
+rcu-tasks-mark-trc_reader_special.b.need_qs-data-races.patch
+rcu-tasks-simplify-trc_read_check_handler-atomic-operations.patch