--- /dev/null
+From 1ba0403ac6447f2d63914fb760c44a3b19c44eaf Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Mon, 9 Sep 2024 21:41:48 +0800
+Subject: block, bfq: fix uaf for accessing waker_bfqq after splitting
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 1ba0403ac6447f2d63914fb760c44a3b19c44eaf upstream.
+
+After commit 42c306ed7233 ("block, bfq: don't break merge chain in
+bfq_split_bfqq()"), if the current process is the last holder of bfqq,
+the bfqq can be freed after bfq_split_bfqq(). Hence recording the bfqq
+and then accessing bfqq->waker_bfqq may trigger a UAF. What's more, the
+waker_bfqq may be in the merge chain of bfqq, hence just recording
+waker_bfqq is still not safe.
+
+Fix the problem by adding a helper bfq_waker_bfqq() that checks whether
+bfqq->waker_bfqq is in the merge chain and whether the current process
+is its only holder.
+
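+To see the hazard, compare with what the call site in bfq_init_rq() used
+to do before this fix (a condensed sketch, not the literal code):
+
+	struct bfq_queue *old_bfqq = bfqq;
+
+	/* the split may drop the last reference to old_bfqq */
+	bfqq = bfq_split_bfqq(bic, bfqq);
+	...
+	/* UAF if old_bfqq (or its waker) was freed by the split */
+	bfqq->waker_bfqq = old_bfqq->waker_bfqq;
+
+With the helper, waker_bfqq is sampled and validated against the merge
+chain before the split, so no freed queue is dereferenced afterwards.
+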
+Fixes: 42c306ed7233 ("block, bfq: don't break merge chain in bfq_split_bfqq()")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20240909134154.954924-2-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c | 31 ++++++++++++++++++++++++++++---
+ 1 file changed, 28 insertions(+), 3 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -6724,6 +6724,31 @@ static void bfq_prepare_request(struct r
+ rq->elv.priv[0] = rq->elv.priv[1] = NULL;
+ }
+
++static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
++ struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
++
++ if (!waker_bfqq)
++ return NULL;
++
++ while (new_bfqq) {
++ if (new_bfqq == waker_bfqq) {
++ /*
++			 * If waker_bfqq is in the merge chain, and the
++			 * current process is its only holder.
++ */
++ if (bfqq_process_refs(waker_bfqq) == 1)
++ return NULL;
++ break;
++ }
++
++ new_bfqq = new_bfqq->new_bfqq;
++ }
++
++ return waker_bfqq;
++}
++
+ /*
+ * If needed, init rq, allocate bfq data structures associated with
+ * rq, and increment reference counters in the destination bfq_queue
+@@ -6784,7 +6809,7 @@ static struct bfq_queue *bfq_init_rq(str
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
+ !bic->stably_merged) {
+- struct bfq_queue *old_bfqq = bfqq;
++ struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
+
+ /* Update bic before losing reference to bfqq */
+ if (bfq_bfqq_in_large_burst(bfqq))
+@@ -6803,7 +6828,7 @@ static struct bfq_queue *bfq_init_rq(str
+ bfqq_already_existing = true;
+
+ if (!bfqq_already_existing) {
+- bfqq->waker_bfqq = old_bfqq->waker_bfqq;
++ bfqq->waker_bfqq = waker_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+ /*
+@@ -6813,7 +6838,7 @@ static struct bfq_queue *bfq_init_rq(str
+ * woken_list of the waker. See
+ * bfq_check_waker for details.
+ */
+- if (bfqq->waker_bfqq)
++ if (waker_bfqq)
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqq->waker_bfqq->woken_list);
+ }
--- /dev/null
+From abaf1e0355abb050f9c11d2d13a513caec80f7ad Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Thu, 17 Aug 2023 09:11:21 -0300
+Subject: perf lock: Don't pass an ERR_PTR() directly to perf_session__delete()
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit abaf1e0355abb050f9c11d2d13a513caec80f7ad upstream.
+
+While debugging a segfault on 'perf lock contention' without an
+available perf.data file I noticed that it was basically calling:
+
+ perf_session__delete(ERR_PTR(-1))
+
+Resulting in:
+
+ (gdb) run lock contention
+ Starting program: /root/bin/perf lock contention
+ [Thread debugging using libthread_db enabled]
+ Using host libthread_db library "/lib64/libthread_db.so.1".
+ failed to open perf.data: No such file or directory (try 'perf record' first)
+ Initializing perf session failed
+
+ Program received signal SIGSEGV, Segmentation fault.
+ 0x00000000005e7515 in auxtrace__free (session=0xffffffffffffffff) at util/auxtrace.c:2858
+ 2858 if (!session->auxtrace)
+ (gdb) p session
+ $1 = (struct perf_session *) 0xffffffffffffffff
+ (gdb) bt
+ #0 0x00000000005e7515 in auxtrace__free (session=0xffffffffffffffff) at util/auxtrace.c:2858
+ #1 0x000000000057bb4d in perf_session__delete (session=0xffffffffffffffff) at util/session.c:300
+ #2 0x000000000047c421 in __cmd_contention (argc=0, argv=0x7fffffffe200) at builtin-lock.c:2161
+ #3 0x000000000047dc95 in cmd_lock (argc=0, argv=0x7fffffffe200) at builtin-lock.c:2604
+ #4 0x0000000000501466 in run_builtin (p=0xe597a8 <commands+552>, argc=2, argv=0x7fffffffe200) at perf.c:322
+ #5 0x00000000005016d5 in handle_internal_command (argc=2, argv=0x7fffffffe200) at perf.c:375
+ #6 0x0000000000501824 in run_argv (argcp=0x7fffffffe02c, argv=0x7fffffffe020) at perf.c:419
+ #7 0x0000000000501b11 in main (argc=2, argv=0x7fffffffe200) at perf.c:535
+ (gdb)
+
+So just set it to NULL after using PTR_ERR(session) to decode the error,
+as perf_session__delete(NULL) is supported.
+
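+The crash boils down to the kernel-style pointer-encoded errors used by
+perf_session__new(): it returns either a valid pointer or an ERR_PTR(),
+and only NULL (never an ERR_PTR()) may reach the cleanup path. A minimal
+userspace sketch of that pattern (session_new()/session_delete() are
+illustrative stand-ins, the macros are simplified from
+include/linux/err.h):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define MAX_ERRNO	4095UL
+  #define ERR_PTR(err)	((void *)(long)(err))
+  #define PTR_ERR(ptr)	((long)(ptr))
+  #define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
+
+  struct session { int unused; };
+
+  static struct session *session_new(int fail)
+  {
+  	return fail ? ERR_PTR(-2L) : calloc(1, sizeof(struct session));
+  }
+
+  static void session_delete(struct session *s)
+  {
+  	if (!s)
+  		return;		/* NULL is a supported no-op ... */
+  	free(s);		/* ... but free() on an ERR_PTR() crashes */
+  }
+
+  int main(void)
+  {
+  	struct session *s = session_new(1);
+  	long err = 0;
+
+  	if (IS_ERR(s)) {
+  		err = PTR_ERR(s);
+  		s = NULL;	/* the fix: never hand the encoded error to cleanup */
+  	}
+  	session_delete(s);	/* safe: no-op on NULL */
+  	printf("err = %ld\n", err);
+  	return 0;
+  }
+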
+Fixes: eef4fee5e52071d5 ("perf lock: Dynamically allocate lockhash_table")
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: K Prateek Nayak <kprateek.nayak@amd.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mamatha Inamdar <mamatha4@linux.vnet.ibm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi Bangoria <ravi.bangoria@amd.com>
+Cc: Ross Zwisler <zwisler@chromium.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
+Cc: Yang Jihong <yangjihong1@huawei.com>
+Link: https://lore.kernel.org/lkml/ZN4R1AYfsD2J8lRs@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/builtin-lock.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1660,6 +1660,7 @@ static int __cmd_contention(int argc, co
+ if (IS_ERR(session)) {
+ pr_err("Initializing perf session failed\n");
+ err = PTR_ERR(session);
++ session = NULL;
+ goto out_delete;
+ }
+
--- /dev/null
+From f7345ccc62a4b880cf76458db5f320725f28e400 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Thu, 10 Oct 2024 18:36:09 +0200
+Subject: rcu/nocb: Fix rcuog wake-up from offline softirq
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit f7345ccc62a4b880cf76458db5f320725f28e400 upstream.
+
+After a CPU has set itself offline and before it eventually calls
+rcutree_report_cpu_dead(), there are still opportunities for callbacks
+to be enqueued, for example from a softirq. When that happens on NOCB,
+the rcuog wake-up is deferred through an IPI to an online CPU in order
+not to call into the scheduler and risk arming the RT-bandwidth timer
+after hrtimers have been migrated out and disabled.
+
+But performing a synchronized IPI from a softirq is buggy, as reported
+in the following scenario:
+
+ WARNING: CPU: 1 PID: 26 at kernel/smp.c:633 smp_call_function_single
+ Modules linked in: rcutorture torture
+ CPU: 1 UID: 0 PID: 26 Comm: migration/1 Not tainted 6.11.0-rc1-00012-g9139f93209d1 #1
+ Stopper: multi_cpu_stop+0x0/0x320 <- __stop_cpus+0xd0/0x120
+ RIP: 0010:smp_call_function_single
+ <IRQ>
+ swake_up_one_online
+ __call_rcu_nocb_wake
+ __call_rcu_common
+ ? rcu_torture_one_read
+ call_timer_fn
+ __run_timers
+ run_timer_softirq
+ handle_softirqs
+ irq_exit_rcu
+ ? tick_handle_periodic
+ sysvec_apic_timer_interrupt
+ </IRQ>
+
+Fix this by forcing the deferred rcuog wake-up through the NOCB timer
+when the CPU is offline. The actual wake-up will then happen from
+rcutree_report_cpu_dead().
+
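+In other words, the wake-up policy in __call_rcu_nocb_wake() becomes
+(a condensed sketch of the hunk below; the lazy branch is omitted):
+
+	} else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
+		/* Safe to wake the rcuog kthread directly. */
+		wake_nocb_gp(rdp, false);
+	} else {
+		/*
+		 * IRQs disabled or CPU offline: arm the NOCB deferral timer;
+		 * the final wake-up happens from rcutree_report_cpu_dead().
+		 */
+		wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred"));
+	}
+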
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Closes: https://lore.kernel.org/oe-lkp/202409231644.4c55582d-lkp@intel.com
+Fixes: 9139f93209d1 ("rcu/nocb: Fix RT throttling hrtimer armed from offline CPU")
+Reviewed-by: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree_nocb.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 97b99cd06923..16865475120b 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -554,13 +554,19 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+ rcu_nocb_unlock(rdp);
+ wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
+ TPS("WakeLazy"));
+- } else if (!irqs_disabled_flags(flags)) {
++ } else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
+ /* ... if queue was empty ... */
+ rcu_nocb_unlock(rdp);
+ wake_nocb_gp(rdp, false);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ TPS("WakeEmpty"));
+ } else {
++ /*
++ * Don't do the wake-up upfront on fragile paths.
++ * Also offline CPUs can't call swake_up_one_online() from
++ * (soft-)IRQs. Rely on the final deferred wake-up from
++		 * rcutree_report_cpu_dead().
++ */
+ rcu_nocb_unlock(rdp);
+ wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
+ TPS("WakeEmptyIsDeferred"));
+--
+2.47.0
+
kthread-unpark-only-parked-kthread.patch
secretmem-disable-memfd_secret-if-arch-cannot-set-direct-map.patch
net-ethernet-cortina-restore-tso-support.patch
+perf-lock-don-t-pass-an-err_ptr-directly-to-perf_session__delete.patch
+block-bfq-fix-uaf-for-accessing-waker_bfqq-after-splitting.patch
+rcu-nocb-fix-rcuog-wake-up-from-offline-softirq.patch