-From 536dfe685ebd28b27ebfbc3d4b9168207b7e28a3 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From dc783ba4b9df3fb3e76e968b2cbeb9960069263c Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
Date: Mon, 7 Oct 2024 22:52:24 +0200
Subject: lib: alloc_tag_module_unload must wait for pending kfree_rcu calls
From: Florian Westphal <fw@strlen.de>
-[ Upstream commit dc783ba4b9df3fb3e76e968b2cbeb9960069263c ]
+commit dc783ba4b9df3fb3e76e968b2cbeb9960069263c upstream.
Ben Greear reports the following splat:
------------[ cut here ]------------
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
- lib/codetag.c | 3 +++
+ lib/codetag.c | 3 +++
1 file changed, 3 insertions(+)
-diff --git a/lib/codetag.c b/lib/codetag.c
-index afa8a2d4f3173..d1fbbb7c2ec3d 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
-@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module *mod)
+@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module
if (!mod)
return true;
mutex_lock(&codetag_lock);
list_for_each_entry(cttype, &codetag_types, link) {
struct codetag_module *found = NULL;
---
-2.43.0
-
--- /dev/null
+From 2b55d6a42d14c8675e38d6d9adca3014fdf01951 Mon Sep 17 00:00:00 2001
+From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Date: Tue, 20 Aug 2024 17:59:35 +0200
+Subject: rcu/kvfree: Add kvfree_rcu_barrier() API
+
+From: Uladzislau Rezki (Sony) <urezki@gmail.com>
+
+commit 2b55d6a42d14c8675e38d6d9adca3014fdf01951 upstream.
+
+Add a kvfree_rcu_barrier() function. It waits until all
+in-flight pointers are freed over the RCU machinery. It does
+not wait for any GP completion and is within its rights to
+return immediately if there are no outstanding pointers.
+
+This function is useful when there is a need to guarantee
+that memory is fully freed before destroying memory caches,
+for example, when unloading a kernel module.
+
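+A minimal usage sketch of the intended pattern during module
+teardown (struct foo, foo_free() and foo_exit() below are
+hypothetical names, for illustration only):
+
+	struct foo {
+		struct rcu_head rcu;
+		/* ... payload ... */
+	};
+
+	static void foo_free(struct foo *f)
+	{
+		/* Two-argument form: queued on the batched RCU path. */
+		kvfree_rcu(f, rcu);
+	}
+
+	static void __exit foo_exit(void)
+	{
+		/*
+		 * Wait until every pointer handed to kvfree_rcu() has
+		 * actually been freed, so nothing is left in flight when
+		 * the module's memory caches and code go away.
+		 */
+		kvfree_rcu_barrier();
+	}
+	module_exit(foo_exit);
+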
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rcutiny.h | 5 ++
+ include/linux/rcutree.h | 1
+ kernel/rcu/tree.c | 109 ++++++++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 107 insertions(+), 8 deletions(-)
+
+--- a/include/linux/rcutiny.h
++++ b/include/linux/rcutiny.h
+@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(str
+ kvfree(ptr);
+ }
+
++static inline void kvfree_rcu_barrier(void)
++{
++ rcu_barrier();
++}
++
+ #ifdef CONFIG_KASAN_GENERIC
+ void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+ #else
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -35,6 +35,7 @@ static inline void rcu_virt_note_context
+
+ void synchronize_rcu_expedited(void);
+ void kvfree_call_rcu(struct rcu_head *head, void *ptr);
++void kvfree_rcu_barrier(void);
+
+ void rcu_barrier(void);
+ void rcu_momentary_dyntick_idle(void);
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3584,18 +3584,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_
+ }
+
+ /*
+- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
++ * Return: %true if a work is queued, %false otherwise.
+ */
+-static void kfree_rcu_monitor(struct work_struct *work)
++static bool
++kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
+ {
+- struct kfree_rcu_cpu *krcp = container_of(work,
+- struct kfree_rcu_cpu, monitor_work.work);
+ unsigned long flags;
++ bool queued = false;
+ int i, j;
+
+- // Drain ready for reclaim.
+- kvfree_rcu_drain_ready(krcp);
+-
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+
+ // Attempt to start a new batch.
+@@ -3634,11 +3631,27 @@ static void kfree_rcu_monitor(struct wor
+ // be that the work is in the pending state when
+ // channels have been detached following by each
+ // other.
+- queue_rcu_work(system_wq, &krwp->rcu_work);
++ queued = queue_rcu_work(system_wq, &krwp->rcu_work);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
++ return queued;
++}
++
++/*
++ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
++ */
++static void kfree_rcu_monitor(struct work_struct *work)
++{
++ struct kfree_rcu_cpu *krcp = container_of(work,
++ struct kfree_rcu_cpu, monitor_work.work);
++
++ // Drain ready for reclaim.
++ kvfree_rcu_drain_ready(krcp);
++
++	// Queue a batch for the rest.
++ kvfree_rcu_queue_batch(krcp);
+
+ // If there is nothing to detach, it means that our job is
+ // successfully done here. In case of having at least one
+@@ -3859,6 +3872,86 @@ unlock_return:
+ }
+ EXPORT_SYMBOL_GPL(kvfree_call_rcu);
+
++/**
++ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
++ *
++ * Note that a single-argument kvfree_rcu() call has a slow path that
++ * triggers synchronize_rcu() followed by freeing the pointer, and this
++ * is done before the function returns. Therefore, for any single-argument
++ * call that will result in a kfree() to a cache that is to be destroyed
++ * during module exit, it is the developer's responsibility to ensure that
++ * all such calls have returned before the call to kmem_cache_destroy().
++ */
++void kvfree_rcu_barrier(void)
++{
++ struct kfree_rcu_cpu_work *krwp;
++ struct kfree_rcu_cpu *krcp;
++ bool queued;
++ int i, cpu;
++
++ /*
++	 * First we detach objects and queue them over an RCU batch
++	 * for all CPUs. Finally, the queued works are flushed for each CPU.
++	 *
++	 * Please note: if there are outstanding batches for a particular
++	 * CPU, those have to be finished first, followed by queuing a new one.
++ */
++ for_each_possible_cpu(cpu) {
++ krcp = per_cpu_ptr(&krc, cpu);
++
++ /*
++		 * Check if this CPU has any objects which have been queued for a
++		 * new GP completion. If not (nothing to detach), we are done
++		 * with it. If any batch is pending/running for this "krcp", the
++		 * per-CPU flush_rcu_work() below waits for its completion (see
++		 * the last step).
++ */
++ if (!need_offload_krc(krcp))
++ continue;
++
++ while (1) {
++ /*
++			 * If we are not able to queue new RCU work, it means either:
++			 * - batches for this CPU are still in flight, which should
++			 *    be flushed first and then we repeat;
++			 * - or there are no objects to detach, because of concurrency.
++ */
++ queued = kvfree_rcu_queue_batch(krcp);
++
++ /*
++			 * Bail out if there is no need to offload this "krcp"
++			 * anymore. As noted earlier, it can run concurrently.
++ */
++ if (queued || !need_offload_krc(krcp))
++ break;
++
++ /* There are ongoing batches. */
++ for (i = 0; i < KFREE_N_BATCHES; i++) {
++ krwp = &(krcp->krw_arr[i]);
++ flush_rcu_work(&krwp->rcu_work);
++ }
++ }
++ }
++
++ /*
++ * Now we guarantee that all objects are flushed.
++ */
++ for_each_possible_cpu(cpu) {
++ krcp = per_cpu_ptr(&krc, cpu);
++
++ /*
++		 * A monitor work can drain ready-to-reclaim objects
++		 * directly. Wait for its completion if it is running or pending.
++ */
++ cancel_delayed_work_sync(&krcp->monitor_work);
++
++ for (i = 0; i < KFREE_N_BATCHES; i++) {
++ krwp = &(krcp->krw_arr[i]);
++ flush_rcu_work(&krwp->rcu_work);
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
++
+ static unsigned long
+ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
--- /dev/null
+From 33549fcf37ec461f398f0a41e1c9948be2e5aca4 Mon Sep 17 00:00:00 2001
+From: Conor Dooley <conor.dooley@microchip.com>
+Date: Tue, 1 Oct 2024 12:28:13 +0100
+Subject: RISC-V: disallow gcc + rust builds
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+commit 33549fcf37ec461f398f0a41e1c9948be2e5aca4 upstream.
+
+During the discussion before supporting rust on riscv, it was decided
+not to support gcc yet, due to differences in extension handling
+compared to llvm (only the version of libclang matching the c compiler
+is supported). Recently Jason Montleon reported [1] that building with
+gcc caused build issues, due to unsupported arguments being passed to
+libclang. After some discussion between Miguel and myself, it is better
+to disable gcc + rust builds to match the original intent, and to
+support them again once an appropriate set of extensions can be
+deduced from the version of libclang.
+
+Closes: https://lore.kernel.org/all/20240917000848.720765-2-jmontleo@redhat.com/ [1]
+Link: https://lore.kernel.org/all/20240926-battering-revolt-6c6a7827413e@spud/ [2]
+Fixes: 70a57b247251a ("RISC-V: enable building 64-bit kernels with rust support")
+Cc: stable@vger.kernel.org
+Reported-by: Jason Montleon <jmontleo@redhat.com>
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Link: https://lore.kernel.org/r/20241001-playlist-deceiving-16ece2f440f5@spud
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/rust/arch-support.rst | 2 +-
+ arch/riscv/Kconfig | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/Documentation/rust/arch-support.rst
++++ b/Documentation/rust/arch-support.rst
+@@ -17,7 +17,7 @@ Architecture Level of support Constra
+ ============= ================ ==============================================
+ ``arm64`` Maintained Little Endian only.
+ ``loongarch`` Maintained \-
+-``riscv`` Maintained ``riscv64`` only.
++``riscv`` Maintained ``riscv64`` and LLVM/Clang only.
+ ``um`` Maintained \-
+ ``x86`` Maintained ``x86_64`` only.
+ ============= ================ ==============================================
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -172,7 +172,7 @@ config RISCV
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RETHOOK if !XIP_KERNEL
+ select HAVE_RSEQ
+- select HAVE_RUST if 64BIT
++ select HAVE_RUST if 64BIT && CC_IS_CLANG
+ select HAVE_SAMPLE_FTRACE_DIRECT
+ select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+ select HAVE_STACKPROTECTOR