git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.11-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Nov 2024 06:39:53 +0000 (07:39 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Nov 2024 06:39:53 +0000 (07:39 +0100)
added patches:
lib-alloc_tag_module_unload-must-wait-for-pending-kfree_rcu-calls.patch
mips-export-__cmpxchg_small.patch
rcu-kvfree-add-kvfree_rcu_barrier-api.patch
risc-v-disallow-gcc-rust-builds.patch

queue-6.11/lib-alloc_tag_module_unload-must-wait-for-pending-kfree_rcu-calls.patch [moved from queue-6.11/lib-alloc_tag_module_unload-must-wait-for-pending-kf.patch with 83% similarity]
queue-6.11/mips-export-__cmpxchg_small.patch [new file with mode: 0644]
queue-6.11/rcu-kvfree-add-kvfree_rcu_barrier-api.patch [new file with mode: 0644]
queue-6.11/risc-v-disallow-gcc-rust-builds.patch [new file with mode: 0644]
queue-6.11/series

similarity index 83%
rename from queue-6.11/lib-alloc_tag_module_unload-must-wait-for-pending-kf.patch
rename to queue-6.11/lib-alloc_tag_module_unload-must-wait-for-pending-kfree_rcu-calls.patch
index 2a435cb29410769ed809c1300a8e5525e9a3fda9..b21ed6070a2a380be70ca3f6094fc962d37fe0ba 100644 (file)
@@ -1,11 +1,11 @@
-From 536dfe685ebd28b27ebfbc3d4b9168207b7e28a3 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From dc783ba4b9df3fb3e76e968b2cbeb9960069263c Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
 Date: Mon, 7 Oct 2024 22:52:24 +0200
 Subject: lib: alloc_tag_module_unload must wait for pending kfree_rcu calls
 
 From: Florian Westphal <fw@strlen.de>
 
-[ Upstream commit dc783ba4b9df3fb3e76e968b2cbeb9960069263c ]
+commit dc783ba4b9df3fb3e76e968b2cbeb9960069263c upstream.
 
 Ben Greear reports following splat:
  ------------[ cut here ]------------
@@ -42,16 +42,15 @@ Cc: Suren Baghdasaryan <surenb@google.com>
 Cc: Kent Overstreet <kent.overstreet@linux.dev>
 Cc: <stable@vger.kernel.org>
 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
 ---
- lib/codetag.c | 3 +++
+ lib/codetag.c |    3 +++
  1 file changed, 3 insertions(+)
 
-diff --git a/lib/codetag.c b/lib/codetag.c
-index afa8a2d4f3173..d1fbbb7c2ec3d 100644
 --- a/lib/codetag.c
 +++ b/lib/codetag.c
-@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module *mod)
+@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module
        if (!mod)
                return true;
  
@@ -61,6 +60,3 @@ index afa8a2d4f3173..d1fbbb7c2ec3d 100644
        mutex_lock(&codetag_lock);
        list_for_each_entry(cttype, &codetag_types, link) {
                struct codetag_module *found = NULL;
--- 
-2.43.0
-
diff --git a/queue-6.11/mips-export-__cmpxchg_small.patch b/queue-6.11/mips-export-__cmpxchg_small.patch
new file mode 100644 (file)
index 0000000..22021dd
--- /dev/null
@@ -0,0 +1,31 @@
+From 90a88784cdb7757feb8dd520255e6cb861f30943 Mon Sep 17 00:00:00 2001
+From: David Sterba <dsterba@suse.com>
+Date: Tue, 22 Oct 2024 16:21:05 +0200
+Subject: MIPS: export __cmpxchg_small()
+
+From: David Sterba <dsterba@suse.com>
+
+commit 90a88784cdb7757feb8dd520255e6cb861f30943 upstream.
+
+Export the symbol __cmpxchg_small() for btrfs.ko that uses it to store
+blk_status_t, which is u8. Reported by LKP:
+
+>> ERROR: modpost: "__cmpxchg_small" [fs/btrfs/btrfs.ko] undefined!
+
+Patch using the cmpxchg() https://lore.kernel.org/linux-btrfs/1d4f72f7fee285b2ddf4bf62b0ac0fd89def5417.1728575379.git.naohiro.aota@wdc.com/
+
+Link: https://lore.kernel.org/all/20241016134919.GO1609@suse.cz/
+Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/cmpxchg.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/kernel/cmpxchg.c
++++ b/arch/mips/kernel/cmpxchg.c
+@@ -102,3 +102,4 @@ unsigned long __cmpxchg_small(volatile v
+                       return old;
+       }
+ }
++EXPORT_SYMBOL(__cmpxchg_small);
diff --git a/queue-6.11/rcu-kvfree-add-kvfree_rcu_barrier-api.patch b/queue-6.11/rcu-kvfree-add-kvfree_rcu_barrier-api.patch
new file mode 100644 (file)
index 0000000..5dd48c7
--- /dev/null
@@ -0,0 +1,193 @@
+From 2b55d6a42d14c8675e38d6d9adca3014fdf01951 Mon Sep 17 00:00:00 2001
+From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Date: Tue, 20 Aug 2024 17:59:35 +0200
+Subject: rcu/kvfree: Add kvfree_rcu_barrier() API
+
+From: Uladzislau Rezki (Sony) <urezki@gmail.com>
+
+commit 2b55d6a42d14c8675e38d6d9adca3014fdf01951 upstream.
+
+Add a kvfree_rcu_barrier() function. It waits until all
+in-flight pointers are freed over RCU machinery. It does
+not wait any GP completion and it is within its right to
+return immediately if there are no outstanding pointers.
+
+This function is useful when there is a need to guarantee
+that a memory is fully freed before destroying memory caches.
+For example, during unloading a kernel module.
+
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rcutiny.h |    5 ++
+ include/linux/rcutree.h |    1 
+ kernel/rcu/tree.c       |  109 ++++++++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 107 insertions(+), 8 deletions(-)
+
+--- a/include/linux/rcutiny.h
++++ b/include/linux/rcutiny.h
+@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(str
+       kvfree(ptr);
+ }
++static inline void kvfree_rcu_barrier(void)
++{
++      rcu_barrier();
++}
++
+ #ifdef CONFIG_KASAN_GENERIC
+ void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+ #else
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -35,6 +35,7 @@ static inline void rcu_virt_note_context
+ void synchronize_rcu_expedited(void);
+ void kvfree_call_rcu(struct rcu_head *head, void *ptr);
++void kvfree_rcu_barrier(void);
+ void rcu_barrier(void);
+ void rcu_momentary_dyntick_idle(void);
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3584,18 +3584,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_
+ }
+ /*
+- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
++ * Return: %true if a work is queued, %false otherwise.
+  */
+-static void kfree_rcu_monitor(struct work_struct *work)
++static bool
++kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
+ {
+-      struct kfree_rcu_cpu *krcp = container_of(work,
+-              struct kfree_rcu_cpu, monitor_work.work);
+       unsigned long flags;
++      bool queued = false;
+       int i, j;
+-      // Drain ready for reclaim.
+-      kvfree_rcu_drain_ready(krcp);
+-
+       raw_spin_lock_irqsave(&krcp->lock, flags);
+       // Attempt to start a new batch.
+@@ -3634,11 +3631,27 @@ static void kfree_rcu_monitor(struct wor
+                       // be that the work is in the pending state when
+                       // channels have been detached following by each
+                       // other.
+-                      queue_rcu_work(system_wq, &krwp->rcu_work);
++                      queued = queue_rcu_work(system_wq, &krwp->rcu_work);
+               }
+       }
+       raw_spin_unlock_irqrestore(&krcp->lock, flags);
++      return queued;
++}
++
++/*
++ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
++ */
++static void kfree_rcu_monitor(struct work_struct *work)
++{
++      struct kfree_rcu_cpu *krcp = container_of(work,
++              struct kfree_rcu_cpu, monitor_work.work);
++
++      // Drain ready for reclaim.
++      kvfree_rcu_drain_ready(krcp);
++
++      // Queue a batch for a rest.
++      kvfree_rcu_queue_batch(krcp);
+       // If there is nothing to detach, it means that our job is
+       // successfully done here. In case of having at least one
+@@ -3859,6 +3872,86 @@ unlock_return:
+ }
+ EXPORT_SYMBOL_GPL(kvfree_call_rcu);
++/**
++ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
++ *
++ * Note that a single argument of kvfree_rcu() call has a slow path that
++ * triggers synchronize_rcu() following by freeing a pointer. It is done
++ * before the return from the function. Therefore for any single-argument
++ * call that will result in a kfree() to a cache that is to be destroyed
++ * during module exit, it is developer's responsibility to ensure that all
++ * such calls have returned before the call to kmem_cache_destroy().
++ */
++void kvfree_rcu_barrier(void)
++{
++      struct kfree_rcu_cpu_work *krwp;
++      struct kfree_rcu_cpu *krcp;
++      bool queued;
++      int i, cpu;
++
++      /*
++       * Firstly we detach objects and queue them over an RCU-batch
++       * for all CPUs. Finally queued works are flushed for each CPU.
++       *
++       * Please note. If there are outstanding batches for a particular
++       * CPU, those have to be finished first following by queuing a new.
++       */
++      for_each_possible_cpu(cpu) {
++              krcp = per_cpu_ptr(&krc, cpu);
++
++              /*
++               * Check if this CPU has any objects which have been queued for a
++               * new GP completion. If not(means nothing to detach), we are done
++               * with it. If any batch is pending/running for this "krcp", below
++               * per-cpu flush_rcu_work() waits its completion(see last step).
++               */
++              if (!need_offload_krc(krcp))
++                      continue;
++
++              while (1) {
++                      /*
++                       * If we are not able to queue a new RCU work it means:
++                       * - batches for this CPU are still in flight which should
++                       *   be flushed first and then repeat;
++                       * - no objects to detach, because of concurrency.
++                       */
++                      queued = kvfree_rcu_queue_batch(krcp);
++
++                      /*
++                       * Bail out, if there is no need to offload this "krcp"
++                       * anymore. As noted earlier it can run concurrently.
++                       */
++                      if (queued || !need_offload_krc(krcp))
++                              break;
++
++                      /* There are ongoing batches. */
++                      for (i = 0; i < KFREE_N_BATCHES; i++) {
++                              krwp = &(krcp->krw_arr[i]);
++                              flush_rcu_work(&krwp->rcu_work);
++                      }
++              }
++      }
++
++      /*
++       * Now we guarantee that all objects are flushed.
++       */
++      for_each_possible_cpu(cpu) {
++              krcp = per_cpu_ptr(&krc, cpu);
++
++              /*
++               * A monitor work can drain ready to reclaim objects
++               * directly. Wait its completion if running or pending.
++               */
++              cancel_delayed_work_sync(&krcp->monitor_work);
++
++              for (i = 0; i < KFREE_N_BATCHES; i++) {
++                      krwp = &(krcp->krw_arr[i]);
++                      flush_rcu_work(&krwp->rcu_work);
++              }
++      }
++}
++EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
++
+ static unsigned long
+ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
diff --git a/queue-6.11/risc-v-disallow-gcc-rust-builds.patch b/queue-6.11/risc-v-disallow-gcc-rust-builds.patch
new file mode 100644 (file)
index 0000000..9268768
--- /dev/null
@@ -0,0 +1,58 @@
+From 33549fcf37ec461f398f0a41e1c9948be2e5aca4 Mon Sep 17 00:00:00 2001
+From: Conor Dooley <conor.dooley@microchip.com>
+Date: Tue, 1 Oct 2024 12:28:13 +0100
+Subject: RISC-V: disallow gcc + rust builds
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+commit 33549fcf37ec461f398f0a41e1c9948be2e5aca4 upstream.
+
+During the discussion before supporting rust on riscv, it was decided
+not to support gcc yet, due to differences in extension handling
+compared to llvm (only the version of libclang matching the c compiler
+is supported). Recently Jason Montleon reported [1] that building with
+gcc caused build issues, due to unsupported arguments being passed to
+libclang. After some discussion between myself and Miguel, it is better
+to disable gcc + rust builds to match the original intent, and
+subsequently support it when an appropriate set of extensions can be
+deduced from the version of libclang.
+
+Closes: https://lore.kernel.org/all/20240917000848.720765-2-jmontleo@redhat.com/ [1]
+Link: https://lore.kernel.org/all/20240926-battering-revolt-6c6a7827413e@spud/ [2]
+Fixes: 70a57b247251a ("RISC-V: enable building 64-bit kernels with rust support")
+Cc: stable@vger.kernel.org
+Reported-by: Jason Montleon <jmontleo@redhat.com>
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Link: https://lore.kernel.org/r/20241001-playlist-deceiving-16ece2f440f5@spud
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/rust/arch-support.rst |    2 +-
+ arch/riscv/Kconfig                  |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/Documentation/rust/arch-support.rst
++++ b/Documentation/rust/arch-support.rst
+@@ -17,7 +17,7 @@ Architecture   Level of support  Constra
+ =============  ================  ==============================================
+ ``arm64``      Maintained        Little Endian only.
+ ``loongarch``  Maintained        \-
+-``riscv``      Maintained        ``riscv64`` only.
++``riscv``      Maintained        ``riscv64`` and LLVM/Clang only.
+ ``um``         Maintained        \-
+ ``x86``        Maintained        ``x86_64`` only.
+ =============  ================  ==============================================
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -172,7 +172,7 @@ config RISCV
+       select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RETHOOK if !XIP_KERNEL
+       select HAVE_RSEQ
+-      select HAVE_RUST if 64BIT
++      select HAVE_RUST if 64BIT && CC_IS_CLANG
+       select HAVE_SAMPLE_FTRACE_DIRECT
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+       select HAVE_STACKPROTECTOR
index 001923712600dad35e1c18f8c07dfe724547df57..fdf4a3c4ecc21910f1ad0f97c51eab46c7bcc364 100644 (file)
@@ -1,4 +1,3 @@
-lib-alloc_tag_module_unload-must-wait-for-pending-kf.patch
 drm-amdgpu-fix-random-data-corruption-for-sdma-7.patch
 cgroup-fix-potential-overflow-issue-when-checking-ma.patch
 spi-geni-qcom-fix-boot-warning-related-to-pm_runtime.patch
@@ -243,3 +242,7 @@ drm-amdgpu-swsmu-fix-ordering-for-setting-workload_mask.patch
 drm-amdgpu-swsmu-default-to-fullscreen-3d-profile-for-dgpus.patch
 fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
 drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
+mips-export-__cmpxchg_small.patch
+risc-v-disallow-gcc-rust-builds.patch
+rcu-kvfree-add-kvfree_rcu_barrier-api.patch
+lib-alloc_tag_module_unload-must-wait-for-pending-kfree_rcu-calls.patch