6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Nov 2025 09:57:40 +0000 (10:57 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Nov 2025 09:57:40 +0000 (10:57 +0100)
added patches:
alsa-hda-fix-missing-pointer-check-in-hda_component_manager_init-function.patch
io_uring-napi-fix-io_napi_entry-rcu-accesses.patch
kvm-guest_memfd-pass-index-not-gfn-to-__kvm_gmem_get_pfn.patch
kvm-guest_memfd-remove-bindings-on-memslot-deletion-when-gmem-is-dying.patch
kvm-guest_memfd-remove-rcu-protected-attribute-from-slot-gmem.file.patch
kvm-vmx-fix-check-for-valid-gva-on-an-ept-violation.patch
kvm-vmx-split-out-guts-of-ept-violation-to-common-exposed-function.patch
mm-percpu-do-not-consider-sleepable-allocations-atomic.patch
mptcp-fix-msg_peek-stream-corruption.patch
net-netpoll-fix-incorrect-refcount-handling-causing-incorrect-cleanup.patch
net-netpoll-flush-skb-pool-during-cleanup.patch
net-netpoll-individualize-the-skb-pool.patch
rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch
rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch
wifi-cfg80211-add-an-hrtimer-based-delayed-work-item.patch
wifi-mac80211-use-wiphy_hrtimer_work-for-csa.switch_work.patch

17 files changed:
queue-6.12/alsa-hda-fix-missing-pointer-check-in-hda_component_manager_init-function.patch [new file with mode: 0644]
queue-6.12/io_uring-napi-fix-io_napi_entry-rcu-accesses.patch [new file with mode: 0644]
queue-6.12/kvm-guest_memfd-pass-index-not-gfn-to-__kvm_gmem_get_pfn.patch [new file with mode: 0644]
queue-6.12/kvm-guest_memfd-remove-bindings-on-memslot-deletion-when-gmem-is-dying.patch [new file with mode: 0644]
queue-6.12/kvm-guest_memfd-remove-rcu-protected-attribute-from-slot-gmem.file.patch [new file with mode: 0644]
queue-6.12/kvm-vmx-fix-check-for-valid-gva-on-an-ept-violation.patch [new file with mode: 0644]
queue-6.12/kvm-vmx-split-out-guts-of-ept-violation-to-common-exposed-function.patch [new file with mode: 0644]
queue-6.12/mm-percpu-do-not-consider-sleepable-allocations-atomic.patch [new file with mode: 0644]
queue-6.12/mptcp-fix-msg_peek-stream-corruption.patch [new file with mode: 0644]
queue-6.12/net-netpoll-fix-incorrect-refcount-handling-causing-incorrect-cleanup.patch [new file with mode: 0644]
queue-6.12/net-netpoll-flush-skb-pool-during-cleanup.patch [new file with mode: 0644]
queue-6.12/net-netpoll-individualize-the-skb-pool.patch [new file with mode: 0644]
queue-6.12/rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch [new file with mode: 0644]
queue-6.12/rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/wifi-cfg80211-add-an-hrtimer-based-delayed-work-item.patch [new file with mode: 0644]
queue-6.12/wifi-mac80211-use-wiphy_hrtimer_work-for-csa.switch_work.patch [new file with mode: 0644]

diff --git a/queue-6.12/alsa-hda-fix-missing-pointer-check-in-hda_component_manager_init-function.patch b/queue-6.12/alsa-hda-fix-missing-pointer-check-in-hda_component_manager_init-function.patch
new file mode 100644 (file)
index 0000000..310d264
--- /dev/null
@@ -0,0 +1,54 @@
+From stable+bounces-194761-greg=kroah.com@vger.kernel.org Fri Nov 14 04:33:17 2025
+From: Rajani Kantha <681739313@139.com>
+Date: Fri, 14 Nov 2025 11:28:09 +0800
+Subject: ALSA: hda: Fix missing pointer check in hda_component_manager_init function
+To: arefev@swemel.ru, tiwai@suse.de, stable@vger.kernel.org
+Message-ID: <20251114032809.30655-2-681739313@139.com>
+
+From: Denis Arefev <arefev@swemel.ru>
+
+[ Upstream commit 1cf11d80db5df805b538c942269e05a65bcaf5bc ]
+
+The __component_match_add function may assign the 'matchptr' pointer
+the value ERR_PTR(-ENOMEM), which will subsequently be dereferenced.
+
+The call stack leading to the error looks like this:
+
+hda_component_manager_init
+|-> component_match_add
+    |-> component_match_add_release
+        |-> __component_match_add ( ... ,**matchptr, ... )
+            |-> *matchptr = ERR_PTR(-ENOMEM);       // assign
+|-> component_master_add_with_match( ...  match)
+    |-> component_match_realloc(match, match->num); // dereference
+
+Add IS_ERR() check to prevent the crash.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: ae7abe36e352 ("ALSA: hda/realtek: Add CS35L41 support for Thinkpad laptops")
+Cc: stable@vger.kernel.org
+Signed-off-by: Denis Arefev <arefev@swemel.ru>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[ Modified the source code path because 6.12 doesn't have
+commit 6014e9021b28 ("ALSA: hda: Move codec drivers into
+sound/hda/codecs directory") ]
+Signed-off-by: Rajani Kantha <681739313@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/hda_component.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/pci/hda/hda_component.c
++++ b/sound/pci/hda/hda_component.c
+@@ -181,6 +181,10 @@ int hda_component_manager_init(struct hd
+               sm->match_str = match_str;
+               sm->index = i;
+               component_match_add(dev, &match, hda_comp_match_dev_name, sm);
++              if (IS_ERR(match)) {
++                      codec_err(cdc, "Fail to add component %ld\n", PTR_ERR(match));
++                      return PTR_ERR(match);
++              }
+       }
+       ret = component_master_add_with_match(dev, ops, match);
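
For context, a minimal sketch of the ERR_PTR convention the fix above relies
on; use_match() is a hypothetical caller, not part of the HDA driver:

#include <linux/component.h>
#include <linux/err.h>

/*
 * A callee such as component_match_add() can store an encoded errno
 * (e.g. ERR_PTR(-ENOMEM)) through its output pointer, so the caller
 * must test the pointer with IS_ERR() before any dereference.
 */
static int use_match(struct component_match *match)
{
	if (IS_ERR(match))		/* encoded errno, not a real object */
		return PTR_ERR(match);	/* e.g. -ENOMEM */
	/* safe to dereference 'match' from here on */
	return 0;
}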
diff --git a/queue-6.12/io_uring-napi-fix-io_napi_entry-rcu-accesses.patch b/queue-6.12/io_uring-napi-fix-io_napi_entry-rcu-accesses.patch
new file mode 100644 (file)
index 0000000..9ed0ac2
--- /dev/null
@@ -0,0 +1,78 @@
+From s.artuhov@tssltd.ru Wed Nov 12 12:37:46 2025
+From: Stepan Artuhov <s.artuhov@tssltd.ru>
+Date: Wed, 12 Nov 2025 14:37:06 +0300
+Subject: io_uring/napi: fix io_napi_entry RCU accesses
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Olivier Langlois <olivier@trillion01.com>, Jens Axboe <axboe@kernel.dk>, Pavel Begunkov <asml.silence@gmail.com>, io-uring@vger.kernel.org, linux-kernel@vger.kernel.org, lvc-project@linuxtesting.org, Stepan Artuhov <s.artuhov@tssltd.ru>
+Message-ID: <20251112113706.533309-1-s.artuhov@tssltd.ru>
+
+From: Olivier Langlois <olivier@trillion01.com>
+
+[ Upstream commit 45b3941d09d13b3503309be1f023b83deaf69b4d ]
+
+Correct 3 RCU structure modifications that were not using the RCU
+functions to perform their updates.
+
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Pavel Begunkov <asml.silence@gmail.com>
+Cc: io-uring@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: lvc-project@linuxtesting.org
+Signed-off-by: Olivier Langlois <olivier@trillion01.com>
+Link: https://lore.kernel.org/r/9f53b5169afa8c7bf3665a0b19dc2f7061173530.1728828877.git.olivier@trillion01.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[Stepan Artuhov: cherry-picked a commit]
+Signed-off-by: Stepan Artuhov <s.artuhov@tssltd.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/napi.c |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/io_uring/napi.c
++++ b/io_uring/napi.c
+@@ -81,19 +81,24 @@ void __io_napi_add(struct io_ring_ctx *c
+       }
+       hlist_add_tail_rcu(&e->node, hash_list);
+-      list_add_tail(&e->list, &ctx->napi_list);
++      list_add_tail_rcu(&e->list, &ctx->napi_list);
+       spin_unlock(&ctx->napi_lock);
+ }
+ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
+ {
+       struct io_napi_entry *e;
+-      unsigned int i;
+       spin_lock(&ctx->napi_lock);
+-      hash_for_each(ctx->napi_ht, i, e, node) {
+-              if (time_after(jiffies, e->timeout)) {
+-                      list_del(&e->list);
++      /*
++       * list_for_each_entry_safe() is not required as long as:
++       * 1. list_del_rcu() does not reset the deleted node next pointer
++       * 2. kfree_rcu() delays the memory freeing until the next quiescent
++       *    state
++       */
++      list_for_each_entry(e, &ctx->napi_list, list) {
++              if (time_after(jiffies, READ_ONCE(e->timeout))) {
++                      list_del_rcu(&e->list);
+                       hash_del_rcu(&e->node);
+                       kfree_rcu(e, rcu);
+               }
+@@ -204,13 +209,13 @@ void io_napi_init(struct io_ring_ctx *ct
+ void io_napi_free(struct io_ring_ctx *ctx)
+ {
+       struct io_napi_entry *e;
+-      unsigned int i;
+       spin_lock(&ctx->napi_lock);
+-      hash_for_each(ctx->napi_ht, i, e, node) {
++      list_for_each_entry(e, &ctx->napi_list, list) {
+               hash_del_rcu(&e->node);
+               kfree_rcu(e, rcu);
+       }
++      INIT_LIST_HEAD_RCU(&ctx->napi_list);
+       spin_unlock(&ctx->napi_lock);
+ }
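
As background, a hypothetical writer-side sketch of the RCU list pairing the
patch above switches to (generic 'entry' type, not the io_uring structures):

#include <linux/rculist.h>
#include <linux/slab.h>

struct entry {
	struct list_head list;
	struct rcu_head rcu;
};

/*
 * list_del_rcu() leaves the removed node's ->next intact and
 * kfree_rcu() defers the free past a grace period, so lockless
 * readers still walking the list never touch freed memory.
 */
static void remove_entry(struct entry *e)
{
	list_del_rcu(&e->list);	/* unlink; readers may still traverse it */
	kfree_rcu(e, rcu);	/* free only after the next grace period */
}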
diff --git a/queue-6.12/kvm-guest_memfd-pass-index-not-gfn-to-__kvm_gmem_get_pfn.patch b/queue-6.12/kvm-guest_memfd-pass-index-not-gfn-to-__kvm_gmem_get_pfn.patch
new file mode 100644 (file)
index 0000000..c5865c1
--- /dev/null
@@ -0,0 +1,98 @@
+From stable+bounces-195373-greg=kroah.com@vger.kernel.org Thu Nov 20 18:43:18 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 12:36:29 -0500
+Subject: KVM: guest_memfd: Pass index, not gfn, to __kvm_gmem_get_pfn()
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <seanjc@google.com>, Dmitry Osipenko <dmitry.osipenko@collabora.com>, Paolo Bonzini <pbonzini@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120173631.1905381-1-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 4af18dc6a9204464db76d9771d1f40e2b46bf6ae ]
+
+Refactor guest_memfd usage of __kvm_gmem_get_pfn() to pass the index into
+the guest_memfd file instead of the gfn, i.e. resolve the index based on
+the slot+gfn in the caller instead of in __kvm_gmem_get_pfn().  This will
+allow kvm_gmem_get_pfn() to retrieve and return the specific "struct page",
+which requires the index into the folio, without a redoing the index
+calculation multiple times (which isn't costly, just hard to follow).
+
+Opportunistically add a kvm_gmem_get_index() helper to make the copy+pasted
+code easier to understand.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-ID: <20241010182427.1434605-46-seanjc@google.com>
+Stable-dep-of: ae431059e75d ("KVM: guest_memfd: Remove bindings on memslot deletion when gmem is dying")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/guest_memfd.c |   20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/virt/kvm/guest_memfd.c
++++ b/virt/kvm/guest_memfd.c
+@@ -304,6 +304,11 @@ static inline struct file *kvm_gmem_get_
+       return get_file_active(&slot->gmem.file);
+ }
++static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
++{
++      return gfn - slot->base_gfn + slot->gmem.pgoff;
++}
++
+ static struct file_operations kvm_gmem_fops = {
+       .open           = generic_file_open,
+       .release        = kvm_gmem_release,
+@@ -553,12 +558,11 @@ void kvm_gmem_unbind(struct kvm_memory_s
+ }
+ /* Returns a locked folio on success.  */
+-static struct folio *
+-__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
+-                 gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
+-                 int *max_order)
++static struct folio *__kvm_gmem_get_pfn(struct file *file,
++                                      struct kvm_memory_slot *slot,
++                                      pgoff_t index, kvm_pfn_t *pfn,
++                                      bool *is_prepared, int *max_order)
+ {
+-      pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
+       struct kvm_gmem *gmem = file->private_data;
+       struct folio *folio;
+@@ -594,6 +598,7 @@ __kvm_gmem_get_pfn(struct file *file, st
+ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+ {
++      pgoff_t index = kvm_gmem_get_index(slot, gfn);
+       struct file *file = kvm_gmem_get_file(slot);
+       struct folio *folio;
+       bool is_prepared = false;
+@@ -602,7 +607,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, st
+       if (!file)
+               return -EFAULT;
+-      folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
++      folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+       if (IS_ERR(folio)) {
+               r = PTR_ERR(folio);
+               goto out;
+@@ -650,6 +655,7 @@ long kvm_gmem_populate(struct kvm *kvm,
+       for (i = 0; i < npages; i += (1 << max_order)) {
+               struct folio *folio;
+               gfn_t gfn = start_gfn + i;
++              pgoff_t index = kvm_gmem_get_index(slot, gfn);
+               bool is_prepared = false;
+               kvm_pfn_t pfn;
+@@ -658,7 +664,7 @@ long kvm_gmem_populate(struct kvm *kvm,
+                       break;
+               }
+-              folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
++              folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+               if (IS_ERR(folio)) {
+                       ret = PTR_ERR(folio);
+                       break;
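
A worked example of the index calculation the new kvm_gmem_get_index()
helper performs (the standalone signature below is illustrative only): for a
memslot with base_gfn = 0x100 bound at guest_memfd page offset 0x10, guest
frame 0x105 maps to file index 0x105 - 0x100 + 0x10 = 0x15.

#include <linux/kvm_types.h>
#include <linux/types.h>

/* Resolve a guest frame number to an index into the backing file. */
static pgoff_t gmem_index(gfn_t gfn, gfn_t base_gfn, pgoff_t pgoff)
{
	return gfn - base_gfn + pgoff;
}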
diff --git a/queue-6.12/kvm-guest_memfd-remove-bindings-on-memslot-deletion-when-gmem-is-dying.patch b/queue-6.12/kvm-guest_memfd-remove-bindings-on-memslot-deletion-when-gmem-is-dying.patch
new file mode 100644 (file)
index 0000000..01bcdb7
--- /dev/null
@@ -0,0 +1,176 @@
+From stable+bounces-195375-greg=kroah.com@vger.kernel.org Thu Nov 20 18:43:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 12:36:31 -0500
+Subject: KVM: guest_memfd: Remove bindings on memslot deletion when gmem is dying
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <seanjc@google.com>, syzbot+2479e53d0db9b32ae2aa@syzkaller.appspotmail.com, Hillf Danton <hdanton@sina.com>, Vishal Annapurve <vannapurve@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120173631.1905381-3-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ae431059e75d36170a5ae6b44cc4d06d43613215 ]
+
+When unbinding a memslot from a guest_memfd instance, remove the bindings
+even if the guest_memfd file is dying, i.e. even if its file refcount has
+gone to zero.  If the memslot is freed before the file is fully released,
+nullifying the memslot side of the binding in kvm_gmem_release() will
+write to freed memory, as detected by syzbot+KASAN:
+
+  ==================================================================
+  BUG: KASAN: slab-use-after-free in kvm_gmem_release+0x176/0x440 virt/kvm/guest_memfd.c:353
+  Write of size 8 at addr ffff88807befa508 by task syz.0.17/6022
+
+  CPU: 0 UID: 0 PID: 6022 Comm: syz.0.17 Not tainted syzkaller #0 PREEMPT(full)
+  Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/02/2025
+  Call Trace:
+   <TASK>
+   dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
+   print_address_description mm/kasan/report.c:378 [inline]
+   print_report+0xca/0x240 mm/kasan/report.c:482
+   kasan_report+0x118/0x150 mm/kasan/report.c:595
+   kvm_gmem_release+0x176/0x440 virt/kvm/guest_memfd.c:353
+   __fput+0x44c/0xa70 fs/file_table.c:468
+   task_work_run+0x1d4/0x260 kernel/task_work.c:227
+   resume_user_mode_work include/linux/resume_user_mode.h:50 [inline]
+   exit_to_user_mode_loop+0xe9/0x130 kernel/entry/common.c:43
+   exit_to_user_mode_prepare include/linux/irq-entry-common.h:225 [inline]
+   syscall_exit_to_user_mode_work include/linux/entry-common.h:175 [inline]
+   syscall_exit_to_user_mode include/linux/entry-common.h:210 [inline]
+   do_syscall_64+0x2bd/0xfa0 arch/x86/entry/syscall_64.c:100
+   entry_SYSCALL_64_after_hwframe+0x77/0x7f
+  RIP: 0033:0x7fbeeff8efc9
+   </TASK>
+
+  Allocated by task 6023:
+   kasan_save_stack mm/kasan/common.c:56 [inline]
+   kasan_save_track+0x3e/0x80 mm/kasan/common.c:77
+   poison_kmalloc_redzone mm/kasan/common.c:397 [inline]
+   __kasan_kmalloc+0x93/0xb0 mm/kasan/common.c:414
+   kasan_kmalloc include/linux/kasan.h:262 [inline]
+   __kmalloc_cache_noprof+0x3e2/0x700 mm/slub.c:5758
+   kmalloc_noprof include/linux/slab.h:957 [inline]
+   kzalloc_noprof include/linux/slab.h:1094 [inline]
+   kvm_set_memory_region+0x747/0xb90 virt/kvm/kvm_main.c:2104
+   kvm_vm_ioctl_set_memory_region+0x6f/0xd0 virt/kvm/kvm_main.c:2154
+   kvm_vm_ioctl+0x957/0xc60 virt/kvm/kvm_main.c:5201
+   vfs_ioctl fs/ioctl.c:51 [inline]
+   __do_sys_ioctl fs/ioctl.c:597 [inline]
+   __se_sys_ioctl+0xfc/0x170 fs/ioctl.c:583
+   do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+   do_syscall_64+0xfa/0xfa0 arch/x86/entry/syscall_64.c:94
+   entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+  Freed by task 6023:
+   kasan_save_stack mm/kasan/common.c:56 [inline]
+   kasan_save_track+0x3e/0x80 mm/kasan/common.c:77
+   kasan_save_free_info+0x46/0x50 mm/kasan/generic.c:584
+   poison_slab_object mm/kasan/common.c:252 [inline]
+   __kasan_slab_free+0x5c/0x80 mm/kasan/common.c:284
+   kasan_slab_free include/linux/kasan.h:234 [inline]
+   slab_free_hook mm/slub.c:2533 [inline]
+   slab_free mm/slub.c:6622 [inline]
+   kfree+0x19a/0x6d0 mm/slub.c:6829
+   kvm_set_memory_region+0x9c4/0xb90 virt/kvm/kvm_main.c:2130
+   kvm_vm_ioctl_set_memory_region+0x6f/0xd0 virt/kvm/kvm_main.c:2154
+   kvm_vm_ioctl+0x957/0xc60 virt/kvm/kvm_main.c:5201
+   vfs_ioctl fs/ioctl.c:51 [inline]
+   __do_sys_ioctl fs/ioctl.c:597 [inline]
+   __se_sys_ioctl+0xfc/0x170 fs/ioctl.c:583
+   do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+   do_syscall_64+0xfa/0xfa0 arch/x86/entry/syscall_64.c:94
+   entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Deliberately don't acquire the filemap invalidate lock when the file is
+dying, as the lifecycle of f_mapping is outside the purview of KVM.
+Dereferencing the mapping is *probably* fine, but there's no need to
+invalidate anything
+as memslot deletion is responsible for zapping SPTEs, and the only code
+that can access the dying file is kvm_gmem_release(), whose core code is
+mutually exclusive with unbinding.
+
+Note, the mutual exclusivity is also what makes it safe to access the
+bindings on a dying gmem instance.  Unbinding either runs with slots_lock
+held, or after the last reference to the owning "struct kvm" is put, and
+kvm_gmem_release() nullifies the slot pointer under slots_lock, and puts
+its reference to the VM after that is done.
+
+Reported-by: syzbot+2479e53d0db9b32ae2aa@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68fa7a22.a70a0220.3bf6c6.008b.GAE@google.com
+Tested-by: syzbot+2479e53d0db9b32ae2aa@syzkaller.appspotmail.com
+Fixes: a7800aa80ea4 ("KVM: Add KVM_CREATE_GUEST_MEMFD ioctl() for guest-specific backing memory")
+Cc: stable@vger.kernel.org
+Cc: Hillf Danton <hdanton@sina.com>
+Reviewed-By: Vishal Annapurve <vannapurve@google.com>
+Link: https://patch.msgid.link/20251104011205.3853541-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/guest_memfd.c |   45 ++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 32 insertions(+), 13 deletions(-)
+
+--- a/virt/kvm/guest_memfd.c
++++ b/virt/kvm/guest_memfd.c
+@@ -534,31 +534,50 @@ err:
+       return r;
+ }
+-void kvm_gmem_unbind(struct kvm_memory_slot *slot)
++static void __kvm_gmem_unbind(struct kvm_memory_slot *slot, struct kvm_gmem *gmem)
+ {
+       unsigned long start = slot->gmem.pgoff;
+       unsigned long end = start + slot->npages;
+-      struct kvm_gmem *gmem;
++
++      xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
++
++      /*
++       * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
++       * cannot see this memslot.
++       */
++      WRITE_ONCE(slot->gmem.file, NULL);
++}
++
++void kvm_gmem_unbind(struct kvm_memory_slot *slot)
++{
+       struct file *file;
+       /*
+-       * Nothing to do if the underlying file was already closed (or is being
+-       * closed right now), kvm_gmem_release() invalidates all bindings.
++       * Nothing to do if the underlying file was _already_ closed, as
++       * kvm_gmem_release() invalidates and nullifies all bindings.
+        */
+-      file = kvm_gmem_get_file(slot);
+-      if (!file)
++      if (!slot->gmem.file)
+               return;
+-      gmem = file->private_data;
+-
+-      filemap_invalidate_lock(file->f_mapping);
+-      xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
++      file = kvm_gmem_get_file(slot);
+       /*
+-       * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
+-       * cannot see this memslot.
++       * However, if the file is _being_ closed, then the bindings need to be
++       * removed as kvm_gmem_release() might not run until after the memslot
++       * is freed.  Note, modifying the bindings is safe even though the file
++       * is dying as kvm_gmem_release() nullifies slot->gmem.file under
++       * slots_lock, and only puts its reference to KVM after destroying all
++       * bindings.  I.e. reaching this point means kvm_gmem_release() hasn't
++       * yet destroyed the bindings or freed the gmem_file, and can't do so
++       * until the caller drops slots_lock.
+        */
+-      WRITE_ONCE(slot->gmem.file, NULL);
++      if (!file) {
++              __kvm_gmem_unbind(slot, slot->gmem.file->private_data);
++              return;
++      }
++
++      filemap_invalidate_lock(file->f_mapping);
++      __kvm_gmem_unbind(slot, file->private_data);
+       filemap_invalidate_unlock(file->f_mapping);
+       fput(file);
diff --git a/queue-6.12/kvm-guest_memfd-remove-rcu-protected-attribute-from-slot-gmem.file.patch b/queue-6.12/kvm-guest_memfd-remove-rcu-protected-attribute-from-slot-gmem.file.patch
new file mode 100644 (file)
index 0000000..bbd7851
--- /dev/null
@@ -0,0 +1,157 @@
+From stable+bounces-195374-greg=kroah.com@vger.kernel.org Thu Nov 20 18:46:54 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 12:36:30 -0500
+Subject: KVM: guest_memfd: Remove RCU-protected attribute from slot->gmem.file
+To: stable@vger.kernel.org
+Cc: Yan Zhao <yan.y.zhao@intel.com>, Paolo Bonzini <pbonzini@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120173631.1905381-2-sashal@kernel.org>
+
+From: Yan Zhao <yan.y.zhao@intel.com>
+
+[ Upstream commit 67b43038ce14d6b0673bdffb2052d879065c94ae ]
+
+Remove the RCU-protected attribute from slot->gmem.file. No need to use RCU
+primitives rcu_assign_pointer()/synchronize_rcu() to update this pointer.
+
+- slot->gmem.file is updated in 3 places:
+  kvm_gmem_bind(), kvm_gmem_unbind(), kvm_gmem_release().
+  All of them are protected by kvm->slots_lock.
+
+- slot->gmem.file is read in 2 paths:
+  (1) kvm_gmem_populate
+        kvm_gmem_get_file
+        __kvm_gmem_get_pfn
+
+  (2) kvm_gmem_get_pfn
+         kvm_gmem_get_file
+         __kvm_gmem_get_pfn
+
+  Path (1) kvm_gmem_populate() requires holding kvm->slots_lock, so
+  slot->gmem.file is protected by the kvm->slots_lock in this path.
+
+  Path (2) kvm_gmem_get_pfn() does not require holding kvm->slots_lock.
+  However, it's also not guarded by rcu_read_lock() and rcu_read_unlock().
+  So synchronize_rcu() in kvm_gmem_unbind()/kvm_gmem_release() actually
+  will not wait for the readers in kvm_gmem_get_pfn() due to lack of RCU
+  read-side critical section.
+
+  The path (2) kvm_gmem_get_pfn() is safe without RCU protection because:
+  a) kvm_gmem_bind() is called on a new memslot, before the memslot is
+     visible to kvm_gmem_get_pfn().
+  b) kvm->srcu ensures that kvm_gmem_unbind() and freeing of a memslot
+     occur after the memslot is no longer visible to kvm_gmem_get_pfn().
+  c) get_file_active() ensures that kvm_gmem_get_pfn() will not access the
+     stale file if kvm_gmem_release() sets it to NULL.  This is because if
+     kvm_gmem_release() occurs before kvm_gmem_get_pfn(), get_file_active()
+     will return NULL; if get_file_active() does not return NULL,
+     kvm_gmem_release() should not occur until after kvm_gmem_get_pfn()
+     releases the file reference.
+
+Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
+Message-ID: <20241104084303.29909-1-yan.y.zhao@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: ae431059e75d ("KVM: guest_memfd: Remove bindings on memslot deletion when gmem is dying")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kvm_host.h |    7 ++++++-
+ virt/kvm/guest_memfd.c   |   34 +++++++++++++++++++++-------------
+ 2 files changed, 27 insertions(+), 14 deletions(-)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -608,7 +608,12 @@ struct kvm_memory_slot {
+ #ifdef CONFIG_KVM_PRIVATE_MEM
+       struct {
+-              struct file __rcu *file;
++              /*
++               * Writes protected by kvm->slots_lock.  Acquiring a
++               * reference via kvm_gmem_get_file() is protected by
++               * either kvm->slots_lock or kvm->srcu.
++               */
++              struct file *file;
+               pgoff_t pgoff;
+       } gmem;
+ #endif
+--- a/virt/kvm/guest_memfd.c
++++ b/virt/kvm/guest_memfd.c
+@@ -261,15 +261,19 @@ static int kvm_gmem_release(struct inode
+        * dereferencing the slot for existing bindings needs to be protected
+        * against memslot updates, specifically so that unbind doesn't race
+        * and free the memslot (kvm_gmem_get_file() will return NULL).
++       *
++       * Since .release is called only when the reference count is zero,
++       * after which file_ref_get() and get_file_active() fail,
++       * kvm_gmem_get_pfn() cannot be using the file concurrently.
++       * file_ref_put() provides a full barrier, and get_file_active() the
++       * matching acquire barrier.
+        */
+       mutex_lock(&kvm->slots_lock);
+       filemap_invalidate_lock(inode->i_mapping);
+       xa_for_each(&gmem->bindings, index, slot)
+-              rcu_assign_pointer(slot->gmem.file, NULL);
+-
+-      synchronize_rcu();
++              WRITE_ONCE(slot->gmem.file, NULL);
+       /*
+        * All in-flight operations are gone and new bindings can be created.
+@@ -298,8 +302,7 @@ static inline struct file *kvm_gmem_get_
+       /*
+        * Do not return slot->gmem.file if it has already been closed;
+        * there might be some time between the last fput() and when
+-       * kvm_gmem_release() clears slot->gmem.file, and you do not
+-       * want to spin in the meanwhile.
++       * kvm_gmem_release() clears slot->gmem.file.
+        */
+       return get_file_active(&slot->gmem.file);
+ }
+@@ -510,11 +513,11 @@ int kvm_gmem_bind(struct kvm *kvm, struc
+       }
+       /*
+-       * No synchronize_rcu() needed, any in-flight readers are guaranteed to
+-       * be see either a NULL file or this new file, no need for them to go
+-       * away.
++       * memslots of flag KVM_MEM_GUEST_MEMFD are immutable to change, so
++       * kvm_gmem_bind() must occur on a new memslot.  Because the memslot
++       * is not visible yet, kvm_gmem_get_pfn() is guaranteed to see the file.
+        */
+-      rcu_assign_pointer(slot->gmem.file, file);
++      WRITE_ONCE(slot->gmem.file, file);
+       slot->gmem.pgoff = start;
+       xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
+@@ -550,8 +553,12 @@ void kvm_gmem_unbind(struct kvm_memory_s
+       filemap_invalidate_lock(file->f_mapping);
+       xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
+-      rcu_assign_pointer(slot->gmem.file, NULL);
+-      synchronize_rcu();
++
++      /*
++       * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
++       * cannot see this memslot.
++       */
++      WRITE_ONCE(slot->gmem.file, NULL);
+       filemap_invalidate_unlock(file->f_mapping);
+       fput(file);
+@@ -563,11 +570,12 @@ static struct folio *__kvm_gmem_get_pfn(
+                                       pgoff_t index, kvm_pfn_t *pfn,
+                                       bool *is_prepared, int *max_order)
+ {
++      struct file *gmem_file = READ_ONCE(slot->gmem.file);
+       struct kvm_gmem *gmem = file->private_data;
+       struct folio *folio;
+-      if (file != slot->gmem.file) {
+-              WARN_ON_ONCE(slot->gmem.file);
++      if (file != gmem_file) {
++              WARN_ON_ONCE(gmem_file);
+               return ERR_PTR(-EFAULT);
+       }
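
A minimal sketch of the replacement pattern the patch above adopts (the
file-scope pointer and helper names are hypothetical): with writers
serialized by a lock and readers ordered by SRCU or a refcount acquire,
tear-free plain accesses suffice; rcu_assign_pointer()/synchronize_rcu()
add nothing when readers never take rcu_read_lock().

#include <linux/compiler.h>
#include <linux/fs.h>

static struct file *gmem_file;	/* all writers hold the same mutex */

static void publish(struct file *f)
{
	WRITE_ONCE(gmem_file, f);	/* tear-free store */
}

static struct file *snapshot(void)
{
	return READ_ONCE(gmem_file);	/* tear-free load */
}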
diff --git a/queue-6.12/kvm-vmx-fix-check-for-valid-gva-on-an-ept-violation.patch b/queue-6.12/kvm-vmx-fix-check-for-valid-gva-on-an-ept-violation.patch
new file mode 100644 (file)
index 0000000..c7bfe45
--- /dev/null
@@ -0,0 +1,39 @@
+From stable+bounces-195384-greg=kroah.com@vger.kernel.org Thu Nov 20 19:19:52 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 13:19:40 -0500
+Subject: KVM: VMX: Fix check for valid GVA on an EPT violation
+To: stable@vger.kernel.org
+Cc: Sukrit Bhatnagar <Sukrit.Bhatnagar@sony.com>, Xiaoyao Li <xiaoyao.li@intel.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120181940.1924069-2-sashal@kernel.org>
+
+From: Sukrit Bhatnagar <Sukrit.Bhatnagar@sony.com>
+
+[ Upstream commit d0164c161923ac303bd843e04ebe95cfd03c6e19 ]
+
+On an EPT violation, bit 7 of the exit qualification is set if the
+guest linear-address is valid. The derived page fault error code
+should not be checked for this bit.
+
+Fixes: f3009482512e ("KVM: VMX: Set PFERR_GUEST_{FINAL,PAGE}_MASK if and only if the GVA is valid")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sukrit Bhatnagar <Sukrit.Bhatnagar@sony.com>
+Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Link: https://patch.msgid.link/20251106052853.3071088-1-Sukrit.Bhatnagar@sony.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/common.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/common.h
++++ b/arch/x86/kvm/vmx/common.h
+@@ -24,7 +24,7 @@ static inline int __vmx_handle_ept_viola
+       error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
+                     ? PFERR_PRESENT_MASK : 0;
+-      if (error_code & EPT_VIOLATION_GVA_IS_VALID)
++      if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+               error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+                             PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
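
A minimal model of the bug (the macro name here is illustrative; the
kernel's EPT_VIOLATION_GVA_IS_VALID covers it): the "GVA is valid" flag is
bit 7 of the raw VMCS exit qualification, so it must be tested there;
checking the derived PFERR_* error code tests an unrelated bit.

#include <linux/bits.h>
#include <linux/types.h>

#define GVA_IS_VALID	BIT(7)	/* exit-qualification bit 7 */

static bool gva_is_valid(unsigned long exit_qualification)
{
	return exit_qualification & GVA_IS_VALID;	/* not error_code */
}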
diff --git a/queue-6.12/kvm-vmx-split-out-guts-of-ept-violation-to-common-exposed-function.patch b/queue-6.12/kvm-vmx-split-out-guts-of-ept-violation-to-common-exposed-function.patch
new file mode 100644 (file)
index 0000000..a7a87a7
--- /dev/null
@@ -0,0 +1,130 @@
+From stable+bounces-195383-greg=kroah.com@vger.kernel.org Thu Nov 20 19:19:49 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 13:19:39 -0500
+Subject: KVM: VMX: Split out guts of EPT violation to common/exposed function
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <sean.j.christopherson@intel.com>, Isaku Yamahata <isaku.yamahata@intel.com>, Rick Edgecombe <rick.p.edgecombe@intel.com>, Yan Zhao <yan.y.zhao@intel.com>, Paolo Bonzini <pbonzini@redhat.com>, Kai Huang <kai.huang@intel.com>, Binbin Wu <binbin.wu@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120181940.1924069-1-sashal@kernel.org>
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+[ Upstream commit c8563d1b69988ef9b6803508e1c95f2aea0a171d ]
+
+A TDX EPT violation differs only in how the information - the GPA and the
+exit qualification - is retrieved.  To share the EPT violation handling
+code, split out the guts of the EPT violation handler so that the VMX/TDX
+exit handlers can call it after retrieving the GPA and exit qualification.
+
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Co-developed-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
+Message-ID: <20241112073528.22042-1-yan.y.zhao@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: d0164c161923 ("KVM: VMX: Fix check for valid GVA on an EPT violation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/common.h |   34 ++++++++++++++++++++++++++++++++++
+ arch/x86/kvm/vmx/vmx.c    |   25 +++----------------------
+ 2 files changed, 37 insertions(+), 22 deletions(-)
+ create mode 100644 arch/x86/kvm/vmx/common.h
+
+--- /dev/null
++++ b/arch/x86/kvm/vmx/common.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++#ifndef __KVM_X86_VMX_COMMON_H
++#define __KVM_X86_VMX_COMMON_H
++
++#include <linux/kvm_host.h>
++
++#include "mmu.h"
++
++static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
++                                           unsigned long exit_qualification)
++{
++      u64 error_code;
++
++      /* Is it a read fault? */
++      error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
++                   ? PFERR_USER_MASK : 0;
++      /* Is it a write fault? */
++      error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
++                    ? PFERR_WRITE_MASK : 0;
++      /* Is it a fetch fault? */
++      error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
++                    ? PFERR_FETCH_MASK : 0;
++      /* ept page table entry is present? */
++      error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
++                    ? PFERR_PRESENT_MASK : 0;
++
++      if (error_code & EPT_VIOLATION_GVA_IS_VALID)
++              error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
++                            PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
++
++      return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
++}
++
++#endif /* __KVM_X86_VMX_COMMON_H */
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -53,6 +53,7 @@
+ #include <trace/events/ipi.h>
+ #include "capabilities.h"
++#include "common.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+ #include "kvm_onhyperv.h"
+@@ -5777,11 +5778,8 @@ static int handle_task_switch(struct kvm
+ static int handle_ept_violation(struct kvm_vcpu *vcpu)
+ {
+-      unsigned long exit_qualification;
++      unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
+       gpa_t gpa;
+-      u64 error_code;
+-
+-      exit_qualification = vmx_get_exit_qual(vcpu);
+       /*
+        * EPT violation happened while executing iret from NMI,
+@@ -5797,23 +5795,6 @@ static int handle_ept_violation(struct k
+       gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+       trace_kvm_page_fault(vcpu, gpa, exit_qualification);
+-      /* Is it a read fault? */
+-      error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
+-                   ? PFERR_USER_MASK : 0;
+-      /* Is it a write fault? */
+-      error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
+-                    ? PFERR_WRITE_MASK : 0;
+-      /* Is it a fetch fault? */
+-      error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
+-                    ? PFERR_FETCH_MASK : 0;
+-      /* ept page table entry is present? */
+-      error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
+-                    ? PFERR_PRESENT_MASK : 0;
+-
+-      if (error_code & EPT_VIOLATION_GVA_IS_VALID)
+-              error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+-                            PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+-
+       /*
+        * Check that the GPA doesn't exceed physical memory limits, as that is
+        * a guest page fault.  We have to emulate the instruction here, because
+@@ -5825,7 +5806,7 @@ static int handle_ept_violation(struct k
+       if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
+               return kvm_emulate_instruction(vcpu, 0);
+-      return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
++      return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification);
+ }
+ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
diff --git a/queue-6.12/mm-percpu-do-not-consider-sleepable-allocations-atomic.patch b/queue-6.12/mm-percpu-do-not-consider-sleepable-allocations-atomic.patch
new file mode 100644 (file)
index 0000000..f748343
--- /dev/null
@@ -0,0 +1,87 @@
+From mambaxin@163.com Mon Nov 17 10:37:01 2025
+From: mambaxin@163.com
+Date: Mon, 17 Nov 2025 17:36:04 +0800
+Subject: mm, percpu: do not consider sleepable allocations atomic
+To: dennis@kernel.org, tj@kernel.org, cl@linux.com, akpm@linux-foundation.org, gregkh@linuxfoundation.org, mhocko@suse.com
+Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, stable@vger.kernel.org, Vlastimil Babka <vbabka@suse.cz>, Filipe David Manana <fdmanana@suse.com>, chenxin <chenxinxin@xiaomi.com>
+Message-ID: <20251117093604.551707-1-mambaxin@163.com>
+
+From: Michal Hocko <mhocko@suse.com>
+
+[ Upstream commit 9a5b183941b52f84c0f9e5f27ce44e99318c9e0f ]
+
+28307d938fb2 ("percpu: make pcpu_alloc() aware of current gfp context")
+has fixed a reclaim recursion for scoped GFP_NOFS context.  It has done
+that by avoiding taking pcpu_alloc_mutex.  This is a correct solution, as
+the worker context - which has full GFP_KERNEL allocation/reclaim power and
+uses the same lock - cannot block the NOFS pcpu_alloc caller.
+
+On the other hand this is a very conservative approach that could lead to
+failures because the pcpu_alloc lockless implementation is quite limited.
+
+We have a bug report about premature failures when scsi array of 193
+devices is scanned.  Sometimes (not consistently) the scanning aborts
+because the iscsid daemon fails to create the queue for a random scsi
+device during the scan.  iscsid itself is running with PR_SET_IO_FLUSHER
+set so all allocations from this process context are GFP_NOIO.  This in
+turn makes any pcpu_alloc lockless (without pcpu_alloc_mutex), which leads
+to premature failures.
+
+It has turned out that iscsid has worked around this by dropping
+PR_SET_IO_FLUSHER (https://github.com/open-iscsi/open-iscsi/pull/382) when
+scanning the host.  But we can do better in this case on the kernel side
+and use pcpu_alloc_mutex for NOIO and NOFS constrained allocation scopes
+too.  We just need the WQ worker to never trigger IO/FS reclaim.  Achieve
+that by enforcing scoped GFP_NOIO for the whole execution of
+pcpu_balance_workfn (this will imply NOFS constrain as well).  This will
+remove the dependency chain and preserve the full allocation power of the
+pcpu_alloc call.
+
+While at it, make is_atomic really test for blockable allocations.
+
+Link: https://lkml.kernel.org/r/20250206122633.167896-1-mhocko@kernel.org
+Fixes: 28307d938fb2 ("percpu: make pcpu_alloc() aware of current gfp context")
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Dennis Zhou <dennis@kernel.org>
+Cc: Filipe David Manana <fdmanana@suse.com>
+Cc: Tejun Heo <tj@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: chenxin <chenxinxin@xiaomi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/percpu.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1758,7 +1758,7 @@ void __percpu *pcpu_alloc_noprof(size_t
+       gfp = current_gfp_context(gfp);
+       /* whitelisted flags that can be passed to the backing allocators */
+       pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+-      is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
++      is_atomic = !gfpflags_allow_blocking(gfp);
+       do_warn = !(gfp & __GFP_NOWARN);
+       /*
+@@ -2203,7 +2203,12 @@ static void pcpu_balance_workfn(struct w
+        * to grow other chunks.  This then gives pcpu_reclaim_populated() time
+        * to move fully free chunks to the active list to be freed if
+        * appropriate.
++       *
++       * Enforce GFP_NOIO allocations because we have pcpu_alloc users
++       * constrained to GFP_NOIO/NOFS contexts and they could form lock
++       * dependency through pcpu_alloc_mutex
+        */
++      unsigned int flags = memalloc_noio_save();
+       mutex_lock(&pcpu_alloc_mutex);
+       spin_lock_irq(&pcpu_lock);
+@@ -2214,6 +2219,7 @@ static void pcpu_balance_workfn(struct w
+       spin_unlock_irq(&pcpu_lock);
+       mutex_unlock(&pcpu_alloc_mutex);
++      memalloc_noio_restore(flags);
+ }
+ /**
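
A sketch of the scoped-GFP technique the fix applies, with a hypothetical
worker body: every allocation made between save and restore is implicitly
demoted to GFP_NOIO, so the worker can never recurse into I/O reclaim and
may safely share pcpu_alloc_mutex with NOIO/NOFS-constrained callers.

#include <linux/sched/mm.h>

static void balance_work_body(void)
{
	unsigned int flags = memalloc_noio_save();

	/* take mutexes and allocate here; all allocations are NOIO */

	memalloc_noio_restore(flags);
}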
diff --git a/queue-6.12/mptcp-fix-msg_peek-stream-corruption.patch b/queue-6.12/mptcp-fix-msg_peek-stream-corruption.patch
new file mode 100644 (file)
index 0000000..602cc3e
--- /dev/null
@@ -0,0 +1,122 @@
+From stable+bounces-192373-greg=kroah.com@vger.kernel.org Tue Nov  4 13:17:04 2025
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue,  4 Nov 2025 13:15:16 +0100
+Subject: mptcp: fix MSG_PEEK stream corruption
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, Paolo Abeni <pabeni@redhat.com>, Geliang Tang <geliang@kernel.org>, Mat Martineau <martineau@kernel.org>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251104121515.1093006-2-matttbe@kernel.org>
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 8e04ce45a8db7a080220e86e249198fa676b83dc ]
+
+If a MSG_PEEK | MSG_WAITALL read operation consumes all the bytes in the
+receive queue and recvmsg() needs to wait for more data - i.e. it's a
+blocking one - upon arrival of the next packet the MPTCP protocol will
+start again copying the oldest data present in the receive queue,
+corrupting the data stream.
+
+Address the issue by explicitly tracking the peeked sequence number,
+restarting from the last peeked byte.
+
+Fixes: ca4fb892579f ("mptcp: add MSG_PEEK support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Tested-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251028-net-mptcp-send-timeout-v1-2-38ffff5a9ec8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+Note: this is the patch Sasha sent for v6.6, which applies on v6.12
+without conflicts. For v6.12, Sasha sent another version with dependencies
+that caused some issues, see:
+ https://lore.kernel.org/bbe84711-95b2-4257-9f01-560b4473a3da@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |   36 +++++++++++++++++++++++++-----------
+ 1 file changed, 25 insertions(+), 11 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1977,19 +1977,35 @@ static void mptcp_rcv_space_adjust(struc
+ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+                               struct msghdr *msg,
+-                              size_t len, int flags,
++                              size_t len, int flags, int copied_total,
+                               struct scm_timestamping_internal *tss,
+                               int *cmsg_flags)
+ {
+       struct sk_buff *skb, *tmp;
++      int total_data_len = 0;
+       int copied = 0;
+       skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
+-              u32 offset = MPTCP_SKB_CB(skb)->offset;
++              u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
+               u32 data_len = skb->len - offset;
+-              u32 count = min_t(size_t, len - copied, data_len);
++              u32 count;
+               int err;
++              if (flags & MSG_PEEK) {
++                      /* skip already peeked skbs */
++                      if (total_data_len + data_len <= copied_total) {
++                              total_data_len += data_len;
++                              continue;
++                      }
++
++                      /* skip the already peeked data in the current skb */
++                      delta = copied_total - total_data_len;
++                      offset += delta;
++                      data_len -= delta;
++              }
++
++              count = min_t(size_t, len - copied, data_len);
++
+               if (!(flags & MSG_TRUNC)) {
+                       err = skb_copy_datagram_msg(skb, offset, msg, count);
+                       if (unlikely(err < 0)) {
+@@ -2006,22 +2022,19 @@ static int __mptcp_recvmsg_mskq(struct m
+               copied += count;
+-              if (count < data_len) {
+-                      if (!(flags & MSG_PEEK)) {
++              if (!(flags & MSG_PEEK)) {
++                      msk->bytes_consumed += count;
++                      if (count < data_len) {
+                               MPTCP_SKB_CB(skb)->offset += count;
+                               MPTCP_SKB_CB(skb)->map_seq += count;
+-                              msk->bytes_consumed += count;
++                              break;
+                       }
+-                      break;
+-              }
+-              if (!(flags & MSG_PEEK)) {
+                       /* we will bulk release the skb memory later */
+                       skb->destructor = NULL;
+                       WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
+                       __skb_unlink(skb, &msk->receive_queue);
+                       __kfree_skb(skb);
+-                      msk->bytes_consumed += count;
+               }
+               if (copied >= len)
+@@ -2245,7 +2258,8 @@ static int mptcp_recvmsg(struct sock *sk
+       while (copied < len) {
+               int err, bytes_read;
+-              bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
++              bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags,
++                                                copied, &tss, &cmsg_flags);
+               if (unlikely(bytes_read < 0)) {
+                       if (!copied)
+                               copied = bytes_read;
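
A simplified model of the fix, using plain length arrays instead of skbs
(function and parameter names are hypothetical): given 'peeked' bytes
already returned by earlier MSG_PEEK passes, find the buffer and
intra-buffer offset where the next pass must resume, instead of restarting
from the oldest data.

#include <linux/types.h>

static size_t peek_resume(const u32 *lens, size_t n, u32 peeked, u32 *off)
{
	u32 seen = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (seen + lens[i] <= peeked) {	/* buffer fully peeked */
			seen += lens[i];
			continue;
		}
		*off = peeked - seen;	/* skip the already-peeked head */
		return i;
	}
	*off = 0;
	return n;	/* everything queued so far has been peeked */
}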
diff --git a/queue-6.12/net-netpoll-fix-incorrect-refcount-handling-causing-incorrect-cleanup.patch b/queue-6.12/net-netpoll-fix-incorrect-refcount-handling-causing-incorrect-cleanup.patch
new file mode 100644 (file)
index 0000000..8ffc7e7
--- /dev/null
@@ -0,0 +1,84 @@
+From stable+bounces-195410-greg=kroah.com@vger.kernel.org Thu Nov 20 20:43:13 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 14:43:03 -0500
+Subject: net: netpoll: fix incorrect refcount handling causing incorrect cleanup
+To: stable@vger.kernel.org
+Cc: Breno Leitao <leitao@debian.org>, Jay Vosburgh <jv@jvosburgh.net>, Simon Horman <horms@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120194303.2293083-3-sashal@kernel.org>
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 49c8d2c1f94cc2f4d1a108530d7ba52614b874c2 ]
+
+commit efa95b01da18 ("netpoll: fix use after free") incorrectly
+ignored the refcount and prematurely set dev->npinfo to NULL during
+netpoll cleanup, leading to improper behavior and memory leaks.
+
+Scenario causing lack of proper cleanup:
+
+1) A netpoll is associated with a NIC (e.g., eth0) and netdev->npinfo is
+   allocated, and refcnt = 1
+   - Keep in mind that npinfo is shared among all netpoll instances. In
+     this case, there is just one.
+
+2) Another netpoll is also associated with the same NIC and
+   npinfo->refcnt += 1.
+   - Now dev->npinfo->refcnt = 2;
+   - There is just one npinfo associated to the netdev.
+
+3) When the first netpolls goes to clean up:
+   - The first cleanup succeeds and clears np->dev->npinfo, ignoring
+     refcnt.
+     - It basically calls `RCU_INIT_POINTER(np->dev->npinfo, NULL);`
+   - Set dev->npinfo = NULL, without proper cleanup
+   - ->ndo_netpoll_cleanup() is not called either
+
+4) Now the second target tries to clean up
+   - The second cleanup fails because np->dev->npinfo is already NULL.
+     * In this case, ops->ndo_netpoll_cleanup() was never called, and
+       the skb pool is not cleaned up either (for the second netpoll
+       instance)
+  - This leaks npinfo and skbpool skbs, which is clearly reported by
+    kmemleak.
+
+Revert commit efa95b01da18 ("netpoll: fix use after free") and add
+clarifying comments emphasizing that npinfo cleanup should only happen
+once the refcount reaches zero, ensuring stable and correct netpoll
+behavior.
+
+Cc: <stable@vger.kernel.org> # 3.17.x
+Cc: Jay Vosburgh <jv@jvosburgh.net>
+Fixes: efa95b01da18 ("netpoll: fix use after free")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20251107-netconsole_torture-v10-1-749227b55f63@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/netpoll.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -841,6 +841,10 @@ void __netpoll_cleanup(struct netpoll *n
+       if (!npinfo)
+               return;
++      /* At this point, there is a single npinfo instance per netdevice, and
++       * its refcnt tracks how many netpoll structures are linked to it. We
++       * only perform npinfo cleanup when the refcnt decrements to zero.
++       */
+       if (refcount_dec_and_test(&npinfo->refcnt)) {
+               const struct net_device_ops *ops;
+@@ -850,8 +854,7 @@ void __netpoll_cleanup(struct netpoll *n
+               RCU_INIT_POINTER(np->dev->npinfo, NULL);
+               call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
+-      } else
+-              RCU_INIT_POINTER(np->dev->npinfo, NULL);
++      }
+       skb_pool_flush(np);
+ }
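
A hypothetical illustration of the rule the revert restores (generic type,
not the netpoll structures): only the caller that drops the last reference
may clear the shared pointer and free the object; earlier droppers must
leave it intact so remaining users still find it.

#include <linux/refcount.h>
#include <linux/slab.h>

struct shared_info {
	refcount_t refcnt;
	/* ... state shared by all users ... */
};

static void put_shared(struct shared_info **slot)
{
	struct shared_info *s = *slot;

	if (refcount_dec_and_test(&s->refcnt)) {
		*slot = NULL;	/* last user performs the cleanup */
		kfree(s);
	}
	/* otherwise: leave *slot alone for the remaining users */
}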
diff --git a/queue-6.12/net-netpoll-flush-skb-pool-during-cleanup.patch b/queue-6.12/net-netpoll-flush-skb-pool-during-cleanup.patch
new file mode 100644 (file)
index 0000000..37c8f9b
--- /dev/null
@@ -0,0 +1,74 @@
+From stable+bounces-195411-greg=kroah.com@vger.kernel.org Thu Nov 20 20:43:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 14:43:02 -0500
+Subject: net: netpoll: flush skb pool during cleanup
+To: stable@vger.kernel.org
+Cc: Breno Leitao <leitao@debian.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120194303.2293083-2-sashal@kernel.org>
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 6c59f16f1770481a6ee684720ec55b1e38b3a4b2 ]
+
+The netpoll subsystem maintains a pool of 32 pre-allocated SKBs per
+instance, but these SKBs are not freed when the netpoll user is brought
+down. This leads to memory waste as these buffers remain allocated but
+unused.
+
+Add skb_pool_flush() to properly clean up these SKBs when netconsole is
+terminated, improving memory efficiency.
+
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-2-9be9f52a8b69@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 49c8d2c1f94c ("net: netpoll: fix incorrect refcount handling causing incorrect cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/netpoll.c |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -536,6 +536,14 @@ static int netpoll_parse_ip_addr(const c
+       return -1;
+ }
++static void skb_pool_flush(struct netpoll *np)
++{
++      struct sk_buff_head *skb_pool;
++
++      skb_pool = &np->skb_pool;
++      skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
++}
++
+ int netpoll_parse_options(struct netpoll *np, char *opt)
+ {
+       char *cur=opt, *delim;
+@@ -784,7 +792,7 @@ put_noaddr:
+       err = __netpoll_setup(np, ndev);
+       if (err)
+-              goto put;
++              goto flush;
+       rtnl_unlock();
+       /* Make sure all NAPI polls which started before dev->npinfo
+@@ -795,6 +803,8 @@ put_noaddr:
+       return 0;
++flush:
++      skb_pool_flush(np);
+ put:
+       DEBUG_NET_WARN_ON_ONCE(np->dev);
+       if (ip_overwritten)
+@@ -842,6 +852,8 @@ void __netpoll_cleanup(struct netpoll *n
+               call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
+       } else
+               RCU_INIT_POINTER(np->dev->npinfo, NULL);
++
++      skb_pool_flush(np);
+ }
+ EXPORT_SYMBOL_GPL(__netpoll_cleanup);
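
A sketch of the cleanup the patch adds, assuming a per-target pool field:
drop every pre-allocated skb still queued once the target goes away;
SKB_CONSUMED records this as normal consumption rather than a drop.

#include <linux/skbuff.h>

static void pool_flush(struct sk_buff_head *pool)
{
	skb_queue_purge_reason(pool, SKB_CONSUMED);
}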
diff --git a/queue-6.12/net-netpoll-individualize-the-skb-pool.patch b/queue-6.12/net-netpoll-individualize-the-skb-pool.patch
new file mode 100644 (file)
index 0000000..5d88a53
--- /dev/null
@@ -0,0 +1,136 @@
+From stable+bounces-195409-greg=kroah.com@vger.kernel.org Thu Nov 20 20:43:11 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 14:43:01 -0500
+Subject: net: netpoll: Individualize the skb pool
+To: stable@vger.kernel.org
+Cc: Breno Leitao <leitao@debian.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251120194303.2293083-1-sashal@kernel.org>
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 221a9c1df790fa711d65daf5ba05d0addc279153 ]
+
+The current implementation of the netpoll system uses a global skb
+pool, which can lead to inefficient memory usage and
+waste when targets are disabled or no longer in use.
+
+This can result in a significant amount of memory being unnecessarily
+allocated and retained, potentially causing performance issues and
+limiting the availability of resources for other system components.
+
+Modify the netpoll system to assign a skb pool to each target instead of
+using a global one.
+
+This approach allows for more fine-grained control over memory
+allocation and deallocation, ensuring that resources are only allocated
+and retained as needed.
+
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-1-9be9f52a8b69@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 49c8d2c1f94c ("net: netpoll: fix incorrect refcount handling causing incorrect cleanup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netpoll.h |    1 +
+ net/core/netpoll.c      |   31 +++++++++++++------------------
+ 2 files changed, 14 insertions(+), 18 deletions(-)
+
+--- a/include/linux/netpoll.h
++++ b/include/linux/netpoll.h
+@@ -32,6 +32,7 @@ struct netpoll {
+       bool ipv6;
+       u16 local_port, remote_port;
+       u8 remote_mac[ETH_ALEN];
++      struct sk_buff_head skb_pool;
+ };
+ struct netpoll_info {
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -45,9 +45,6 @@
+ #define MAX_UDP_CHUNK 1460
+ #define MAX_SKBS 32
+-
+-static struct sk_buff_head skb_pool;
+-
+ #define USEC_PER_POLL 50
+ #define MAX_SKB_SIZE                                                  \
+@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_devi
+               up(&ni->dev_lock);
+ }
+-static void refill_skbs(void)
++static void refill_skbs(struct netpoll *np)
+ {
++      struct sk_buff_head *skb_pool;
+       struct sk_buff *skb;
+       unsigned long flags;
+-      spin_lock_irqsave(&skb_pool.lock, flags);
+-      while (skb_pool.qlen < MAX_SKBS) {
++      skb_pool = &np->skb_pool;
++
++      spin_lock_irqsave(&skb_pool->lock, flags);
++      while (skb_pool->qlen < MAX_SKBS) {
+               skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
+               if (!skb)
+                       break;
+-              __skb_queue_tail(&skb_pool, skb);
++              __skb_queue_tail(skb_pool, skb);
+       }
+-      spin_unlock_irqrestore(&skb_pool.lock, flags);
++      spin_unlock_irqrestore(&skb_pool->lock, flags);
+ }
+ static void zap_completion_queue(void)
+@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct n
+       struct sk_buff *skb;
+       zap_completion_queue();
+-      refill_skbs();
++      refill_skbs(np);
+ repeat:
+       skb = alloc_skb(len, GFP_ATOMIC);
+       if (!skb)
+-              skb = skb_dequeue(&skb_pool);
++              skb = skb_dequeue(&np->skb_pool);
+       if (!skb) {
+               if (++count < 10) {
+@@ -678,6 +678,8 @@ int netpoll_setup(struct netpoll *np)
+       struct in_device *in_dev;
+       int err;
++      skb_queue_head_init(&np->skb_pool);
++
+       rtnl_lock();
+       if (np->dev_name[0]) {
+               struct net *net = current->nsproxy->net_ns;
+@@ -778,7 +780,7 @@ put_noaddr:
+       }
+       /* fill up the skb queue */
+-      refill_skbs();
++      refill_skbs(np);
+       err = __netpoll_setup(np, ndev);
+       if (err)
+@@ -804,13 +806,6 @@ unlock:
+ }
+ EXPORT_SYMBOL(netpoll_setup);
+-static int __init netpoll_init(void)
+-{
+-      skb_queue_head_init(&skb_pool);
+-      return 0;
+-}
+-core_initcall(netpoll_init);
+-
+ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+ {
+       struct netpoll_info *npinfo =
diff --git a/queue-6.12/rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch b/queue-6.12/rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch
new file mode 100644 (file)
index 0000000..bf369bb
--- /dev/null
@@ -0,0 +1,78 @@
+From stable+bounces-192781-greg=kroah.com@vger.kernel.org Sat Nov  8 15:08:02 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  8 Nov 2025 09:07:48 -0500
+Subject: rust: kbuild: treat `build_error` and `rustdoc` as kernel objects
+To: stable@vger.kernel.org
+Cc: Miguel Ojeda <ojeda@kernel.org>, Alice Ryhl <aliceryhl@google.com>, "Justin M. Forbes" <jforbes@fedoraproject.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251108140748.129017-1-sashal@kernel.org>
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit 16c43a56b79e2c3220b043236369a129d508c65a ]
+
+Even though `build_error` is not normally a kernel object, it should
+still be treated as one so that we pass the same flags. Similarly,
+`rustdoc` targets are never kernel objects, but we need to treat them
+as such.
+
+Otherwise, starting with Rust 1.91.0 (released 2025-10-30), `rustc`
+will complain about missing sanitizer flags since `-Zsanitizer` is a
+target modifier too [1]:
+
+    error: mixing `-Zsanitizer` will cause an ABI mismatch in crate `build_error`
+     --> rust/build_error.rs:3:1
+      |
+    3 | //! Build-time error.
+      | ^
+      |
+      = help: the `-Zsanitizer` flag modifies the ABI so Rust crates compiled with different values of this flag cannot be used together safely
+      = note: unset `-Zsanitizer` in this crate is incompatible with `-Zsanitizer=kernel-address` in dependency `core`
+      = help: set `-Zsanitizer=kernel-address` in this crate or unset `-Zsanitizer` in `core`
+      = help: if you are sure this will not cause problems, you may use `-Cunsafe-allow-abi-mismatch=sanitizer` to silence this error
+
+Thus explicitly mark them as kernel objects.
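+
+The mechanism is a target-specific `private` variable, as in the hunks
+below; a hypothetical sketch of the gating idiom (not the exact
+Makefile):
+
+    # hypothetical: kernel-object flags gated on the marker
+    sanitizer-flags := $(if $(is-kernel-object),-Zsanitizer=kernel-address)
+    # only targets marked this way inherit those flags
+    $(obj)/build_error.o: private is-kernel-object := y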
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Link: https://github.com/rust-lang/rust/pull/138736 [1]
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Tested-by: Justin M. Forbes <jforbes@fedoraproject.org>
+Link: https://patch.msgid.link/20251102212853.1505384-1-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/Makefile |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -107,12 +107,18 @@ rustdoc-core: private rustc_target_flags
+ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE
+       +$(call if_changed,rustdoc)
++# Even if `rustdoc` targets are not kernel objects, they should still be
++# treated as such so that we pass the same flags. Otherwise, for instance,
++# `rustdoc` will complain about missing sanitizer flags causing an ABI mismatch.
++rustdoc-compiler_builtins: private is-kernel-object := y
+ rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+       +$(call if_changed,rustdoc)
++rustdoc-ffi: private is-kernel-object := y
+ rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE
+       +$(call if_changed,rustdoc)
++rustdoc-kernel: private is-kernel-object := y
+ rustdoc-kernel: private rustc_target_flags = --extern ffi \
+     --extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
+     --extern bindings --extern uapi
+@@ -433,6 +439,10 @@ $(obj)/compiler_builtins.o: private rust
+ $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+       +$(call if_changed_rule,rustc_library)
++# Even if normally `build_error` is not a kernel object, it should still be
++# treated as such so that we pass the same flags. Otherwise, for instance,
++# `rustc` will complain about missing sanitizer flags causing an ABI mismatch.
++$(obj)/build_error.o: private is-kernel-object := y
+ $(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
+       +$(call if_changed_rule,rustc_library)
diff --git a/queue-6.12/rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch b/queue-6.12/rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch
new file mode 100644 (file)
index 0000000..d0baafa
--- /dev/null
@@ -0,0 +1,91 @@
+From stable+bounces-192779-greg=kroah.com@vger.kernel.org Sat Nov  8 15:06:04 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  8 Nov 2025 09:03:52 -0500
+Subject: rust: kbuild: workaround `rustdoc` doctests modifier bug
+To: stable@vger.kernel.org
+Cc: Miguel Ojeda <ojeda@kernel.org>, Alice Ryhl <aliceryhl@google.com>, "Justin M. Forbes" <jforbes@fedoraproject.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251108140352.127731-1-sashal@kernel.org>
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit fad472efab0a805dd939f017c5b8669a786a4bcf ]
+
+The `rustdoc` modifiers bug [1] was fixed in Rust 1.90.0 [2], for which
+we added a workaround in commit abbf9a449441 ("rust: workaround `rustdoc`
+target modifiers bug").
+
+However, `rustdoc`'s doctest generation still has a similar issue [3],
+which is being fixed at [4]. It does not affect us because we apply the
+workaround to both. Now, starting with Rust 1.91.0 (released
+2025-10-30), `-Zsanitizer` is a target modifier too [5], which means we
+fail with:
+
+      RUSTDOC TK rust/kernel/lib.rs
+    error: mixing `-Zsanitizer` will cause an ABI mismatch in crate `kernel`
+     --> rust/kernel/lib.rs:3:1
+      |
+    3 | //! The `kernel` crate.
+      | ^
+      |
+      = help: the `-Zsanitizer` flag modifies the ABI so Rust crates compiled with different values of this flag cannot be used together safely
+      = note: unset `-Zsanitizer` in this crate is incompatible with `-Zsanitizer=kernel-address` in dependency `core`
+      = help: set `-Zsanitizer=kernel-address` in this crate or unset `-Zsanitizer` in `core`
+      = help: if you are sure this will not cause problems, you may use `-Cunsafe-allow-abi-mismatch=sanitizer` to silence this error
+
+A simple way around it is to add the sanitizer to the list in the existing
+workaround (especially if we had not started to pass the sanitizer
+flags in the previous commit, since in that case that would not be
+necessary). However, that still applies the workaround in more cases
+than necessary.
+
+Instead, only modify the doctests flags to ignore the check for
+sanitizers, so that it is more local (and thus the compiler keeps checking
+it for us in the normal `rustdoc` calls). Since the previous commit
+already treated the `rustdoc` calls as kernel objects, this should allow
+us to easily remove this workaround when the time comes.
+
+By the way, the `-Cunsafe-allow-abi-mismatch` flag overwrites previous
+ones rather than appending, so it needs to be all done in the same flag.
+Moreover, unknown modifiers are rejected, and thus we have to gate based
+on the version too.
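+
+For illustration, with a new enough `rustc` the two definitions below
+expand to a single combined flag for doctests:
+
+    -Cunsafe-allow-abi-mismatch=fixed-x18,sanitizer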
+
+Finally, `-Zsanitizer-cfi-normalize-integers` is not affected (in Rust
+1.91.0), so it is not needed in the workaround for the moment.
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Link: https://github.com/rust-lang/rust/issues/144521 [1]
+Link: https://github.com/rust-lang/rust/pull/144523 [2]
+Link: https://github.com/rust-lang/rust/issues/146465 [3]
+Link: https://github.com/rust-lang/rust/pull/148068 [4]
+Link: https://github.com/rust-lang/rust/pull/138736 [5]
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Tested-by: Justin M. Forbes <jforbes@fedoraproject.org>
+Link: https://patch.msgid.link/20251102212853.1505384-2-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ added --remap-path-prefix comments missing in stable branch ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/Makefile |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -59,6 +59,8 @@ core-edition := $(if $(call rustc-min-ve
+ # the time being (https://github.com/rust-lang/rust/issues/144521).
+ rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18)
++# Similarly, for doctests (https://github.com/rust-lang/rust/issues/146465).
++doctests_modifiers_workaround := $(rustdoc_modifiers_workaround)$(if $(call rustc-min-version,109100),$(comma)sanitizer)
+ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+       cmd_rustdoc = \
+       OBJTREE=$(abspath $(objtree)) \
+@@ -189,7 +191,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC
+               --extern bindings --extern uapi \
+               --no-run --crate-name kernel -Zunstable-options \
+               --sysroot=/dev/null \
+-              $(rustdoc_modifiers_workaround) \
++              $(doctests_modifiers_workaround) \
+               --test-builder $(objtree)/scripts/rustdoc_test_builder \
+               $< $(rustdoc_test_kernel_quiet); \
+       $(objtree)/scripts/rustdoc_test_gen
index 3ee926f183af3173343ba30cc65ee05232d542fc..5c567e99c64ce24285aded1362fe01a073f740d5 100644 (file)
@@ -156,3 +156,19 @@ selftests-mptcp-join-endpoints-longer-transfer.patch
 selftests-mptcp-connect-trunc-read-all-recv-data.patch
 selftests-mptcp-join-userspace-longer-transfer.patch
 selftests-mptcp-join-properly-kill-background-tasks.patch
+mptcp-fix-msg_peek-stream-corruption.patch
+wifi-cfg80211-add-an-hrtimer-based-delayed-work-item.patch
+wifi-mac80211-use-wiphy_hrtimer_work-for-csa.switch_work.patch
+mm-percpu-do-not-consider-sleepable-allocations-atomic.patch
+kvm-guest_memfd-pass-index-not-gfn-to-__kvm_gmem_get_pfn.patch
+kvm-guest_memfd-remove-rcu-protected-attribute-from-slot-gmem.file.patch
+kvm-guest_memfd-remove-bindings-on-memslot-deletion-when-gmem-is-dying.patch
+net-netpoll-individualize-the-skb-pool.patch
+net-netpoll-flush-skb-pool-during-cleanup.patch
+net-netpoll-fix-incorrect-refcount-handling-causing-incorrect-cleanup.patch
+kvm-vmx-split-out-guts-of-ept-violation-to-common-exposed-function.patch
+kvm-vmx-fix-check-for-valid-gva-on-an-ept-violation.patch
+alsa-hda-fix-missing-pointer-check-in-hda_component_manager_init-function.patch
+io_uring-napi-fix-io_napi_entry-rcu-accesses.patch
+rust-kbuild-treat-build_error-and-rustdoc-as-kernel-objects.patch
+rust-kbuild-workaround-rustdoc-doctests-modifier-bug.patch
diff --git a/queue-6.12/wifi-cfg80211-add-an-hrtimer-based-delayed-work-item.patch b/queue-6.12/wifi-cfg80211-add-an-hrtimer-based-delayed-work-item.patch
new file mode 100644 (file)
index 0000000..02b9860
--- /dev/null
@@ -0,0 +1,228 @@
+From stable+bounces-192844-greg=kroah.com@vger.kernel.org Sun Nov  9 15:21:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun,  9 Nov 2025 09:21:11 -0500
+Subject: wifi: cfg80211: add an hrtimer based delayed work item
+To: stable@vger.kernel.org
+Cc: Benjamin Berg <benjamin.berg@intel.com>, Johannes Berg <johannes.berg@intel.com>, Miri Korenblit <miriam.rachel.korenblit@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251109142111.444524-1-sashal@kernel.org>
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit 7ceba45a6658ce637da334cd0ebf27f4ede6c0fe ]
+
+The normal timer mechanism assumes that timeouts further in the future
+need a lower accuracy. As an example, the granularity for a timer
+scheduled 4096 ms in the future on a 1000 Hz system is already 512 ms.
+This granularity is perfectly sufficient for e.g. timeouts, but there
+are other types of events that will happen at a future point in time and
+require a higher accuracy.
+
+Add a new wiphy_hrtimer_work type that uses an hrtimer internally. The
+API is almost identical to the existing wiphy_delayed_work and it can be
+used as a drop-in replacement after minor adjustments. The work will be
+scheduled relative to the current time with a slack of 1 millisecond.
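+
+A minimal usage sketch (hypothetical caller; `my_handler` stands in for
+a real wiphy_work_func_t):
+
+    struct wiphy_hrtimer_work hrwork;
+
+    wiphy_hrtimer_work_init(&hrwork, my_handler);
+    /* fires about 5000 ms from now, with roughly 1 ms of slack */
+    wiphy_hrtimer_work_queue(wiphy, &hrwork, ms_to_ktime(5000));
+    /* later, under wiphy_lock() */
+    wiphy_hrtimer_work_cancel(wiphy, &hrwork);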
+
+CC: stable@vger.kernel.org # 6.4+
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://patch.msgid.link/20251028125710.7f13a2adc5eb.I01b5af0363869864b0580d9c2a1770bafab69566@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ replaced hrtimer_setup() call with hrtimer_init() and manual timer.function assignment ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/cfg80211.h |   78 +++++++++++++++++++++++++++++++++++++++++++++++++
+ net/wireless/core.c    |   56 +++++++++++++++++++++++++++++++++++
+ net/wireless/trace.h   |   21 +++++++++++++
+ 3 files changed, 155 insertions(+)
+
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -6105,6 +6105,11 @@ static inline void wiphy_delayed_work_in
+  * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can
+  * use just cancel_work() instead of cancel_work_sync(), it requires
+  * being in a section protected by wiphy_lock().
++ *
++ * Note that these are scheduled with a timer where the accuracy
++ * becomes less the longer in the future the scheduled timer is. Use
++ * wiphy_hrtimer_work_queue() if the timer must not be late by more
++ * than approximately 10 percent.
+  */
+ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+                             struct wiphy_delayed_work *dwork,
+@@ -6176,6 +6181,79 @@ void wiphy_delayed_work_flush(struct wip
+ bool wiphy_delayed_work_pending(struct wiphy *wiphy,
+                               struct wiphy_delayed_work *dwork);
++struct wiphy_hrtimer_work {
++      struct wiphy_work work;
++      struct wiphy *wiphy;
++      struct hrtimer timer;
++};
++
++enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t);
++
++static inline void wiphy_hrtimer_work_init(struct wiphy_hrtimer_work *hrwork,
++                                         wiphy_work_func_t func)
++{
++      hrtimer_init(&hrwork->timer, CLOCK_BOOTTIME, HRTIMER_MODE_REL);
++      hrwork->timer.function = wiphy_hrtimer_work_timer;
++      wiphy_work_init(&hrwork->work, func);
++}
++
++/**
++ * wiphy_hrtimer_work_queue - queue hrtimer work for the wiphy
++ * @wiphy: the wiphy to queue for
++ * @hrwork: the high resolution timer worker
++ * @delay: the delay given as a ktime_t
++ *
++ * Please refer to wiphy_delayed_work_queue(). The difference is that
++ * the hrtimer work uses a high resolution timer for scheduling. This
++ * may be needed if timeouts might be scheduled further in the future
++ * and the accuracy of the normal timer is not sufficient.
++ *
++ * Expect a delay of a few milliseconds as the timer is scheduled
++ * with some slack and some more time may pass between queueing the
++ * work and its start.
++ */
++void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
++                            struct wiphy_hrtimer_work *hrwork,
++                            ktime_t delay);
++
++/**
++ * wiphy_hrtimer_work_cancel - cancel previously queued hrtimer work
++ * @wiphy: the wiphy, for debug purposes
++ * @hrtimer: the hrtimer work to cancel
++ *
++ * Cancel the work *without* waiting for it, this assumes being
++ * called under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
++                             struct wiphy_hrtimer_work *hrtimer);
++
++/**
++ * wiphy_hrtimer_work_flush - flush previously queued hrtimer work
++ * @wiphy: the wiphy, for debug purposes
++ * @hrwork: the hrtimer work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
++                            struct wiphy_hrtimer_work *hrwork);
++
++/**
++ * wiphy_hrtimer_work_pending - Find out whether a wiphy hrtimer
++ * work item is currently pending.
++ *
++ * @wiphy: the wiphy, for debug purposes
++ * @hrwork: the hrtimer work in question
++ *
++ * Return: true if timer is pending, false otherwise
++ *
++ * Please refer to the wiphy_delayed_work_pending() documentation as
++ * this is the equivalent function for hrtimer based delayed work
++ * items.
++ */
++bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
++                              struct wiphy_hrtimer_work *hrwork);
++
+ /**
+  * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+  *
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1763,6 +1763,62 @@ bool wiphy_delayed_work_pending(struct w
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending);
++enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t)
++{
++      struct wiphy_hrtimer_work *hrwork =
++              container_of(t, struct wiphy_hrtimer_work, timer);
++
++      wiphy_work_queue(hrwork->wiphy, &hrwork->work);
++
++      return HRTIMER_NORESTART;
++}
++EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_timer);
++
++void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
++                            struct wiphy_hrtimer_work *hrwork,
++                            ktime_t delay)
++{
++      trace_wiphy_hrtimer_work_queue(wiphy, &hrwork->work, delay);
++
++      if (!delay) {
++              hrtimer_cancel(&hrwork->timer);
++              wiphy_work_queue(wiphy, &hrwork->work);
++              return;
++      }
++
++      hrwork->wiphy = wiphy;
++      hrtimer_start_range_ns(&hrwork->timer, delay,
++                             1000 * NSEC_PER_USEC, HRTIMER_MODE_REL);
++}
++EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_queue);
++
++void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
++                             struct wiphy_hrtimer_work *hrwork)
++{
++      lockdep_assert_held(&wiphy->mtx);
++
++      hrtimer_cancel(&hrwork->timer);
++      wiphy_work_cancel(wiphy, &hrwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_cancel);
++
++void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
++                            struct wiphy_hrtimer_work *hrwork)
++{
++      lockdep_assert_held(&wiphy->mtx);
++
++      hrtimer_cancel(&hrwork->timer);
++      wiphy_work_flush(wiphy, &hrwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_flush);
++
++bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
++                              struct wiphy_hrtimer_work *hrwork)
++{
++      return hrtimer_is_queued(&hrwork->timer);
++}
++EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_pending);
++
+ static int __init cfg80211_init(void)
+ {
+       int err;
+--- a/net/wireless/trace.h
++++ b/net/wireless/trace.h
+@@ -304,6 +304,27 @@ TRACE_EVENT(wiphy_delayed_work_queue,
+                 __entry->delay)
+ );
++TRACE_EVENT(wiphy_hrtimer_work_queue,
++      TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
++               ktime_t delay),
++      TP_ARGS(wiphy, work, delay),
++      TP_STRUCT__entry(
++              WIPHY_ENTRY
++              __field(void *, instance)
++              __field(void *, func)
++              __field(ktime_t, delay)
++      ),
++      TP_fast_assign(
++              WIPHY_ASSIGN;
++              __entry->instance = work;
++              __entry->func = work->func;
++              __entry->delay = delay;
++      ),
++      TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%llu",
++                WIPHY_PR_ARG, __entry->instance, __entry->func,
++                __entry->delay)
++);
++
+ TRACE_EVENT(wiphy_work_worker_start,
+       TP_PROTO(struct wiphy *wiphy),
+       TP_ARGS(wiphy),
diff --git a/queue-6.12/wifi-mac80211-use-wiphy_hrtimer_work-for-csa.switch_work.patch b/queue-6.12/wifi-mac80211-use-wiphy_hrtimer_work-for-csa.switch_work.patch
new file mode 100644 (file)
index 0000000..14d6cb6
--- /dev/null
@@ -0,0 +1,138 @@
+From stable+bounces-192868-greg=kroah.com@vger.kernel.org Mon Nov 10 00:21:24 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun,  9 Nov 2025 18:21:14 -0500
+Subject: wifi: mac80211: use wiphy_hrtimer_work for csa.switch_work
+To: stable@vger.kernel.org
+Cc: Benjamin Berg <benjamin.berg@intel.com>, Johannes Berg <johannes.berg@intel.com>, Miri Korenblit <miriam.rachel.korenblit@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251109232114.531375-2-sashal@kernel.org>
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit fbc1cc6973099f45e4c30b86f12b4435c7cb7d24 ]
+
+The work item may be scheduled relatively far in the future. As the
+event happens at a specific point in time, the normal timer accuracy is
+not sufficient in that case.
+
+Switch to use wiphy_hrtimer_work so that the accuracy is sufficient. To
+make this work, use the same clock to store the timestamp.
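+
+As a worked example of the conversion done below: one TU is 1024 usec,
+so with a beacon interval of 100 TU and a CSA count of 10 the switch is
+(10 - 1) * 100 = 900 TU away, i.e. 921600 usec, roughly 921.6 ms on the
+CLOCK_BOOTTIME base.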
+
+CC: stable@vger.kernel.org
+Fixes: ec3252bff7b6 ("wifi: mac80211: use wiphy work for channel switch")
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://patch.msgid.link/20251028125710.68258c7e4ac4.I4ff2b2cdffbbf858bf5f08baccc7a88c4f9efe6f@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/chan.c        |    2 +-
+ net/mac80211/ieee80211_i.h |    4 ++--
+ net/mac80211/link.c        |    4 ++--
+ net/mac80211/mlme.c        |   18 +++++++++---------
+ 4 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -1246,7 +1246,7 @@ ieee80211_link_chanctx_reservation_compl
+                                &link->csa.finalize_work);
+               break;
+       case NL80211_IFTYPE_STATION:
+-              wiphy_delayed_work_queue(sdata->local->hw.wiphy,
++              wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
+                                        &link->u.mgd.csa.switch_work, 0);
+               break;
+       case NL80211_IFTYPE_UNSPECIFIED:
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -981,10 +981,10 @@ struct ieee80211_link_data_managed {
+       bool operating_11g_mode;
+       struct {
+-              struct wiphy_delayed_work switch_work;
++              struct wiphy_hrtimer_work switch_work;
+               struct cfg80211_chan_def ap_chandef;
+               struct ieee80211_parsed_tpe tpe;
+-              unsigned long time;
++              ktime_t time;
+               bool waiting_bcn;
+               bool ignored_same_chan;
+               bool blocked_tx;
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -469,10 +469,10 @@ static int _ieee80211_set_active_links(s
+                * from there.
+                */
+               if (link->conf->csa_active)
+-                      wiphy_delayed_work_queue(local->hw.wiphy,
++                      wiphy_hrtimer_work_queue(local->hw.wiphy,
+                                                &link->u.mgd.csa.switch_work,
+                                                link->u.mgd.csa.time -
+-                                               jiffies);
++                                               ktime_get_boottime());
+       }
+       list_for_each_entry(sta, &local->sta_list, list) {
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2225,7 +2225,7 @@ void ieee80211_chswitch_done(struct ieee
+                       return;
+               }
+-              wiphy_delayed_work_queue(sdata->local->hw.wiphy,
++              wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
+                                        &link->u.mgd.csa.switch_work, 0);
+       }
+@@ -2384,7 +2384,8 @@ ieee80211_sta_process_chanswitch(struct
+               .timestamp = timestamp,
+               .device_timestamp = device_timestamp,
+       };
+-      unsigned long now;
++      u32 csa_time_tu;
++      ktime_t now;
+       int res;
+       lockdep_assert_wiphy(local->hw.wiphy);
+@@ -2614,10 +2615,9 @@ ieee80211_sta_process_chanswitch(struct
+                                         csa_ie.mode);
+       /* we may have to handle timeout for deactivated link in software */
+-      now = jiffies;
+-      link->u.mgd.csa.time = now +
+-                             TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
+-                                           link->conf->beacon_int);
++      now = ktime_get_boottime();
++      csa_time_tu = (max_t(int, csa_ie.count, 1) - 1) * link->conf->beacon_int;
++      link->u.mgd.csa.time = now + ns_to_ktime(ieee80211_tu_to_usec(csa_time_tu) * NSEC_PER_USEC);
+       if (ieee80211_vif_link_active(&sdata->vif, link->link_id) &&
+           local->ops->channel_switch) {
+@@ -2632,7 +2632,7 @@ ieee80211_sta_process_chanswitch(struct
+       }
+       /* channel switch handled in software */
+-      wiphy_delayed_work_queue(local->hw.wiphy,
++      wiphy_hrtimer_work_queue(local->hw.wiphy,
+                                &link->u.mgd.csa.switch_work,
+                                link->u.mgd.csa.time - now);
+       return;
+@@ -8137,7 +8137,7 @@ void ieee80211_mgd_setup_link(struct iee
+       else
+               link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
+-      wiphy_delayed_work_init(&link->u.mgd.csa.switch_work,
++      wiphy_hrtimer_work_init(&link->u.mgd.csa.switch_work,
+                               ieee80211_csa_switch_work);
+       ieee80211_clear_tpe(&link->conf->tpe);
+@@ -9267,7 +9267,7 @@ void ieee80211_mgd_stop_link(struct ieee
+                         &link->u.mgd.request_smps_work);
+       wiphy_work_cancel(link->sdata->local->hw.wiphy,
+                         &link->u.mgd.recalc_smps);
+-      wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
++      wiphy_hrtimer_work_cancel(link->sdata->local->hw.wiphy,
+                                 &link->u.mgd.csa.switch_work);
+ }