git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches (master)
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 22 Aug 2025 16:05:47 +0000 (18:05 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 22 Aug 2025 16:05:47 +0000 (18:05 +0200)
added patches:
kvm-retry-nx_huge_page_recovery_thread-creation.patch

queue-6.12/kvm-retry-nx_huge_page_recovery_thread-creation.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/kvm-retry-nx_huge_page_recovery_thread-creation.patch b/queue-6.12/kvm-retry-nx_huge_page_recovery_thread-creation.patch
new file mode 100644
index 0000000..7dacee3
--- /dev/null
+++ b/queue-6.12/kvm-retry-nx_huge_page_recovery_thread-creation.patch
@@ -0,0 +1,128 @@
+From 916b7f42b3b3b539a71c204a9b49fdc4ca92cd82 Mon Sep 17 00:00:00 2001
+From: Keith Busch <kbusch@kernel.org>
+Date: Thu, 27 Feb 2025 15:06:31 -0800
+Subject: kvm: retry nx_huge_page_recovery_thread creation
+
+From: Keith Busch <kbusch@kernel.org>
+
+commit 916b7f42b3b3b539a71c204a9b49fdc4ca92cd82 upstream.
+
+A VMM may send a non-fatal signal to its threads, including vCPU tasks,
+at any time, and thus may signal vCPU tasks during KVM_RUN.  If a vCPU
+task receives the signal while it's trying to spawn the huge page recovery
+vhost task, then KVM_RUN will fail due to copy_process() returning
+-ERESTARTNOINTR.
+
+Rework call_once() to mark the call complete if and only if the called
+function succeeds, and plumb the function's true error code back to the
+call_once() invoker.  This provides userspace with the correct, non-fatal
+error code so that the VMM doesn't terminate the VM on -ENOMEM, and allows
+a subsequent KVM_RUN to succeed by virtue of retrying creation of the NX huge
+page task.
+
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+[implemented the kvm user side]
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Message-ID: <20250227230631.303431-3-kbusch@meta.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Alistair Delva <adelva@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c    |   10 ++++------
+ include/linux/call_once.h |   43 ++++++++++++++++++++++++++++++++-----------
+ 2 files changed, 36 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7578,7 +7578,7 @@ static bool kvm_nx_huge_page_recovery_wo
+       return true;
+ }
+-static void kvm_mmu_start_lpage_recovery(struct once *once)
++static int kvm_mmu_start_lpage_recovery(struct once *once)
+ {
+       struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
+       struct kvm *kvm = container_of(ka, struct kvm, arch);
+@@ -7590,12 +7590,13 @@ static void kvm_mmu_start_lpage_recovery
+                                     kvm, "kvm-nx-lpage-recovery");
+       if (IS_ERR(nx_thread))
+-              return;
++              return PTR_ERR(nx_thread);
+       vhost_task_start(nx_thread);
+       /* Make the task visible only once it is fully started. */
+       WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
++      return 0;
+ }
+ int kvm_mmu_post_init_vm(struct kvm *kvm)
+@@ -7603,10 +7604,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm
+       if (nx_hugepage_mitigation_hard_disabled)
+               return 0;
+-      call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+-      if (!kvm->arch.nx_huge_page_recovery_thread)
+-              return -ENOMEM;
+-      return 0;
++      return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+ }
+ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+--- a/include/linux/call_once.h
++++ b/include/linux/call_once.h
+@@ -26,20 +26,41 @@ do {                                                                       \
+       __once_init((once), #once, &__key);                             \
+ } while (0)
+-static inline void call_once(struct once *once, void (*cb)(struct once *))
++/*
++ * call_once - Ensure a function has been called exactly once
++ *
++ * @once: Tracking struct
++ * @cb: Function to be called
++ *
++ * If @once has never completed successfully before, call @cb and, if
++ * it returns a zero or positive value, mark @once as completed.  Return
++ * the value returned by @cb.
++ *
++ * If @once has completed successfully before, return 0.
++ *
++ * The call to @cb is implicitly surrounded by a mutex, though for
++ * efficiency the function avoids taking it after the first call.
++ */
++static inline int call_once(struct once *once, int (*cb)(struct once *))
+ {
+-        /* Pairs with atomic_set_release() below.  */
+-        if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+-                return;
++      int r, state;
+-        guard(mutex)(&once->lock);
+-        WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
+-        if (atomic_read(&once->state) != ONCE_NOT_STARTED)
+-                return;
++      /* Pairs with atomic_set_release() below.  */
++      if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
++              return 0;
+-        atomic_set(&once->state, ONCE_RUNNING);
+-        cb(once);
+-        atomic_set_release(&once->state, ONCE_COMPLETED);
++      guard(mutex)(&once->lock);
++      state = atomic_read(&once->state);
++      if (unlikely(state != ONCE_NOT_STARTED))
++              return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
++
++      atomic_set(&once->state, ONCE_RUNNING);
++      r = cb(once);
++      if (r < 0)
++              atomic_set(&once->state, ONCE_NOT_STARTED);
++      else
++              atomic_set_release(&once->state, ONCE_COMPLETED);
++      return r;
+ }
+ #endif /* _LINUX_CALL_ONCE_H */
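
To illustrate the semantics the call_once() hunk above introduces, here is a minimal userspace sketch of the retry behaviour, assuming a pthread mutex and a plain enum in place of the kernel's atomics, guard(mutex) and vhost_task machinery; the start_recovery_thread() callback, the attempts counter and the simulated -EINTR failure are hypothetical stand-ins rather than the real KVM code (which would see -ERESTARTNOINTR from copy_process()).

/*
 * Minimal userspace sketch (not kernel code) of the reworked call_once():
 * a failing callback does not latch the once, so a later call retries it.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum once_state { ONCE_NOT_STARTED, ONCE_RUNNING, ONCE_COMPLETED };

struct once {
	enum once_state state;
	pthread_mutex_t lock;
};

/* Call @cb at most once successfully; a failing @cb can be retried later. */
static int call_once(struct once *once, int (*cb)(struct once *))
{
	int r;

	pthread_mutex_lock(&once->lock);
	if (once->state == ONCE_COMPLETED) {
		pthread_mutex_unlock(&once->lock);
		return 0;
	}

	once->state = ONCE_RUNNING;
	r = cb(once);
	/* Only a non-negative return marks the once as completed. */
	once->state = (r < 0) ? ONCE_NOT_STARTED : ONCE_COMPLETED;
	pthread_mutex_unlock(&once->lock);
	return r;
}

/* Hypothetical callback: fails on the first attempt (as if interrupted by
 * a pending signal), then succeeds, mimicking retried vhost task creation. */
static int attempts;

static int start_recovery_thread(struct once *once)
{
	(void)once;
	return (++attempts == 1) ? -EINTR : 0;
}

int main(void)
{
	struct once nx_once = { ONCE_NOT_STARTED, PTHREAD_MUTEX_INITIALIZER };

	printf("first  KVM_RUN: %d\n", call_once(&nx_once, start_recovery_thread));
	printf("second KVM_RUN: %d\n", call_once(&nx_once, start_recovery_thread));
	printf("third  KVM_RUN: %d\n", call_once(&nx_once, start_recovery_thread));
	return 0;
}

The point of the change is visible in the output: the first call propagates the callback's negative error to the caller without marking the once complete, the second call retries and returns 0, and every call after that returns 0 immediately because the state is now ONCE_COMPLETED.
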
diff --git a/queue-6.12/series b/queue-6.12/series
index 2eb417f516588b3598be2d3ef52a7bed117dbd31..d6c783531dc6c2f54150038f66cf9ad549a0b43c 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -175,3 +175,4 @@ alsa-hda-realtek-add-support-for-hp-elitebook-x360-830-g6-and-elitebook-830-g6.p
 rdma-rxe-flush-delayed-skbs-while-releasing-rxe-resources.patch
 s390-sclp-fix-sccb-present-check.patch
 platform-x86-intel-uncore-freq-check-write-blocked-for-elc.patch
+kvm-retry-nx_huge_page_recovery_thread-creation.patch