KVM: SVM: Fix SNP AP destroy race with VMRUN
author     Tom Lendacky <thomas.lendacky@amd.com>
           Thu, 27 Mar 2025 17:39:56 +0000 (12:39 -0500)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 24 Apr 2025 18:20:08 +0000 (11:20 -0700)
An AP destroy request for a target vCPU is typically followed by an
RMPADJUST to remove the VMSA attribute from the page currently being
used as the VMSA for the target vCPU. This can cause a vCPU that is
about to VMRUN to exit with #VMEXIT_INVALID.

This usually does not happen as APs are typically sitting in HLT when
being destroyed and therefore the vCPU thread is not running at the time.
However, if HLT is allowed inside the VM, then the vCPU could be about to
VMRUN when the VMSA attribute is removed from the VMSA page, resulting in
a #VMEXIT_INVALID when the vCPU actually issues the VMRUN and causing the
guest to crash. An RMPADJUST against an in-use (already running) VMSA
results in a #NPF for the vCPU issuing the RMPADJUST, so the VMSA
attribute cannot be changed until the VMRUN for the target vCPU exits. The
QEMU command line option '-overcommit cpu-pm=on' is an example of allowing
HLT inside the guest.
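
For reference, a minimal illustrative QEMU invocation (the surrounding
options are an assumption for illustration, not taken from this patch):

    qemu-system-x86_64 -enable-kvm -cpu host -smp 4 \
        -overcommit cpu-pm=on ...

With cpu-pm=on, HLT no longer exits to KVM, so a halted AP stays in
guest mode instead of blocking in the host, which is what opens the
window for this race.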

Update the KVM_REQ_UPDATE_PROTECTED_GUEST_STATE event to include the
KVM_REQUEST_WAIT flag. The kvm_vcpu_kick() function will not wait for
requests to be honored, so create kvm_make_request_and_kick() that will
add a new event request and honor the KVM_REQUEST_WAIT flag. This will
ensure that the target vCPU sees the AP destroy request before returning
to the initiating vCPU should the target vCPU be in guest mode.
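
In pseudocode, the guarantee the new helper provides (a summary of the
code added below, not a new API; ack_kick() already exists in
virt/kvm/kvm_main.c):

    /*
     * kvm_make_request_and_kick(req, vcpu):
     *   1. Set the request bit (kvm_make_request() orders the store
     *      after any prior writes).
     *   2. If req carries KVM_REQUEST_WAIT and the target vCPU is
     *      running on another CPU, send a synchronous SMP function
     *      call and wait for that CPU to run ack_kick() before
     *      returning.
     *
     * On return, the target vCPU cannot still be entering VMRUN
     * without having observed the request, so the initiator can
     * safely RMPADJUST the old VMSA page.
     */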

Fixes: e366f92ea99e ("KVM: SEV: Support SEV-SNP AP Creation NAE event")
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/fe2c885bf35643dd224e91294edb6777d5df23a4.1743097196.git.thomas.lendacky@amd.com
[sean: add a comment explaining the use of smp_send_reschedule()]
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/sev.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6c06f3d6e0811600aa5e49dc203aaa472ed48933..c5e80131626d02ea53a91470262a6091136652b7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
        KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_HV_TLB_FLUSH \
        KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE   KVM_ARCH_REQ(34)
+#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \
+       KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT)
 
 #define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
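
[Note: the behavior flags live in the upper bits of the request value
itself, which is what lets the helper added below test 'req &
KVM_REQUEST_WAIT' directly. A sketch of the existing encoding in
include/linux/kvm_host.h (bit positions from the current tree, worth
double-checking):

    /* Low byte = request number; upper bits = behavior flags. */
    #define KVM_REQUEST_MASK      GENMASK(7, 0)
    #define KVM_REQUEST_NO_WAKEUP BIT(8)
    #define KVM_REQUEST_WAIT      BIT(9)

KVM_ARCH_REQ_FLAGS(nr, flags) ORs the flags into the request number, so
after this change KVM_REQ_UPDATE_PROTECTED_GUEST_STATE carries
KVM_REQUEST_WAIT everywhere the raw value is passed around.]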
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee278877ffa00a26a21e4c948214b4bc8e..7d11e82e4fa419afef7c0ee56fe308edc3ddd0ab 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3988,10 +3988,8 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
         * Unless Creation is deferred until INIT, signal the vCPU to update
         * its state.
         */
-       if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT) {
-               kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
-               kvm_vcpu_kick(target_vcpu);
-       }
+       if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT)
+               kvm_make_request_and_kick(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
 
        return 0;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1dedc421b3e33b57ddffd8646e3b3ec638fb7cba..c685fb417e924ff9460778a263b74c45b22a2436 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1505,7 +1505,16 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+#ifndef CONFIG_S390
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
+
+static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+       __kvm_vcpu_kick(vcpu, false);
+}
+#endif
+
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
 
@@ -2253,6 +2262,14 @@ static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
        __kvm_make_request(req, vcpu);
 }
 
+#ifndef CONFIG_S390
+static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
+{
+       kvm_make_request(req, vcpu);
+       __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
+}
+#endif
+
 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
 {
        return READ_ONCE(vcpu->requests);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 69782df3617fdebc9c561ec0107a6971e4be5f9a..16fe54cf2808bd7173ecb113193a9cfb0f6f9b9b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3739,7 +3739,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
 /*
  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
  */
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
 {
        int me, cpu;
 
@@ -3768,13 +3768,24 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
         */
        if (kvm_arch_vcpu_should_kick(vcpu)) {
                cpu = READ_ONCE(vcpu->cpu);
-               if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-                       smp_send_reschedule(cpu);
+               if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
+                       /*
+                        * Use a reschedule IPI to kick the vCPU if the caller
+                        * doesn't need to wait for a response, as KVM allows
+                        * kicking vCPUs while IRQs are disabled, but using the
+                        * SMP function call framework with IRQs disabled can
+                        * deadlock due to taking cross-CPU locks.
+                        */
+                       if (wait)
+                               smp_call_function_single(cpu, ack_kick, NULL, wait);
+                       else
+                               smp_send_reschedule(cpu);
+               }
        }
 out:
        put_cpu();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
+EXPORT_SYMBOL_GPL(__kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
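
[Note: for comparison, the existing broadcast path in the same file
already honors the flag this way; kvm_make_vcpus_request_mask() forwards
it as the wait argument of the SMP function call (abridged paraphrase of
existing code, not part of this patch):

    /* virt/kvm/kvm_main.c, existing code (abridged) */
    smp_call_function_many(cpus, ack_kick, NULL, !!(req & KVM_REQUEST_WAIT));

A request tagged KVM_REQUEST_WAIT therefore blocks the sender until
every targeted CPU has executed ack_kick(), which is the same guarantee
the single-vCPU kick now provides.]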