git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: SVM: Add a helper to detect VMRUN failures
Author: Sean Christopherson <seanjc@google.com>
AuthorDate: Tue, 30 Dec 2025 21:13:40 +0000 (13:13 -0800)
Committer: Sean Christopherson <seanjc@google.com>
CommitDate: Wed, 14 Jan 2026 01:37:03 +0000 (17:37 -0800)
Add a helper to detect VMRUN failures so that KVM can guard against its
own long-standing bug, where KVM neglects to set exitcode[63:32] when
synthesizing a nested VMFAIL_INVALID VM-Exit.  This will allow fixing
KVM's mess of treating exitcode as two separate 32-bit values without
breaking KVM-on-KVM when running on an older, unfixed KVM.

Cc: Jim Mattson <jmattson@google.com>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230211347.4099600-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

index 5b741f8ed170985f8352098cf239c368ef5b407d..666b5a36c15d8a5bdff8cb16998f306f0299e2bd 100644 (file)
@@ -1167,7 +1167,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        vmcb12->control.exit_info_1       = vmcb02->control.exit_info_1;
        vmcb12->control.exit_info_2       = vmcb02->control.exit_info_2;
 
-       if (vmcb12->control.exit_code != SVM_EXIT_ERR)
+       if (!svm_is_vmrun_failure(vmcb12->control.exit_code))
                nested_save_pending_event_to_vmcb12(svm, vmcb12);
 
        if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
@@ -1463,6 +1463,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;
 
+       if (svm_is_vmrun_failure(exit_code))
+               return NESTED_EXIT_DONE;
+
        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
@@ -1470,7 +1473,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
-       case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
+       case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f:
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special.  There is nothing to do here,
@@ -1478,15 +1481,10 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
-       }
-       case SVM_EXIT_ERR: {
-               vmexit = NESTED_EXIT_DONE;
-               break;
-       }
-       default: {
+       default:
                if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
-       }
+               break;
        }
 
        return vmexit;
index c7bd78f5a2c7b98290f63f58274df1dd5b0f0818..e20b40f346afdf16c345ce3652c4d5bcc8368384 100644 (file)
@@ -3564,7 +3564,7 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                        return 1;
        }
 
-       if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+       if (svm_is_vmrun_failure(svm->vmcb->control.exit_code)) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
@@ -4346,7 +4346,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 
                /* Track VMRUNs that have made past consistency checking */
                if (svm->nested.nested_run_pending &&
-                   svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+                   !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
                         ++vcpu->stat.nested_run;
 
                svm->nested.nested_run_pending = 0;
index 7d28a739865fc8b980f553b7b0eea7b185911b9d..3360ac36e07108dfeb6c715d4c1d8ecd69559568 100644 (file)
@@ -425,6 +425,11 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
        return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
+static inline bool svm_is_vmrun_failure(u64 exit_code)
+{
+       return (u32)exit_code == (u32)SVM_EXIT_ERR;
+}
+
 /*
  * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
  * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.