KVM: x86: Convert vcpu_run()'s immediate exit param into a generic bitmap
author    Sean Christopherson <seanjc@google.com>
          Tue, 10 Jun 2025 23:20:04 +0000 (16:20 -0700)
committer Sean Christopherson <seanjc@google.com>
          Fri, 20 Jun 2025 20:04:24 +0000 (13:04 -0700)
Convert kvm_x86_ops.vcpu_run()'s "force_immediate_exit" boolean parameter
into a generic bitmap so that similar "take action" information can be
passed to vendor code without creating a pile of boolean parameters.

This will allow dropping kvm_x86_ops.set_dr6() in favor of a new flag, and
will also allow for adding similar functionality for re-loading debugctl
in the active VMCS.

Opportunistically massage the TDX WARN and comment to prepare for adding
more run_flags, all of which are expected to be mutually exclusive with
TDX, i.e. should be WARNed on.

No functional change intended.

Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20250610232010.162191-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
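
The core pattern here is small enough to show in isolation. The sketch
below (stand-alone userspace C, not code from the patch; BIT() and the
function names are illustrative stand-ins for the kernel definitions)
shows how a lone boolean parameter becomes one bit in an extensible u64
bitmap, and how the callee recovers the bool:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's BIT() macro. */
#define BIT(n)	(1ULL << (n))

enum run_flags {
	RUN_FORCE_IMMEDIATE_EXIT = BIT(0),
	/* Future "take action" flags slot in here without touching the
	 * vcpu_run() signature again. */
};

/* Callee side: recover the boolean from the bitmap.  A nonzero
 * bitwise-AND converts to true, zero to false. */
static void vendor_vcpu_run(uint64_t run_flags)
{
	bool force_immediate_exit = run_flags & RUN_FORCE_IMMEDIATE_EXIT;

	printf("force_immediate_exit = %d\n", force_immediate_exit);
}

int main(void)
{
	uint64_t run_flags = 0;
	bool req_immediate_exit = true;

	/* Caller side: translate each request into a flag. */
	if (req_immediate_exit)
		run_flags |= RUN_FORCE_IMMEDIATE_EXIT;

	vendor_vcpu_run(run_flags);
	return 0;
}

This mirrors the svm.c and vmx.c hunks below, which open by extracting
force_immediate_exit from run_flags in exactly this way.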
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdbaa9960e870ade649606bdcab2e9a..8d81684fa15d6848a42418469d3fc5a06abd3a37 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1674,6 +1674,10 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
        return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
 }
 
+enum kvm_x86_run_flags {
+       KVM_RUN_FORCE_IMMEDIATE_EXIT    = BIT(0),
+};
+
 struct kvm_x86_ops {
        const char *name;
 
@@ -1755,7 +1759,7 @@ struct kvm_x86_ops {
 
        int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
        enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
-                                                 bool force_immediate_exit);
+                                                 u64 run_flags);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ab9b947dbf4f9cd8fe97af0cbecfe540ccffc82c..83d1b62130b15d57a8c225b1f1ff1151fdd24e0a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4389,9 +4389,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
        guest_state_exit_irqoff();
 }
 
-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
-                                         bool force_immediate_exit)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 {
+       bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
        struct vcpu_svm *svm = to_svm(vcpu);
        bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
 
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index d1e02e567b571f3885d46a8978f633ec235c050e..fef3e380370783478d80165ce3989eb9a71b29ee 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -175,12 +175,12 @@ static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
        return vmx_vcpu_pre_run(vcpu);
 }
 
-static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 {
        if (is_td_vcpu(vcpu))
-               return tdx_vcpu_run(vcpu, force_immediate_exit);
+               return tdx_vcpu_run(vcpu, run_flags);
 
-       return vmx_vcpu_run(vcpu, force_immediate_exit);
+       return vmx_vcpu_run(vcpu, run_flags);
 }
 
 static int vt_handle_exit(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 4d2426ab6747113542e402731e0902ae51bbd905..1c4b4d9a1acbd35d7a5b9eceee5943294b0bbbb4 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1025,20 +1025,20 @@ static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
                                DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI | \
                                DEBUGCTLMSR_FREEZE_IN_SMM)
 
-fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 {
        struct vcpu_tdx *tdx = to_tdx(vcpu);
        struct vcpu_vt *vt = to_vt(vcpu);
 
        /*
-        * force_immediate_exit requires vCPU entering for events injection with
-        * an immediately exit followed. But The TDX module doesn't guarantee
-        * entry, it's already possible for KVM to _think_ it completely entry
-        * to the guest without actually having done so.
-        * Since KVM never needs to force an immediate exit for TDX, and can't
-        * do direct injection, just warn on force_immediate_exit.
+        * WARN if KVM wants to force an immediate exit, as the TDX module does
+        * not guarantee entry into the guest, i.e. it's possible for KVM to
+        * _think_ it completed entry to the guest and forced an immediate exit
+        * without actually having done so.  Luckily, KVM never needs to force
+        * an immediate exit for TDX (KVM can't do direct event injection), so
+        * just WARN and continue on.
         */
-       WARN_ON_ONCE(force_immediate_exit);
+       WARN_ON_ONCE(run_flags);
 
        /*
         * Wait until retry of SEPT-zap-related SEAMCALL completes before
@@ -1048,7 +1048,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
        if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
                return EXIT_FASTPATH_EXIT_HANDLED;
 
-       trace_kvm_entry(vcpu, force_immediate_exit);
+       trace_kvm_entry(vcpu, run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT);
 
        if (pi_test_on(&vt->pi_desc)) {
                apic->send_IPI_self(POSTED_INTR_VECTOR);
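
One subtlety in the tdx.c hunk above: the assertion widens from
WARN_ON_ONCE(force_immediate_exit) to WARN_ON_ONCE(run_flags), i.e. from
one condition to the whole bitmap. Per the commit message, every
anticipated run_flags bit is mutually exclusive with TDX, so checking the
full bitmap future-proofs the WARN: any new flag that reaches
tdx_vcpu_run() trips it with no per-flag code. A minimal stand-alone
illustration (the BIT(1) flag is hypothetical, and assert() stands in for
WARN_ON_ONCE(), though unlike the kernel macro it aborts rather than
warning and continuing):

#include <assert.h>
#include <stdint.h>

#define BIT(n)	(1ULL << (n))

#define KVM_RUN_FORCE_IMMEDIATE_EXIT	BIT(0)
#define KVM_RUN_SOME_FUTURE_FLAG	BIT(1)	/* hypothetical */

/* One check of the whole bitmap rejects every current and future
 * flag for TDX without naming any of them. */
static void tdx_check_run_flags(uint64_t run_flags)
{
	assert(run_flags == 0);
}

int main(void)
{
	tdx_check_run_flags(0);	/* fine: no action requested */
	/* tdx_check_run_flags(KVM_RUN_SOME_FUTURE_FLAG); would trip */
	return 0;
}

Note also that trace_kvm_entry() still receives a boolean: the hunk
passes run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT rather than the raw
bitmap, preserving the tracepoint's existing semantics.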
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4953846cb30d1758f2eda294796c5a50ddb9408c..a61a28944de64b52a97aea18e44a3613eee8e1fd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7323,8 +7323,9 @@ out:
        guest_state_exit_irqoff();
 }
 
-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 {
+       bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
 
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index b4596f65123282cd5a42892f59d3c6cb4c00b479..0b4f5c5558d0dac61cd599066d53ee7166da3d35 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -21,7 +21,7 @@ void vmx_vm_destroy(struct kvm *kvm);
 int vmx_vcpu_precreate(struct kvm *kvm);
 int vmx_vcpu_create(struct kvm_vcpu *vcpu);
 int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
 void vmx_vcpu_free(struct kvm_vcpu *vcpu);
 void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
@@ -133,7 +133,7 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void tdx_vcpu_free(struct kvm_vcpu *vcpu);
 void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
-fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
 void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void tdx_vcpu_put(struct kvm_vcpu *vcpu);
 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b58a74c1722de3f2d180cf8d6a3cf0ce9cf534fd..07ff02eed399a8e8b94ecb79bdd311be78d53803 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10779,6 +10779,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                dm_request_for_irq_injection(vcpu) &&
                kvm_cpu_accept_dm_intr(vcpu);
        fastpath_t exit_fastpath;
+       u64 run_flags;
 
        bool req_immediate_exit = false;
 
@@ -11023,8 +11024,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       if (req_immediate_exit)
+       run_flags = 0;
+       if (req_immediate_exit) {
+               run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
+       }
 
        fpregs_assert_state_consistent();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
@@ -11061,8 +11065,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
                             (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
 
-               exit_fastpath = kvm_x86_call(vcpu_run)(vcpu,
-                                                      req_immediate_exit);
+               exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
@@ -11074,6 +11077,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        break;
                }
 
+               run_flags = 0;
+
                /* Note, VM-Exits that go down the "slow" path are accounted below. */
                ++vcpu->stat.exits;
        }