KVM: x86: Reorganize code in x86.c to co-locate vCPU blocking/running helpers
author    Sean Christopherson <seanjc@google.com>
          Fri, 2 Aug 2024 19:51:19 +0000 (12:51 -0700)
committer Sean Christopherson <seanjc@google.com>
          Fri, 30 Aug 2024 02:50:21 +0000 (19:50 -0700)
Shuffle code around in x86.c so that the various helpers related to vCPU
blocking/running logic are (a) located near each other and (b) ordered so
that HLT emulation can use kvm_vcpu_has_events() in a future patch.

No functional change intended.

Link: https://lore.kernel.org/r/20240802195120.325560-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
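
As a rough illustration of the ordering benefit described above (not part of this commit): once kvm_vcpu_has_events() is defined ahead of the halt helpers, a later patch could have __kvm_emulate_halt() check for an already-pending wake event before marking the vCPU halted. The exact condition and placement below are assumptions, sketched for context only:

	/*
	 * Hypothetical sketch, NOT the actual follow-up patch: if a wake event
	 * is already pending when the guest halts, leave the vCPU runnable so
	 * the run loop does not schedule it out only to immediately wake it.
	 */
	static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
	{
		++vcpu->stat.halt_exits;

		if (lapic_in_kernel(vcpu)) {
			/* Assumed check; made possible by the reordering in this patch. */
			if (kvm_vcpu_has_events(vcpu))
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
			else
				vcpu->arch.mp_state = state;
			return 1;
		}

		vcpu->run->exit_reason = reason;
		return 0;
	}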
arch/x86/kvm/x86.c

index d6c81d59d4872363683d26d531b8979bdc50ff17..c15eb8e7d3c39210febb962b3d9fef43763ccc07 100644
@@ -9917,51 +9917,6 @@ void kvm_x86_vendor_exit(void)
 }
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
 
-static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
-{
-       /*
-        * The vCPU has halted, e.g. executed HLT.  Update the run state if the
-        * local APIC is in-kernel, the run loop will detect the non-runnable
-        * state and halt the vCPU.  Exit to userspace if the local APIC is
-        * managed by userspace, in which case userspace is responsible for
-        * handling wake events.
-        */
-       ++vcpu->stat.halt_exits;
-       if (lapic_in_kernel(vcpu)) {
-               vcpu->arch.mp_state = state;
-               return 1;
-       } else {
-               vcpu->run->exit_reason = reason;
-               return 0;
-       }
-}
-
-int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
-{
-       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-       int ret = kvm_skip_emulated_instruction(vcpu);
-       /*
-        * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
-        * KVM_EXIT_DEBUG here.
-        */
-       return kvm_emulate_halt_noskip(vcpu) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-
-int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
-{
-       int ret = kvm_skip_emulated_instruction(vcpu);
-
-       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
-                                       KVM_EXIT_AP_RESET_HOLD) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
-
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
                                unsigned long clock_type)
@@ -11214,6 +11169,67 @@ out:
        return r;
 }
 
+static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted);
+}
+
+static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+
+       if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
+           kvm_apic_init_sipi_allowed(vcpu))
+               return true;
+
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       if (kvm_is_exception_pending(vcpu))
+               return true;
+
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+           (vcpu->arch.nmi_pending &&
+            kvm_x86_call(nmi_allowed)(vcpu, false)))
+               return true;
+
+#ifdef CONFIG_KVM_SMM
+       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+           (vcpu->arch.smi_pending &&
+            kvm_x86_call(smi_allowed)(vcpu, false)))
+               return true;
+#endif
+
+       if (kvm_test_request(KVM_REQ_PMI, vcpu))
+               return true;
+
+       if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
+               return true;
+
+       if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
+               return true;
+
+       if (kvm_hv_has_stimer_pending(vcpu))
+               return true;
+
+       if (is_guest_mode(vcpu) &&
+           kvm_x86_ops.nested_ops->has_events &&
+           kvm_x86_ops.nested_ops->has_events(vcpu, false))
+               return true;
+
+       if (kvm_xen_has_pending_events(vcpu))
+               return true;
+
+       return false;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+}
+
 /* Called within kvm->srcu read side.  */
 static inline int vcpu_block(struct kvm_vcpu *vcpu)
 {
@@ -11285,12 +11301,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
-{
-       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-               !vcpu->arch.apf.halted);
-}
-
 /* Called within kvm->srcu read side.  */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
@@ -11342,6 +11352,77 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        return r;
 }
 
+static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+{
+       /*
+        * The vCPU has halted, e.g. executed HLT.  Update the run state if the
+        * local APIC is in-kernel, the run loop will detect the non-runnable
+        * state and halt the vCPU.  Exit to userspace if the local APIC is
+        * managed by userspace, in which case userspace is responsible for
+        * handling wake events.
+        */
+       ++vcpu->stat.halt_exits;
+       if (lapic_in_kernel(vcpu)) {
+               vcpu->arch.mp_state = state;
+               return 1;
+       } else {
+               vcpu->run->exit_reason = reason;
+               return 0;
+       }
+}
+
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
+{
+       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+       int ret = kvm_skip_emulated_instruction(vcpu);
+       /*
+        * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+        * KVM_EXIT_DEBUG here.
+        */
+       return kvm_emulate_halt_noskip(vcpu) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+       int ret = kvm_skip_emulated_instruction(vcpu);
+
+       return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
+                                       KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_apicv_active(vcpu) &&
+              kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
+}
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.preempted_in_kernel;
+}
+
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+               return true;
+
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
+               kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
+                kvm_test_request(KVM_REQ_EVENT, vcpu))
+               return true;
+
+       return kvm_arch_dy_has_pending_interrupt(vcpu);
+}
+
 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
        return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
@@ -13156,87 +13237,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                kvm_arch_free_memslot(kvm, old);
 }
 
-static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
-{
-       if (!list_empty_careful(&vcpu->async_pf.done))
-               return true;
-
-       if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
-           kvm_apic_init_sipi_allowed(vcpu))
-               return true;
-
-       if (vcpu->arch.pv.pv_unhalted)
-               return true;
-
-       if (kvm_is_exception_pending(vcpu))
-               return true;
-
-       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-           (vcpu->arch.nmi_pending &&
-            kvm_x86_call(nmi_allowed)(vcpu, false)))
-               return true;
-
-#ifdef CONFIG_KVM_SMM
-       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
-           (vcpu->arch.smi_pending &&
-            kvm_x86_call(smi_allowed)(vcpu, false)))
-               return true;
-#endif
-
-       if (kvm_test_request(KVM_REQ_PMI, vcpu))
-               return true;
-
-       if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
-               return true;
-
-       if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
-               return true;
-
-       if (kvm_hv_has_stimer_pending(vcpu))
-               return true;
-
-       if (is_guest_mode(vcpu) &&
-           kvm_x86_ops.nested_ops->has_events &&
-           kvm_x86_ops.nested_ops->has_events(vcpu, false))
-               return true;
-
-       if (kvm_xen_has_pending_events(vcpu))
-               return true;
-
-       return false;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
-}
-
-bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
-{
-       return kvm_vcpu_apicv_active(vcpu) &&
-              kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
-}
-
-bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.preempted_in_kernel;
-}
-
-bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
-{
-       if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
-               return true;
-
-       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-#ifdef CONFIG_KVM_SMM
-               kvm_test_request(KVM_REQ_SMI, vcpu) ||
-#endif
-                kvm_test_request(KVM_REQ_EVENT, vcpu))
-               return true;
-
-       return kvm_arch_dy_has_pending_interrupt(vcpu);
-}
-
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
        if (vcpu->arch.guest_state_protected)