KVM: x86: Introduce kvm_set_mp_state()
author     Jim Mattson <jmattson@google.com>
           Mon, 13 Jan 2025 20:01:43 +0000 (12:01 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Wed, 12 Feb 2025 18:16:27 +0000 (10:16 -0800)
Replace all open-coded assignments to vcpu->arch.mp_state with calls
to a new helper, kvm_set_mp_state(), to centralize all changes to
mp_state.

No functional change intended.

Signed-off-by: Jim Mattson <jmattson@google.com>
Link: https://lore.kernel.org/r/20250113200150.487409-2-jmattson@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/kvm/xen.c
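
For reference, the new helper introduced in arch/x86/kvm/x86.h (see the x86.h hunk below) is a trivial setter, and each call site in this patch is a mechanical one-line conversion:

    static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
    {
            /* Single choke point for all mp_state transitions. */
            vcpu->arch.mp_state = mp_state;
    }

    /* Before */
    vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

    /* After */
    kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);

Centralizing the writes behind one helper means any future bookkeeping on mp_state changes only has to be added in a single place.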

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0b5e1ce494021b09719f92747638fd27407b1e15..750bb89b20ac5ba4e7d7a64d9847b7856a2e46d6 100644
@@ -3390,9 +3390,9 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
        if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
                kvm_vcpu_reset(vcpu, true);
                if (kvm_vcpu_is_bsp(apic->vcpu))
-                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+                       kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
                else
-                       vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+                       kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
        }
        if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
@@ -3401,7 +3401,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                        sipi_vector = apic->sipi_vector;
                        kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
                                                               sipi_vector);
-                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+                       kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
                }
        }
        return 0;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d77b094d9a4d60985bde7c0d2b7b93264edc4d59..c6b79c2e1e052ce8d560bf9423e9c940f065d196 100644
@@ -994,7 +994,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 
        /* in case we halted in L2 */
-       svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
+       kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
 
        /* Give the current vmcb to the guest */
 
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a2a794c320503f008c5dd3ff332bdfb064533a8e..87d2840da6af81899c898c0e0b39b8a259f8c5c7 100644
@@ -3845,7 +3845,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 
        /* Mark the vCPU as offline and not runnable */
        vcpu->arch.pv.pv_unhalted = false;
-       vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+       kvm_set_mp_state(vcpu, KVM_MP_STATE_HALTED);
 
        /* Clear use of the VMSA */
        svm->vmcb->control.vmsa_pa = INVALID_PAGE;
@@ -3884,7 +3884,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 
                /* Mark the vCPU as runnable */
                vcpu->arch.pv.pv_unhalted = false;
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
 
                svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
 
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a7af02d466e9ff738c255533c09e1d34cb7c452..bca2575837ceac9531ca6c5b441adb1531e07515 100644
@@ -3771,7 +3771,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                break;
        case GUEST_ACTIVITY_WAIT_SIPI:
                vmx->nested.nested_run_pending = 0;
-               vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
                break;
        default:
                break;
@@ -5071,7 +5071,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                vmx->nested.need_vmcs12_to_shadow_sync = true;
 
        /* in case we halted in L2 */
-       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+       kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
 
        if (likely(!vmx->fail)) {
                if (vm_exit_reason != -1)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8e77e61d4fbd4dc0de7bfd900270e887ff81296c..3041b8d8b59fa52553cb50e6a16b96bcd0418aa1 100644
@@ -11216,8 +11216,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
        case KVM_MP_STATE_HALTED:
        case KVM_MP_STATE_AP_RESET_HOLD:
                vcpu->arch.pv.pv_unhalted = false;
-               vcpu->arch.mp_state =
-                       KVM_MP_STATE_RUNNABLE;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
                fallthrough;
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.apf.halted = false;
@@ -11296,7 +11295,7 @@ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
                if (kvm_vcpu_has_events(vcpu))
                        vcpu->arch.pv.pv_unhalted = false;
                else
-                       vcpu->arch.mp_state = state;
+                       kvm_set_mp_state(vcpu, state);
                return 1;
        } else {
                vcpu->run->exit_reason = reason;
@@ -11816,10 +11815,10 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                goto out;
 
        if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
-               vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
                set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
        } else
-               vcpu->arch.mp_state = mp_state->mp_state;
+               kvm_set_mp_state(vcpu, mp_state->mp_state);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        ret = 0;
@@ -11946,7 +11945,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
            !is_protmode(vcpu))
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
 
        return 0;
 }
@@ -12249,9 +12248,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
 
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
        else
-               vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED);
 
        r = kvm_mmu_create(vcpu);
        if (r < 0)
@@ -13469,7 +13468,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
        }
 
        vcpu->arch.apf.halted = false;
-       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+       kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
 }
 
 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 91e50a513100edca6645327c6d5cfe1b3eb95f74..34ca87049845ec6cebb36bec08bdfc0df31adece 100644
@@ -121,6 +121,11 @@ static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
        return vcpu->arch.last_vmentry_cpu != -1;
 }
 
+static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
+{
+       vcpu->arch.mp_state = mp_state;
+}
+
 static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.exception.pending ||
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index a909b817b9c0da1714169d937c135474e8a9cf7d..f65ca27888e9652f5d364858291f372b9fcb04dc 100644
@@ -1480,7 +1480,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
        set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
 
        if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
-               vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_HALTED);
 
                if (sched_poll.timeout)
                        mod_timer(&vcpu->arch.xen.poll_timer,
@@ -1491,7 +1491,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                if (sched_poll.timeout)
                        del_timer(&vcpu->arch.xen.poll_timer);
 
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
        }
 
        vcpu->arch.xen.poll_evtchn = 0;