git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86: Update APICv ISR (a.k.a. SVI) as part of kvm_apic_update_apicv()
authorSean Christopherson <seanjc@google.com>
Fri, 9 Jan 2026 03:45:32 +0000 (19:45 -0800)
committerSean Christopherson <seanjc@google.com>
Wed, 14 Jan 2026 01:35:32 +0000 (17:35 -0800)
Fold the calls to .hwapic_isr_update() in kvm_apic_set_state(),
kvm_lapic_reset(), and __kvm_vcpu_update_apicv() into
kvm_apic_update_apicv(), as updating SVI is directly related to updating
KVM's own cache of ISR information, e.g. SVI is more or less the APICv
equivalent of highest_isr_cache.

Note, calling .hwapic_isr_update() during kvm_apic_update_apicv() has
benign side effects, as doing so changes the order of the calls in
kvm_lapic_reset() and kvm_apic_set_state(), specifically with respect
to the order between .hwapic_isr_update() and .apicv_post_state_restore().
However, the changes in ordering are glorified nops as the former hook is
VMX-only and the latter is SVM-only.

Reviewed-by: Chao Gao <chao.gao@intel.com>
Link: https://patch.msgid.link/20260109034532.1012993-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/x86.c

index 78c39341b2a54f2148fb678802d3874887399256..2e513f1c8988fe697e442b6a64c64fd0da43a80c 100644 (file)
@@ -760,17 +760,6 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
        }
 }
 
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-
-       if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
-               return;
-
-       kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr);
-
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
        /* This may race with setting of irr in __apic_accept_irq() and
@@ -2783,10 +2772,18 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
         */
        apic->irr_pending = true;
 
-       if (apic->apicv_active)
+       /*
+        * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
+        * highest bit in vISR and the next accelerated EOI in the guest won't
+        * be virtualized correctly (the CPU uses SVI to determine which vISR
+        * vector to clear).
+        */
+       if (apic->apicv_active) {
                apic->isr_count = 1;
-       else
+               kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+       } else {
                apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+       }
 
        apic->highest_isr_cache = -1;
 }
@@ -2914,10 +2911,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        vcpu->arch.pv_eoi.msr_val = 0;
        apic_update_ppr(apic);
-       if (apic->apicv_active) {
+       if (apic->apicv_active)
                kvm_x86_call(apicv_post_state_restore)(vcpu);
-               kvm_x86_call(hwapic_isr_update)(vcpu, -1);
-       }
 
        vcpu->arch.apic_arb_prio = 0;
        vcpu->arch.apic_attention = 0;
@@ -3228,10 +3223,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        __start_apic_timer(apic, APIC_TMCCT);
        kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
        kvm_apic_update_apicv(vcpu);
-       if (apic->apicv_active) {
+       if (apic->apicv_active)
                kvm_x86_call(apicv_post_state_restore)(vcpu);
-               kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
-       }
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 #ifdef CONFIG_KVM_IOAPIC
index 71c80fa020e06fa0a0a5952ba42d9daf74e126b8..adf04a9bd57d688a63b5f9d0822e9801563a9e35 100644 (file)
@@ -134,7 +134,6 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
 int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
index 0c1933a303ca31b4bd0810bc750a9500a216fd61..bf8059179edb4fd603daec836d207964b7a9c9a0 100644 (file)
@@ -10886,16 +10886,9 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
         * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
         * still active when the interrupt got accepted. Make sure
         * kvm_check_and_inject_events() is called to check for that.
-        *
-        * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
-        * highest bit in vISR and the next accelerated EOI in the guest won't
-        * be virtualized correctly (the CPU uses SVI to determine which vISR
-        * vector to clear).
         */
        if (!apic->apicv_active)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-       else
-               kvm_apic_update_hwapic_isr(vcpu);
 
 out:
        preempt_enable();