From: Sean Christopherson
Date: Fri, 9 Jan 2026 03:45:32 +0000 (-0800)
Subject: KVM: x86: Update APICv ISR (a.k.a. SVI) as part of kvm_apic_update_apicv()
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=000d75b0b18622e7454c3955631a3cf39e0353e7;p=thirdparty%2Flinux.git

KVM: x86: Update APICv ISR (a.k.a. SVI) as part of kvm_apic_update_apicv()

Fold the calls to .hwapic_isr_update() in kvm_apic_set_state(),
kvm_lapic_reset(), and __kvm_vcpu_update_apicv() into
kvm_apic_update_apicv(), as updating SVI is directly related to updating
KVM's own cache of ISR information, e.g. SVI is more or less the APICv
equivalent of highest_isr_cache.

Note, calling .hwapic_isr_update() during kvm_apic_update_apicv() has
benign side effects, as doing so changes the order of the calls in
kvm_lapic_reset() and kvm_apic_set_state(), specifically with respect to
the order between .hwapic_isr_update() and .apicv_post_state_restore().
However, the changes in ordering are glorified nops, as the former hook is
VMX-only and the latter is SVM-only.

Reviewed-by: Chao Gao
Link: https://patch.msgid.link/20260109034532.1012993-9-seanjc@google.com
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 78c39341b2a54..2e513f1c8988f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -760,17 +760,6 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	}
 }
 
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
-{
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
-	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
-		return;
-
-	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr);
-
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
 	/* This may race with setting of irr in __apic_accept_irq() and
@@ -2783,10 +2772,18 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 	 */
 	apic->irr_pending = true;
 
-	if (apic->apicv_active)
+	/*
+	 * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
+	 * highest bit in vISR and the next accelerated EOI in the guest won't
+	 * be virtualized correctly (the CPU uses SVI to determine which vISR
+	 * vector to clear).
+	 */
+	if (apic->apicv_active) {
 		apic->isr_count = 1;
-	else
+		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+	} else {
 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+	}
 	apic->highest_isr_cache = -1;
 }
 
@@ -2914,10 +2911,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
-	if (apic->apicv_active) {
+	if (apic->apicv_active)
 		kvm_x86_call(apicv_post_state_restore)(vcpu);
-		kvm_x86_call(hwapic_isr_update)(vcpu, -1);
-	}
 
 	vcpu->arch.apic_arb_prio = 0;
 	vcpu->arch.apic_attention = 0;
@@ -3228,10 +3223,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	__start_apic_timer(apic, APIC_TMCCT);
 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
 	kvm_apic_update_apicv(vcpu);
-	if (apic->apicv_active) {
+	if (apic->apicv_active)
 		kvm_x86_call(apicv_post_state_restore)(vcpu);
-		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
-	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 #ifdef CONFIG_KVM_IOAPIC
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 71c80fa020e06..adf04a9bd57d6 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -134,7 +134,6 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
 int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c1933a303ca3..bf8059179edb4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10886,16 +10886,9 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 	 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
 	 * still active when the interrupt got accepted. Make sure
 	 * kvm_check_and_inject_events() is called to check for that.
-	 *
-	 * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
-	 * highest bit in vISR and the next accelerated EOI in the guest won't
-	 * be virtualized correctly (the CPU uses SVI to determine which vISR
-	 * vector to clear).
 	 */
 	if (!apic->apicv_active)
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-	else
-		kvm_apic_update_hwapic_isr(vcpu);
 
 out:
 	preempt_enable();
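
For context on why SVI must track the vISR: SVI tells the CPU which
in-service vector the next accelerated EOI should clear, so it must equal
the highest bit set in the vISR, which is what apic_find_highest_isr()
computes before the result is handed to .hwapic_isr_update().  The
standalone sketch below is illustrative only, not KVM code; the
visr_set()/visr_highest() helpers are invented for the example.  It models
the 256-bit vISR as eight 32-bit words, matching the xAPIC register
layout, and performs the same highest-vector scan.

/*
 * Illustrative sketch: find the highest in-service vector in a 256-bit
 * vISR, i.e. the value that would be programmed into SVI.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_VECTORS	256
#define REG_BITS	32

/* vISR modeled as 8 x 32-bit words, like the xAPIC ISR registers. */
static uint32_t visr[NR_VECTORS / REG_BITS];

static void visr_set(int vec)
{
	visr[vec / REG_BITS] |= 1u << (vec % REG_BITS);
}

/* Return the highest set vector, or -1 if no interrupt is in service. */
static int visr_highest(void)
{
	int word, bit;

	for (word = NR_VECTORS / REG_BITS - 1; word >= 0; word--) {
		if (!visr[word])
			continue;
		for (bit = REG_BITS - 1; bit >= 0; bit--) {
			if (visr[word] & (1u << bit))
				return word * REG_BITS + bit;
		}
	}
	return -1;
}

int main(void)
{
	/* Two interrupts "in service"; SVI must reflect the higher one. */
	visr_set(0x31);
	visr_set(0xec);

	printf("SVI would be 0x%x\n", visr_highest());	/* prints 0xec */
	return 0;
}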