git.ipfire.org Git - thirdparty/kernel/stable.git / commitdiff
KVM: nVMX: Request immediate exit iff pending nested event needs injection
author: Sean Christopherson <seanjc@google.com>
Fri, 7 Jun 2024 17:26:05 +0000 (10:26 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 3 Aug 2024 07:00:41 +0000 (09:00 +0200)
commit 32f55e475ce2c4b8b124d335fcfaf1152ba977a1 upstream.

When requesting an immediate exit from L2 in order to inject a pending
event, do so only if the pending event actually requires manual injection,
i.e. if and only if KVM actually needs to regain control in order to
deliver the event.

Avoiding the "immediate exit" isn't simply an optimization, it's necessary
to make forward progress, as the "already expired" VMX preemption timer
trick that KVM uses to force a VM-Exit has higher priority than events
that aren't directly injected.

At present time, this is a glorified nop as all events processed by
vmx_has_nested_events() require injection, but that will not hold true in
the future, e.g. if there's a pending virtual interrupt in vmcs02.RVI.
I.e. if KVM is trying to deliver a virtual interrupt to L2, the expired
VMX preemption timer will trigger VM-Exit before the virtual interrupt is
delivered, and KVM will effectively hang the vCPU in an endless loop of
forced immediate VM-Exits (because the pending virtual interrupt never
goes away).

Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20240607172609.3205077-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

index f8ca74e7678f3ab8dc44ea0d2cd355cf04c5e068..332347d014c2c10fd582d3753bf7caaafd088c48 100644 (file)
@@ -1819,7 +1819,7 @@ struct kvm_x86_nested_ops {
        bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
                                    u32 error_code);
        int (*check_events)(struct kvm_vcpu *vcpu);
-       bool (*has_events)(struct kvm_vcpu *vcpu);
+       bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
        void (*triple_fault)(struct kvm_vcpu *vcpu);
        int (*get_state)(struct kvm_vcpu *vcpu,
                         struct kvm_nested_state __user *user_kvm_nested_state,
index 8f4db6e8f57c728b93e33fc282da2ba7b33e45ce..411fe7aa07933369c52386e90411c557b06ef454 100644 (file)
@@ -4032,7 +4032,7 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
               to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
-static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
+static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
 {
        return nested_vmx_preemption_timer_pending(vcpu) ||
               to_vmx(vcpu)->nested.mtf_pending;
index 0763a0f72a067fc7f0b321cdab684d2fb7ca9839..369a4a300083f43264987eefe6a9fb64e195ab3d 100644 (file)
@@ -10516,7 +10516,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 
        if (is_guest_mode(vcpu) &&
            kvm_x86_ops.nested_ops->has_events &&
-           kvm_x86_ops.nested_ops->has_events(vcpu))
+           kvm_x86_ops.nested_ops->has_events(vcpu, true))
                *req_immediate_exit = true;
 
        /*
@@ -13146,7 +13146,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 
        if (is_guest_mode(vcpu) &&
            kvm_x86_ops.nested_ops->has_events &&
-           kvm_x86_ops.nested_ops->has_events(vcpu))
+           kvm_x86_ops.nested_ops->has_events(vcpu, false))
                return true;
 
        if (kvm_xen_has_pending_events(vcpu))