git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: nVMX: Honor event priority when emulating PI delivery during VM-Enter
author Sean Christopherson <seanjc@google.com>
Fri, 1 Nov 2024 19:14:47 +0000 (12:14 -0700)
committer Sean Christopherson <seanjc@google.com>
Thu, 19 Dec 2024 15:34:14 +0000 (07:34 -0800)
Move the handling of a nested posted interrupt notification that is
unblocked by nested VM-Enter (unblocks L1 IRQs when ack-on-exit is enabled
by L1) from VM-Enter emulation to vmx_check_nested_events().  To avoid a
pointless forced immediate exit, i.e. to not regress IRQ delivery latency
when a nested posted interrupt is pending at VM-Enter, block processing of
the notification IRQ if and only if KVM must block _all_ events.  Unlike
injected events, KVM doesn't need to actually enter L2 before updating the
vIRR and vmcs02.GUEST_INTR_STATUS, as the resulting L2 IRQ will be blocked
by hardware itself, until VM-Enter to L2 completes.

Note, very strictly speaking, moving the IRQ from L2's PIR to IRR before
entering L2 is still technically wrong.  But, practically speaking, only
an L1 hypervisor or an L0 userspace that is deliberately checking event
priority against PIR=>IRR processing can even notice; L2 will see
architecturally correct behavior, as KVM ensures the VM-Enter is finished
before doing anything that would effectively preempt the PIR=>IRR movement.

Reported-by: Chao Gao <chao.gao@intel.com>
Link: https://lore.kernel.org/r/20241101191447.1807602-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/nested.c

index f4064c68dd17ea58828f532c084029fc24e1bf5f..42dd24e971cdf3c302cadf606bd5100d23082d97 100644 (file)
@@ -3739,14 +3739,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (unlikely(status != NVMX_VMENTRY_SUCCESS))
                goto vmentry_failed;
 
-       /* Emulate processing of posted interrupts on VM-Enter. */
-       if (nested_cpu_has_posted_intr(vmcs12) &&
-           kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
-               vmx->nested.pi_pending = true;
-               kvm_make_request(KVM_REQ_EVENT, vcpu);
-               kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
-       }
-
        /* Hide L1D cache contents from the nested guest.  */
        vmx->vcpu.arch.l1tf_flush_l1d = true;
 
@@ -4208,13 +4200,25 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
         */
        bool block_nested_exceptions = vmx->nested.nested_run_pending;
        /*
-        * New events (not exceptions) are only recognized at instruction
+        * Events that don't require injection, i.e. that are virtualized by
+        * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need
+        * to regain control in order to deliver the event, and hardware will
+        * handle event ordering, e.g. with respect to injected exceptions.
+        *
+        * But, new events (not exceptions) are only recognized at instruction
         * boundaries.  If an event needs reinjection, then KVM is handling a
-        * VM-Exit that occurred _during_ instruction execution; new events are
-        * blocked until the instruction completes.
+        * VM-Exit that occurred _during_ instruction execution; new events,
+        * irrespective of whether or not they're injected, are blocked until
+        * the instruction completes.
+        */
+       bool block_non_injected_events = kvm_event_needs_reinjection(vcpu);
+       /*
+        * Injected events are blocked by nested VM-Enter, as KVM is responsible
+        * for managing priority between concurrent events, i.e. KVM needs to
+        * wait until after VM-Enter completes to deliver injected events.
         */
        bool block_nested_events = block_nested_exceptions ||
-                                  kvm_event_needs_reinjection(vcpu);
+                                  block_non_injected_events;
 
        if (lapic_in_kernel(vcpu) &&
                test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -4326,18 +4330,26 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
        if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
                int irq;
 
-               if (block_nested_events)
-                       return -EBUSY;
-               if (!nested_exit_on_intr(vcpu))
+               if (!nested_exit_on_intr(vcpu)) {
+                       if (block_nested_events)
+                               return -EBUSY;
+
                        goto no_vmexit;
+               }
 
                if (!nested_exit_intr_ack_set(vcpu)) {
+                       if (block_nested_events)
+                               return -EBUSY;
+
                        nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
                        return 0;
                }
 
                irq = kvm_cpu_get_extint(vcpu);
                if (irq != -1) {
+                       if (block_nested_events)
+                               return -EBUSY;
+
                        nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
                                          INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
                        return 0;
@@ -4356,11 +4368,22 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
                 * and enabling posted interrupts requires ACK-on-exit.
                 */
                if (irq == vmx->nested.posted_intr_nv) {
+                       /*
+                        * Nested posted interrupts are delivered via RVI, i.e.
+                        * aren't injected by KVM, and so can be queued even if
+                        * manual event injection is disallowed.
+                        */
+                       if (block_non_injected_events)
+                               return -EBUSY;
+
                        vmx->nested.pi_pending = true;
                        kvm_apic_clear_irr(vcpu, irq);
                        goto no_vmexit;
                }
 
+               if (block_nested_events)
+                       return -EBUSY;
+
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
                                  INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);