From: Sean Christopherson
Date: Wed, 11 Jun 2025 22:45:56 +0000 (-0700)
Subject: KVM: x86: Decouple device assignment from IRQ bypass
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=77e1b8332d1d7aa786f7515e9bd4055def6a1e06;p=thirdparty%2Fkernel%2Fstable.git

KVM: x86: Decouple device assignment from IRQ bypass

Use a dedicated counter to track the number of IRQs that can utilize IRQ
bypass instead of piggybacking the assigned device count.  As evidenced
by commit 2edd9cb79fb3 ("kvm: detect assigned device via irqbypass
manager"), it's possible for a device to post IRQs to a vCPU without
said device being assigned to a VM.

Leave the calls to kvm_arch_{start,end}_assignment() alone for the
moment to avoid regressing the MMIO stale data mitigation: KVM is
abusing the assigned device count when applying mmio_stale_data_clear,
and it's not at all clear whether vDPA devices rely on this behavior.
This will hopefully be cleaned up in the future, as the number of
assigned devices is a terrible heuristic for detecting whether a VM has
access to host MMIO.

Link: https://lore.kernel.org/r/20250611224604.313496-55-seanjc@google.com
Signed-off-by: Sean Christopherson
---
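For review convenience, below is a minimal userspace model of the 0 -> 1
handshake this patch adds.  It is a sketch, not kernel code: the names
mirror the patch, but a pthread mutex and condition variable stand in
for kvm->irqfds.lock and the KVM_REQ_UNBLOCK kick, and a blocked "vCPU"
simply waits for the count instead of reconfiguring a posted-interrupt
descriptor.

/* Build with: gcc -O2 -pthread model.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ~ kvm->irqfds.lock */
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;   /* ~ KVM_REQ_UNBLOCK */
static unsigned long nr_possible_bypass_irqs;            /* written under lock */

/* ~ kvm_arch_irq_bypass_add_producer(): kick blockers on the first attach. */
static void attach_bypass_irq(void)
{
	pthread_mutex_lock(&lock);
	if (!nr_possible_bypass_irqs++)
		pthread_cond_broadcast(&kick); /* ~ vmx_pi_start_bypass() */
	pthread_mutex_unlock(&lock);
}

/* ~ vmx_can_use_vtd_pi(): lockless, possibly stale read of the count. */
static int can_use_bypass(void)
{
	return __atomic_load_n(&nr_possible_bypass_irqs, __ATOMIC_RELAXED) != 0;
}

/* ~ a blocking vCPU: either sees an elevated count or is kicked awake. */
static void *vcpu_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!nr_possible_bypass_irqs)
		pthread_cond_wait(&kick, &lock);
	pthread_mutex_unlock(&lock);
	printf("vCPU: bypass possible = %d\n", can_use_bypass());
	return NULL;
}

int main(void)
{
	pthread_t vcpu;

	pthread_create(&vcpu, NULL, vcpu_thread, NULL);
	usleep(10000);       /* let the vCPU "block" before the first attach */
	attach_bypass_irq(); /* the 0 -> 1 transition wakes the blocked vCPU */
	pthread_join(vcpu, NULL);
	return 0;
}

The property modeled is the one the new vmx_can_use_vtd_pi() comment
depends on: a lockless reader may observe a stale zero, but only while
racing with the very first attach, and that attach kicks every blocker.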
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 8d50e3e0a19b..8897f509860c 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -112,7 +112,7 @@ KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)
 KVM_X86_OP_OPTIONAL(pi_update_irte)
-KVM_X86_OP_OPTIONAL(pi_start_assignment)
+KVM_X86_OP_OPTIONAL(pi_start_bypass)
 KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
 KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
 KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f91791473f0b..9a5d8e35f431 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1383,6 +1383,8 @@ struct kvm_arch {
 	atomic_t noncoherent_dma_count;
 #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
 	atomic_t assigned_device_count;
+	unsigned long nr_possible_bypass_irqs;
+
 #ifdef CONFIG_KVM_IOAPIC
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
@@ -1856,7 +1858,7 @@ struct kvm_x86_ops {
 	int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 			      unsigned int host_irq, uint32_t guest_irq,
 			      struct kvm_vcpu *vcpu, u32 vector);
-	void (*pi_start_assignment)(struct kvm *kvm);
+	void (*pi_start_bypass)(struct kvm *kvm);
 	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index f87f63a53ede..3e8ee23b16c2 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -570,10 +570,15 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 	spin_lock_irq(&kvm->irqfds.lock);
 	irqfd->producer = prod;
 
+	if (!kvm->arch.nr_possible_bypass_irqs++)
+		kvm_x86_call(pi_start_bypass)(kvm);
+
 	if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
 		ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
-		if (ret)
+		if (ret) {
+			kvm->arch.nr_possible_bypass_irqs--;
 			kvm_arch_end_assignment(irqfd->kvm);
+		}
 	}
 	spin_unlock_irq(&kvm->irqfds.lock);
 
@@ -606,6 +611,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 	}
 	irqfd->producer = NULL;
 
+	kvm->arch.nr_possible_bypass_irqs--;
+
 	spin_unlock_irq(&kvm->irqfds.lock);
 
 	kvm_arch_end_assignment(irqfd->kvm);
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index d1e02e567b57..a986fc45145e 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -1014,7 +1014,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.nested_ops = &vmx_nested_ops,
 
 	.pi_update_irte = vmx_pi_update_irte,
-	.pi_start_assignment = vmx_pi_start_assignment,
+	.pi_start_bypass = vmx_pi_start_bypass,
 
 #ifdef CONFIG_X86_64
 	.set_hv_timer = vt_op(set_hv_timer),
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 3a23c30f73cb..5671d59a6b6d 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -146,8 +146,13 @@ after_clear_sn:
 
 static bool vmx_can_use_vtd_pi(struct kvm *kvm)
 {
+	/*
+	 * Note, reading the number of possible bypass IRQs can race with a
+	 * bypass IRQ being attached to the VM.  vmx_pi_start_bypass() ensures
+	 * blocking vCPUs will see an elevated count or get KVM_REQ_UNBLOCK.
+	 */
 	return irqchip_in_kernel(kvm) && kvm_arch_has_irq_bypass() &&
-	       kvm_arch_has_assigned_device(kvm);
+	       READ_ONCE(kvm->arch.nr_possible_bypass_irqs);
 }
 
 /*
@@ -285,12 +290,11 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
 /*
- * Bail out of the block loop if the VM has an assigned
- * device, but the blocking vCPU didn't reconfigure the
- * PI.NV to the wakeup vector, i.e. the assigned device
- * came along after the initial check in vmx_vcpu_pi_put().
+ * Kick all vCPUs when the first possible bypass IRQ is attached to a VM, as
+ * blocking vCPUs may be scheduled out without reconfiguring PID.NV to the
+ * wakeup vector, i.e. if the bypass IRQ came along after vmx_vcpu_pi_put().
  */
-void vmx_pi_start_assignment(struct kvm *kvm)
+void vmx_pi_start_bypass(struct kvm *kvm)
 {
 	if (!kvm_arch_has_irq_bypass())
 		return;
 
 	kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 94ed66ea6249..a4af39948cf0 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -17,7 +17,7 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 		       unsigned int host_irq, uint32_t guest_irq,
 		       struct kvm_vcpu *vcpu, u32 vector);
-void vmx_pi_start_assignment(struct kvm *kvm);
+void vmx_pi_start_bypass(struct kvm *kvm);
 
 static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ebe5a384fd9..1508b77622b9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13440,8 +13440,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
-	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
-		kvm_x86_call(pi_start_assignment)(kvm);
+	atomic_inc(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
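Postscript for readers skimming the diff: after this patch the two
counts answer different questions.  A rough illustrative summary
follows; the struct and helper names are hypothetical stand-ins, not
kernel APIs, and C11 atomics substitute for the kernel's atomic_t and
READ_ONCE().

#include <stdatomic.h>
#include <stdbool.h>

struct vm_counts {
	atomic_int assigned_device_count;      /* VFIO/vDPA devices attached */
	atomic_ulong nr_possible_bypass_irqs;  /* IRQs that may be posted */
};

/*
 * ~ kvm_arch_has_assigned_device(): still consulted for the MMIO stale
 * data mitigation, the heuristic the changelog flags for future cleanup.
 */
static bool needs_mmio_stale_data_clear(struct vm_counts *vm)
{
	return atomic_load(&vm->assigned_device_count) != 0;
}

/*
 * ~ vmx_can_use_vtd_pi(): posted-interrupt eligibility now tracks the
 * bypass count rather than device assignment.
 */
static bool can_post_irqs(struct vm_counts *vm)
{
	return atomic_load_explicit(&vm->nr_possible_bypass_irqs,
				    memory_order_relaxed) != 0;
}

If the mmio_stale_data_clear heuristic is reworked as the changelog
hopes, the assigned device count could drop out of this picture
entirely.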