From: Sean Christopherson <seanjc@google.com>
Date: Wed, 11 Jun 2025 22:46:02 +0000 (-0700)
Subject: KVM: SVM: Consolidate IRTE update when toggling AVIC on/off
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5f3d06b1648e0878f04e3d4819f2ef16c1b40bbf;p=thirdparty%2Fkernel%2Fstable.git

KVM: SVM: Consolidate IRTE update when toggling AVIC on/off

Fold the IRTE modification logic in avic_refresh_apicv_exec_ctrl() into
__avic_vcpu_{load,put}(), and add a param to the helpers to communicate
whether or not AVIC is being toggled, i.e. whether the IRTE needs a "full"
update or just a quick update to set the CPU and IsRun.

Link: https://lore.kernel.org/r/20250611224604.313496-61-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 1a9862c7106e..5803f778999f 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -811,7 +811,28 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 	return irq_set_vcpu_affinity(host_irq, NULL);
 }
 
-static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
+enum avic_vcpu_action {
+	/*
+	 * There is no need to differentiate between activate and deactivate,
+	 * as KVM only refreshes AVIC state when the vCPU is scheduled in and
+	 * isn't blocking, i.e. the pCPU must always be (in)valid when AVIC is
+	 * being (de)activated.
+	 */
+	AVIC_TOGGLE_ON_OFF	= BIT(0),
+	AVIC_ACTIVATE		= AVIC_TOGGLE_ON_OFF,
+	AVIC_DEACTIVATE		= AVIC_TOGGLE_ON_OFF,
+
+	/*
+	 * No unique action is required to deal with a vCPU that stops/starts
+	 * running, as IRTEs are configured to generate GALog interrupts at all
+	 * times.
+	 */
+	AVIC_START_RUNNING	= 0,
+	AVIC_STOP_RUNNING	= 0,
+};
+
+static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
+					    enum avic_vcpu_action action)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_kernel_irqfd *irqfd;
@@ -825,11 +846,20 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 	if (list_empty(&svm->ir_list))
 		return;
 
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list)
-		WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
+	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+		void *data = irqfd->irq_bypass_data;
+
+		if (!(action & AVIC_TOGGLE_ON_OFF))
+			WARN_ON_ONCE(amd_iommu_update_ga(cpu, data));
+		else if (cpu >= 0)
+			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu));
+		else
+			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+	}
 }
 
-static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
+			     enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -874,7 +904,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
-	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
+	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
@@ -891,10 +921,10 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (kvm_vcpu_is_blocking(vcpu))
 		return;
 
-	__avic_vcpu_load(vcpu, cpu);
+	__avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING);
 }
 
-static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -916,7 +946,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 	 */
 	spin_lock_irqsave(&svm->ir_list_lock, flags);
 
-	avic_update_iommu_vcpu_affinity(vcpu, -1);
+	avic_update_iommu_vcpu_affinity(vcpu, -1, action);
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 	svm->avic_physical_id_entry = entry;
@@ -942,7 +972,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
 		return;
 
-	__avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -971,41 +1001,18 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
-	bool activated = kvm_vcpu_apicv_active(vcpu);
-	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_kernel_irqfd *irqfd;
-	unsigned long flags;
-
 	if (!enable_apicv)
 		return;
 
+	/* APICv should only be toggled on/off while the vCPU is running. */
+	WARN_ON_ONCE(kvm_vcpu_is_blocking(vcpu));
+
 	avic_refresh_virtual_apic_mode(vcpu);
 
-	if (activated)
-		__avic_vcpu_load(vcpu, vcpu->cpu);
+	if (kvm_vcpu_apicv_active(vcpu))
+		__avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
 	else
-		__avic_vcpu_put(vcpu);
-
-	/*
-	 * Here, we go through the per-vcpu ir_list to update all existing
-	 * interrupt remapping table entry targeting this vcpu.
-	 */
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	if (list_empty(&svm->ir_list))
-		goto out;
-
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		void *data = irqfd->irq_bypass_data;
-
-		if (activated)
-			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-		else
-			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-	}
-out:
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+		__avic_vcpu_put(vcpu, AVIC_DEACTIVATE);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -1031,7 +1038,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 	 * CPU and cause noisy neighbor problems if the VM is sending interrupts
 	 * to the vCPU while it's scheduled out.
 	 */
-	avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
}
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
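
Postscript (not part of the patch): a minimal standalone sketch of the
action-based dispatch the patch introduces, i.e. using one enum param so a
single IRTE walk can handle both "fast" CPU/IsRun updates and full AVIC
on/off transitions. The stub helpers, update_irte(), and main() below are
illustrative stand-ins for the real amd_iommu_*() APIs and KVM plumbing;
only the branching logic mirrors the patch.

/* Userspace model of the per-IRTE branch in avic_update_iommu_vcpu_affinity(). */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum avic_vcpu_action {
	/* A full IRTE rewrite is only needed when AVIC is toggled on/off. */
	AVIC_TOGGLE_ON_OFF = BIT(0),
	AVIC_ACTIVATE      = AVIC_TOGGLE_ON_OFF,
	AVIC_DEACTIVATE    = AVIC_TOGGLE_ON_OFF,

	/* Start/stop running needs only a quick CPU/IsRun update. */
	AVIC_START_RUNNING = 0,
	AVIC_STOP_RUNNING  = 0,
};

/* Stubs standing in for the real AMD IOMMU helpers (hypothetical names). */
static int update_ga(int cpu)           { printf("update_ga(cpu=%d)\n", cpu); return 0; }
static int activate_guest_mode(int cpu) { printf("activate_guest_mode(cpu=%d)\n", cpu); return 0; }
static int deactivate_guest_mode(void)  { printf("deactivate_guest_mode()\n"); return 0; }

/* Dispatch exactly as the patched list_for_each_entry() body does:
 * fast path unless AVIC is being toggled, in which case cpu >= 0
 * distinguishes activate from deactivate (cpu == -1 on put).
 */
static void update_irte(int cpu, enum avic_vcpu_action action)
{
	if (!(action & AVIC_TOGGLE_ON_OFF))
		update_ga(cpu);
	else if (cpu >= 0)
		activate_guest_mode(cpu);
	else
		deactivate_guest_mode();
}

int main(void)
{
	update_irte(3, AVIC_START_RUNNING);  /* vCPU scheduled in   */
	update_irte(-1, AVIC_STOP_RUNNING);  /* vCPU scheduled out  */
	update_irte(3, AVIC_ACTIVATE);       /* APICv toggled on    */
	update_irte(-1, AVIC_DEACTIVATE);    /* APICv toggled off   */
	return 0;
}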