From: Sean Christopherson
Date: Wed, 11 Jun 2025 22:46:00 +0000 (-0700)
Subject: KVM: SVM: Fold avic_set_pi_irte_mode() into its sole caller
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f2bc961d383bbc26c72f77e3f452da2f9f44dc0d;p=thirdparty%2Fkernel%2Fstable.git

KVM: SVM: Fold avic_set_pi_irte_mode() into its sole caller

Fold avic_set_pi_irte_mode() into avic_refresh_apicv_exec_ctrl() in
anticipation of moving the __avic_vcpu_{load,put}() calls into the
critical section, and because having a one-off helper with a name that's
easily confused with avic_pi_update_irte() is unnecessary.

No functional change intended.

Tested-by: Sairaj Kodilkar
Link: https://lore.kernel.org/r/20250611224604.313496-59-seanjc@google.com
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 808700ce70c3..3a59c5e77a4f 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -729,34 +729,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	avic_handle_ldr_update(vcpu);
 }
 
-static void avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
-{
-	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-	unsigned long flags;
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_kernel_irqfd *irqfd;
-
-	/*
-	 * Here, we go through the per-vcpu ir_list to update all existing
-	 * interrupt remapping table entry targeting this vcpu.
-	 */
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	if (list_empty(&svm->ir_list))
-		goto out;
-
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		void *data = irqfd->irq_bypass_data;
-
-		if (activate)
-			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-		else
-			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-	}
-out:
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
 static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
 {
 	struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
@@ -991,6 +963,10 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
 	bool activated = kvm_vcpu_apicv_active(vcpu);
+	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_kernel_irqfd *irqfd;
+	unsigned long flags;
 
 	if (!enable_apicv)
 		return;
@@ -1002,7 +978,25 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	else
 		avic_vcpu_put(vcpu);
 
-	avic_set_pi_irte_mode(vcpu, activated);
+	/*
+	 * Here, we go through the per-vcpu ir_list to update all existing
+	 * interrupt remapping table entry targeting this vcpu.
+	 */
+	spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+	if (list_empty(&svm->ir_list))
+		goto out;
+
+	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+		void *data = irqfd->irq_bypass_data;
+
+		if (activated)
+			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
+		else
+			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+	}
+out:
+	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
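
For readability only (not part of the patch), below is a sketch of how
avic_refresh_apicv_exec_ctrl() reads once the hunks above are applied. It is
assembled from the added and context lines of this diff; the unchanged lines
that fall between the two hunks are not reproduced here and are marked with a
placeholder comment instead.

void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	bool activated = kvm_vcpu_apicv_active(vcpu);
	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_kernel_irqfd *irqfd;
	unsigned long flags;

	if (!enable_apicv)
		return;

	/*
	 * ... unchanged lines between the two hunks elided; per the hunk
	 * context they end with the "else" arm calling avic_vcpu_put(vcpu) ...
	 */

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entry targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
		void *data = irqfd->irq_bypass_data;

		if (activated)
			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
		else
			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

With the helper gone, the ir_list walk is open-coded in the refresh path,
which makes it straightforward for a later change to move the
__avic_vcpu_{load,put}() calls inside the ir_list_lock critical section, as
the changelog anticipates.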