git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: SVM: Fold avic_set_pi_irte_mode() into its sole caller
Author: Sean Christopherson <seanjc@google.com>
Wed, 11 Jun 2025 22:46:00 +0000 (15:46 -0700)
Committer: Sean Christopherson <seanjc@google.com>
Mon, 23 Jun 2025 16:50:49 +0000 (09:50 -0700)
Fold avic_set_pi_irte_mode() into avic_refresh_apicv_exec_ctrl() in
anticipation of moving the __avic_vcpu_{load,put}() calls into the
critical section, and because having a one-off helper with a name that's
easily confused with avic_pi_update_irte() is unnecessary.

No functional change intended.

Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-59-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/avic.c

index 808700ce70c312d5a14375bbc710ee86d376ba6a..3a59c5e77a4fc6022ce410a28b30e489f541601c 100644 (file)
@@ -729,34 +729,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
        avic_handle_ldr_update(vcpu);
 }
 
-static void avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
-{
-       int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-       unsigned long flags;
-       struct vcpu_svm *svm = to_svm(vcpu);
-       struct kvm_kernel_irqfd *irqfd;
-
-       /*
-        * Here, we go through the per-vcpu ir_list to update all existing
-        * interrupt remapping table entry targeting this vcpu.
-        */
-       spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-       if (list_empty(&svm->ir_list))
-               goto out;
-
-       list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-               void *data = irqfd->irq_bypass_data;
-
-               if (activate)
-                       WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-               else
-                       WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-       }
-out:
-       spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
 static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
 {
        struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
@@ -991,6 +963,10 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        bool activated = kvm_vcpu_apicv_active(vcpu);
+       int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct kvm_kernel_irqfd *irqfd;
+       unsigned long flags;
 
        if (!enable_apicv)
                return;
@@ -1002,7 +978,25 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        else
                avic_vcpu_put(vcpu);
 
-       avic_set_pi_irte_mode(vcpu, activated);
+       /*
+        * Here, we go through the per-vcpu ir_list to update all existing
+        * interrupt remapping table entry targeting this vcpu.
+        */
+       spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+       if (list_empty(&svm->ir_list))
+               goto out;
+
+       list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+               void *data = irqfd->irq_bypass_data;
+
+               if (activated)
+                       WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
+               else
+                       WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+       }
+out:
+       spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)