KVM: SVM: Drop superfluous "cache" of AVIC Physical ID entry pointer
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Wed, 11 Jun 2025 22:45:18 +0000 (15:45 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Fri, 20 Jun 2025 20:53:01 +0000 (13:53 -0700)
Drop the vCPU's pointer to its AVIC Physical ID entry, and simply index
the table directly.  Caching a pointer address is completely unnecessary
for performance, and while the field technically caches the result of the
pointer calculation, it's all too easy to misinterpret the name and think
that the field somehow caches the _data_ in the table.

No functional change intended.
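
For illustration only (not part of the commit): a minimal userspace sketch of
the direct-indexing pattern and bounds check the patch adopts.  The struct and
function names below are made-up stand-ins, not KVM's actual types.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Stand-in for the per-VM AVIC Physical ID table: a single page of
     * 64-bit entries, i.e. PAGE_SIZE / sizeof(uint64_t) = 512 slots. */
    struct fake_kvm {
            uint64_t avic_physical_id_table[PAGE_SIZE / sizeof(uint64_t)];
    };

    /* Index the table directly by vCPU ID on every access instead of
     * caching &table[id] at vCPU init.  The bounds check mirrors the
     * patch's WARN_ON_ONCE(): an ID whose byte offset reaches PAGE_SIZE
     * would run off the end of the single-page table. */
    static uint64_t read_physical_id_entry(struct fake_kvm *kvm,
                                           uint32_t vcpu_id)
    {
            if (vcpu_id * sizeof(uint64_t) >= PAGE_SIZE)
                    return 0;       /* out of range; KVM warns and bails */

            return kvm->avic_physical_id_table[vcpu_id];
    }

    int main(void)
    {
            struct fake_kvm kvm = { 0 };

            kvm.avic_physical_id_table[3] = 0xabcd;
            printf("entry[3] = 0x%" PRIx64 "\n",
                   read_physical_id_entry(&kvm, 3));
            return 0;
    }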

Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Reviewed-by: Naveen N Rao (AMD) <naveen@kernel.org>
Link: https://lore.kernel.org/r/20250611224604.313496-17-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index a65f61c5fc0ce6a888f640ef1bc8d2731f01c8e3..baee6d412e78aeb28dafc00be15b32ad1d806593 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -295,8 +295,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
                    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
        WRITE_ONCE(kvm_svm->avic_physical_id_table[id], new_entry);
 
-       svm->avic_physical_id_cache = &kvm_svm->avic_physical_id_table[id];
-
        return 0;
 }
 
@@ -771,13 +769,16 @@ static int svm_ir_list_add(struct vcpu_svm *svm,
                           struct kvm_kernel_irqfd *irqfd,
                           struct amd_iommu_pi_data *pi)
 {
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
        unsigned long flags;
        u64 entry;
 
        if (WARN_ON_ONCE(!pi->ir_data))
                return -EINVAL;
 
-       irqfd->irq_bypass_vcpu = &svm->vcpu;
+       irqfd->irq_bypass_vcpu = vcpu;
        irqfd->irq_bypass_data = pi->ir_data;
 
        spin_lock_irqsave(&svm->ir_list_lock, flags);
@@ -788,7 +789,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm,
         * will update the pCPU info when the vCPU is awakened and/or scheduled in.
         * See also avic_vcpu_load().
         */
-       entry = READ_ONCE(*(svm->avic_physical_id_cache));
+       entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
        if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
                amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
                                    true, pi->ir_data);
@@ -965,17 +966,18 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 
 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       u64 entry;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long flags;
+       u64 entry;
 
        lockdep_assert_preemption_disabled();
 
        if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
                return;
 
-       if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+       if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
                return;
 
        /*
@@ -997,14 +999,14 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         */
        spin_lock_irqsave(&svm->ir_list_lock, flags);
 
-       entry = READ_ONCE(*(svm->avic_physical_id_cache));
+       entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
        WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
        entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 
-       WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+       WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
@@ -1012,13 +1014,14 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       u64 entry;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long flags;
+       u64 entry;
 
        lockdep_assert_preemption_disabled();
 
-       if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+       if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
                return;
 
        /*
@@ -1028,7 +1031,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
         * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
         * recursively.
         */
-       entry = READ_ONCE(*(svm->avic_physical_id_cache));
+       entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
 
        /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
        if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
@@ -1047,7 +1050,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
        avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
 
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
-       WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+       WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ec5d77d42a49cd29e573738039828902cef5c91c..f225d0bed152fba8225a136944cc2b2c292f7709 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -306,7 +306,6 @@ struct vcpu_svm {
 
        u32 ldr_reg;
        u32 dfr_reg;
-       u64 *avic_physical_id_cache;
 
        /*
         * Per-vCPU list of irqfds that are eligible to post IRQs directly to