git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: SVM: Use vcpu_idx, not vcpu_id, for GA log tag/metadata
authorSean Christopherson <seanjc@google.com>
Wed, 11 Jun 2025 22:45:58 +0000 (15:45 -0700)
committerSean Christopherson <seanjc@google.com>
Mon, 23 Jun 2025 16:50:48 +0000 (09:50 -0700)
Use a vCPU's index, not its ID, for the GA log tag/metadata that's used to
find and kick vCPUs when a device posted interrupt serves as a wake event.
Lookups on a vCPU index are O(fast) (not sure what xa_load() actually
provides), whereas a vCPU ID lookup is O(n) if a vCPU's ID doesn't match
its index.

Unlike the Physical APIC Table, which is accessed by hardware when
virtualizing IPIs, hardware doesn't consume the GA tag, i.e. KVM _must_
use APIC IDs to fill the Physical APIC Table, but KVM has free rein over
the format/meaning of the GA tag.

Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-57-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/avic.c

index 4b359a4a7e400cd9e95e9f53c45328096edd89c7..808700ce70c312d5a14375bbc710ee86d376ba6a 100644 (file)
 #include "svm.h"
 
 /*
- * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
- * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
- * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
+ * Encode the arbitrary VM ID and the vCPU's _index_ into the GATag so that
+ * KVM can retrieve the correct vCPU from a GALog entry if an interrupt can't
+ * be delivered, e.g. because the vCPU isn't running.  Use the vCPU's index
+ * instead of its ID (a.k.a. its default APIC ID), as KVM is guaranteed a fast
+ * lookup on the index, whereas vCPUs whose index doesn't match their ID need
+ * to walk the entire xarray of vCPUs in the worst case scenario.
  *
- * For the vCPU ID, use however many bits are currently allowed for the max
+ * For the vCPU index, use however many bits are currently allowed for the max
  * guest physical APIC ID (limited by the size of the physical ID table), and
  * use whatever bits remain to assign arbitrary AVIC IDs to VMs.  Note, the
  * size of the GATag is defined by hardware (32 bits), but is an opaque value
  * as far as hardware is concerned.
  */
-#define AVIC_VCPU_ID_MASK              AVIC_PHYSICAL_MAX_INDEX_MASK
+#define AVIC_VCPU_IDX_MASK             AVIC_PHYSICAL_MAX_INDEX_MASK
 
 #define AVIC_VM_ID_SHIFT               HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
 #define AVIC_VM_ID_MASK                        (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
 
 #define AVIC_GATAG_TO_VMID(x)          ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
-#define AVIC_GATAG_TO_VCPUID(x)                (x & AVIC_VCPU_ID_MASK)
+#define AVIC_GATAG_TO_VCPUIDX(x)       (x & AVIC_VCPU_IDX_MASK)
 
-#define __AVIC_GATAG(vm_id, vcpu_id)   ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
-                                        ((vcpu_id) & AVIC_VCPU_ID_MASK))
-#define AVIC_GATAG(vm_id, vcpu_id)                                     \
+#define __AVIC_GATAG(vm_id, vcpu_idx)  ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
+                                        ((vcpu_idx) & AVIC_VCPU_IDX_MASK))
+#define AVIC_GATAG(vm_id, vcpu_idx)                                    \
 ({                                                                     \
-       u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id);                      \
+       u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_idx);                     \
                                                                        \
-       WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id));        \
+       WARN_ON_ONCE(AVIC_GATAG_TO_VCPUIDX(ga_tag) != (vcpu_idx));      \
        WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id));            \
        ga_tag;                                                         \
 })
 
-static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
+static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_IDX_MASK) == -1u);
 
 static bool force_avic;
 module_param_unsafe(force_avic, bool, 0444);
@@ -140,16 +143,16 @@ int avic_ga_log_notifier(u32 ga_tag)
        struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
-       u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+       u32 vcpu_idx = AVIC_GATAG_TO_VCPUIDX(ga_tag);
 
-       pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
-       trace_kvm_avic_ga_log(vm_id, vcpu_id);
+       pr_debug("SVM: %s: vm_id=%#x, vcpu_idx=%#x\n", __func__, vm_id, vcpu_idx);
+       trace_kvm_avic_ga_log(vm_id, vcpu_idx);
 
        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
                if (kvm_svm->avic_vm_id != vm_id)
                        continue;
-               vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
+               vcpu = kvm_get_vcpu(&kvm_svm->kvm, vcpu_idx);
                break;
        }
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
@@ -786,7 +789,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                 */
                struct amd_iommu_pi_data pi_data = {
                        .ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
-                                            vcpu->vcpu_id),
+                                            vcpu->vcpu_idx),
                        .is_guest_mode = kvm_vcpu_apicv_active(vcpu),
                        .vapic_addr = avic_get_backing_page_address(to_svm(vcpu)),
                        .vector = vector,