	spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);
}
-static void svm_ir_list_add(struct vcpu_svm *svm,
-			    struct kvm_kernel_irqfd *irqfd,
-			    struct amd_iommu_pi_data *pi)
-{
-	unsigned long flags;
-	u64 entry;
-
-	irqfd->irq_bypass_data = pi->ir_data;
-
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	/*
-	 * Update the target pCPU for IOMMU doorbells if the vCPU is running.
-	 * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
-	 * will update the pCPU info when the vCPU awkened and/or scheduled in.
-	 * See also avic_vcpu_load().
-	 */
-	entry = svm->avic_physical_id_entry;
-	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
-		amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
-				    true, pi->ir_data);
-
-	list_add(&irqfd->vcpu_list, &svm->ir_list);
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
			unsigned int host_irq, uint32_t guest_irq,
			struct kvm_vcpu *vcpu, u32 vector)
@@ ... @@
			.vapic_addr = avic_get_backing_page_address(to_svm(vcpu)),
			.vector = vector,
		};
+		struct vcpu_svm *svm = to_svm(vcpu);
+		u64 entry;
		int ret;
+		/*
+		 * Prevent the vCPU from being scheduled out or migrated until
+		 * the IRTE is updated and its metadata has been added to the
+		 * list of IRQs being posted to the vCPU, to ensure the IRTE
+		 * isn't programmed with stale pCPU/IsRunning information.
+		 */
+		guard(spinlock_irqsave)(&svm->ir_list_lock);
+
		ret = irq_set_vcpu_affinity(host_irq, &pi_data);
		if (ret)
			return ret;
@@ ... @@
			return -EIO;
		}
-		/**
-		 * Here, we successfully setting up vcpu affinity in
-		 * IOMMU guest mode. Now, we need to store the posted
-		 * interrupt information in a per-vcpu ir_list so that
-		 * we can reference to them directly when we update vcpu
-		 * scheduling information in IOMMU irte.
+		/*
+		 * Update the target pCPU for IOMMU doorbells if the vCPU is
+		 * running. If the vCPU is NOT running, i.e. is blocking or
+		 * scheduled out, KVM will update the pCPU info when the vCPU
+		 * is awakened and/or scheduled in. See also avic_vcpu_load().
		 */
-		svm_ir_list_add(to_svm(vcpu), irqfd, &pi_data);
+		entry = svm->avic_physical_id_entry;
+		if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+			amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
+					    true, pi_data.ir_data);
+
+		irqfd->irq_bypass_data = pi_data.ir_data;
+		list_add(&irqfd->vcpu_list, &svm->ir_list);
		return 0;
	}
	return irq_set_vcpu_affinity(host_irq, NULL);
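
The guard(spinlock_irqsave)(...) taken in the new code comes from the kernel's
scope-based cleanup infrastructure (<linux/cleanup.h>, wired up for spinlocks
in <linux/spinlock.h>): the lock is acquired with IRQs saved at the guard
statement and released automatically on every exit from the enclosing scope,
which is what makes the early "return ret" and "return -EIO" paths above safe
without explicit unlock calls. A minimal standalone sketch of that behavior
(demo_lock, demo_state, and demo_update are made up for illustration):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_state;

static int demo_update(int val)
{
	guard(spinlock_irqsave)(&demo_lock);	/* lock + irqsave here */

	if (val < 0)
		return -EINVAL;	/* unlock + irqrestore run automatically */

	demo_state = val;
	return 0;		/* ...and here, at scope exit */
}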
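
For reference, the consumer side of ir_list that the new comment points at: on
vCPU load, KVM walks the per-vCPU list under ir_list_lock and retargets every
posted-interrupt IRTE at the new pCPU. A rough sketch of that walk, assuming
the fields used above (vcpu_list, irq_bypass_data); the helper name is
illustrative and the exact code in avic_vcpu_load() may differ:

static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
					    bool is_running)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_kernel_irqfd *irqfd;

	lockdep_assert_held(&svm->ir_list_lock);

	/* Point each IRTE posting to this vCPU at its new pCPU. */
	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list)
		amd_iommu_update_ga(cpu, is_running, irqfd->irq_bypass_data);
}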