entry = svm->avic_physical_id_entry;
if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
- true, pi_data.ir_data);
+ pi_data.ir_data);
irqfd->irq_bypass_data = pi_data.ir_data;
list_add(&irqfd->vcpu_list, &svm->ir_list);
return irq_set_vcpu_affinity(host_irq, NULL);
}
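Why it is safe to drop the explicit `true` at this call site: amd_iommu_update_ga() is reached only when the IS_RUNNING bit is set in the vCPU's physical ID entry, so the extracted host APIC ID is by construction a valid (non-negative) pCPU, and the new API infers IsRun from exactly that. A minimal sketch of the equivalent decision, using a hypothetical helper that is not part of the patch:

	/*
	 * Hypothetical helper (illustration only): translate an AVIC
	 * physical ID entry into the cpu argument expected by the new
	 * amd_iommu_update_ga(), i.e. the host APIC ID when the vCPU
	 * is running and -1 (which clears IsRun) otherwise.
	 */
	static int avic_entry_to_cpu(u64 entry)
	{
		if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
			return -1;

		return (int)(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
	}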
-static inline int
-avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
{
int ret = 0;
struct kvm_kernel_irqfd *irqfd;
struct vcpu_svm *svm = to_svm(vcpu);
if (list_empty(&svm->ir_list))
return 0;
list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
- ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data);
+ ret = amd_iommu_update_ga(cpu, irqfd->irq_bypass_data);
if (ret)
return ret;
}
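For context on the loop above: each vCPU keeps the irqfds whose IRTEs post to it on svm->ir_list, linked through the irqfd's vcpu_list node, with the IOMMU's per-IRTE cookie stored in irq_bypass_data (both visible in the first hunk). An abridged sketch of the fields involved, assuming the irqfd type used by this series is struct kvm_kernel_irqfd:

	struct kvm_kernel_irqfd {
		/* ... other fields elided ... */
		struct list_head vcpu_list;	/* node on the target vCPU's ir_list */
		void *irq_bypass_data;		/* cookie handed to amd_iommu_update_ga() */
	};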
WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
- avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+ avic_update_iommu_vcpu_affinity(vcpu, -1);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
svm->avic_physical_id_entry = entry;
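Taken together, the load and put hunks show the whole contract: scheduling in passes the pCPU's APIC ID (setting Destination and IsRun), while scheduling out passes -1 (clearing IsRun, so posted interrupts go to the GA log and the hypervisor can wake the vCPU via the notifier declared in the header hunk below). A hypothetical wrapper, for illustration only:

	/*
	 * Hypothetical illustration (not from the patch): the running
	 * state is now encoded entirely in the cpu argument.
	 */
	static void avic_set_vcpu_running(struct kvm_vcpu *vcpu,
					  int h_physical_id, bool run)
	{
		avic_update_iommu_vcpu_affinity(vcpu, run ? h_physical_id : -1);
	}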
* Update the pCPU information for an IRTE that is configured to post IRQs to
* a vCPU, without issuing an IOMMU invalidation for the IRTE.
*
- * This API is intended to be used when a vCPU is scheduled in/out (or stops
- * running for any reason), to do a fast update of IsRun and (conditionally)
- * Destination.
+ * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
+ * with the pCPU's APIC ID and set IsRun, else clear IsRun. I.e. treat vCPUs
+ * that are associated with a pCPU as running. This API is intended to be used
+ * when a vCPU is scheduled in/out (or stops running for any reason), to do a
+ * fast update of IsRun and (conditionally) Destination.
*
* Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
* and thus don't require an invalidation to ensure the IOMMU consumes fresh
* information.
*/
-int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+int amd_iommu_update_ga(int cpu, void *data)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
if (cpu >= 0) {
entry->lo.fields_vapic.destination =
APICID_TO_IRTE_DEST_LO(cpu);
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cpu);
+ entry->lo.fields_vapic.is_run = true;
+ } else {
+ entry->lo.fields_vapic.is_run = false;
}
- entry->lo.fields_vapic.is_run = is_run;
return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
}
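A side note on the two destination writes above: the IRTE splits the (possibly x2APIC) APIC ID across its two halves. A sketch of the split, assuming the macros are defined along the lines of amd_iommu_types.h:

	/*
	 * Assumed shape of the macros (see amd_iommu_types.h): the APIC ID
	 * is split 24/8 between the low and high IRTE destination fields.
	 */
	#define APICID_TO_IRTE_DEST_LO(x)	((x) & 0xffffff)
	#define APICID_TO_IRTE_DEST_HI(x)	(((x) >> 24) & 0xff)

	/* e.g. APIC ID 0x01000042 -> DEST_LO = 0x000042, DEST_HI = 0x01 */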
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
+extern int amd_iommu_update_ga(int cpu, void *data);
extern int amd_iommu_activate_guest_mode(void *data);
extern int amd_iommu_deactivate_guest_mode(void *data);
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(int cpu, void *data)
{
return 0;
}
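Since the !CONFIG_AMD_IOMMU stub keeps the same two-argument signature and simply returns 0, callers need no #ifdef guards. A hypothetical caller-side sketch (not from the patch):

	/*
	 * Hypothetical caller (illustration only): builds and behaves
	 * identically whether or not CONFIG_AMD_IOMMU is enabled, since
	 * the stub is a no-op returning 0.
	 */
	static int vcpu_sched_out_update_irte(void *ir_data)
	{
		return amd_iommu_update_ga(-1, ir_data);	/* -1: clear IsRun */
	}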