        spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);
}

-static int svm_ir_list_add(struct vcpu_svm *svm,
-                          struct kvm_kernel_irqfd *irqfd,
-                          struct amd_iommu_pi_data *pi)
+static void svm_ir_list_add(struct vcpu_svm *svm,
+                           struct kvm_kernel_irqfd *irqfd,
+                           struct amd_iommu_pi_data *pi)
{
        unsigned long flags;
        u64 entry;

-       if (WARN_ON_ONCE(!pi->ir_data))
-               return -EINVAL;
-
        irqfd->irq_bypass_data = pi->ir_data;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_add(&irqfd->vcpu_list, &svm->ir_list);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
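
The first hunk turns svm_ir_list_add() into a void function: once the !pi->ir_data check moves to the caller (second hunk below), the helper has no remaining failure mode, so the caller owns both the validation and the rollback. A minimal, standalone sketch of that refactoring pattern, using hypothetical names rather than anything from the kernel:

#include <stdio.h>
#include <errno.h>

struct list_entry {
        int data;
};

/* Infallible helper: by the time it runs the caller has already
 * validated 'data', so there is no error left to report (mirrors
 * the now-void svm_ir_list_add() above). */
static void entry_record(struct list_entry *e, int data)
{
        e->data = data;
}

static int entry_attach(struct list_entry *e, int data)
{
        /* Caller-side check with rollback: undo any prior setup and
         * bail before invoking the void helper, as avic_pi_update_irte()
         * does with irq_set_vcpu_affinity(host_irq, NULL) and -EIO. */
        if (!data)
                return -EIO;

        entry_record(e, data);
        return 0;
}

int main(void)
{
        struct list_entry e;

        printf("valid:   %d\n", entry_attach(&e, 42)); /* 0 */
        printf("invalid: %d\n", entry_attach(&e, 0));  /* -EIO, i.e. -5 */
        return 0;
}

The upside is that the error handling lives next to the code that can actually undo the partial setup, instead of deep in a helper that has no way to revert the IRTE.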
int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                if (ret)
                        return ret;

+               /*
+                * Revert to legacy mode if the IOMMU didn't provide metadata
+                * for the IRTE, which KVM needs to keep the IRTE up-to-date,
+                * e.g. if the vCPU is migrated or AVIC is disabled.
+                */
+               if (WARN_ON_ONCE(!pi_data.ir_data)) {
+                       irq_set_vcpu_affinity(host_irq, NULL);
+                       return -EIO;
+               }
+
                /*
                 * Here, we have successfully set up vcpu affinity in
                 * IOMMU guest mode. Now, we need to store the posted
                 * interrupt information in a per-vcpu ir_list so that
                 * we can reference it directly when we update vcpu
                 * scheduling information in IOMMU irte.
                 */
-               return svm_ir_list_add(to_svm(vcpu), irqfd, &pi_data);
+               svm_ir_list_add(to_svm(vcpu), irqfd, &pi_data);
+               return 0;
        }
        return irq_set_vcpu_affinity(host_irq, NULL);
}
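
Pieced together from the hunks, the updated tail of avic_pi_update_irte() would read roughly as below. This is a reconstruction, not verbatim kernel source: the code that fills pi_data and produces ret via irq_set_vcpu_affinity() lies outside this diff, so the first line is an assumption from context.

                ret = irq_set_vcpu_affinity(host_irq, &pi_data); /* assumed, not shown in the diff */
                if (ret)
                        return ret;

                /*
                 * Revert to legacy mode if the IOMMU didn't provide metadata
                 * for the IRTE, which KVM needs to keep the IRTE up-to-date.
                 */
                if (WARN_ON_ONCE(!pi_data.ir_data)) {
                        irq_set_vcpu_affinity(host_irq, NULL);
                        return -EIO;
                }

                /* Stash the IRTE metadata on the per-vCPU list. */
                svm_ir_list_add(to_svm(vcpu), irqfd, &pi_data);
                return 0;
        }
        return irq_set_vcpu_affinity(host_irq, NULL);

Either failure path leaves the IRTE in legacy (remapped) mode, so nothing is left half-configured.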