static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
bool x2avic_enabled;
-/*
- * This is a wrapper of struct amd_iommu_ir_data.
- */
-struct amd_svm_iommu_ir {
- struct list_head node; /* Used by SVM for per-vcpu ir_list */
- void *data; /* Storing pointer to struct amd_ir_data */
-};
-
static void avic_activate_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
int ret = 0;
unsigned long flags;
- struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_kernel_irqfd *irqfd;
if (!kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
if (list_empty(&svm->ir_list))
goto out;
- list_for_each_entry(ir, &svm->ir_list, node) {
+ list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
if (activate)
- ret = amd_iommu_activate_guest_mode(ir->data);
+ ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data);
else
- ret = amd_iommu_deactivate_guest_mode(ir->data);
+ ret = amd_iommu_deactivate_guest_mode(irqfd->irq_bypass_data);
if (ret)
break;
}
return ret;
}
-static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+static void svm_ir_list_del(struct vcpu_svm *svm,
+ struct kvm_kernel_irqfd *irqfd,
+ struct amd_iommu_pi_data *pi)
{
unsigned long flags;
- struct amd_svm_iommu_ir *cur;
+ struct kvm_kernel_irqfd *cur;
spin_lock_irqsave(&svm->ir_list_lock, flags);
- list_for_each_entry(cur, &svm->ir_list, node) {
- if (cur->data != pi->ir_data)
+ list_for_each_entry(cur, &svm->ir_list, vcpu_list) {
+ if (cur->irq_bypass_data != pi->ir_data)
+ continue;
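+ /* An IRTE should be tracked by at most one irqfd; skip (and warn) on a bookkeeping mismatch. */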
+ if (WARN_ON_ONCE(cur != irqfd))
continue;
- list_del(&cur->node);
- kfree(cur);
+ list_del(&irqfd->vcpu_list);
break;
}
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
-static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+static int svm_ir_list_add(struct vcpu_svm *svm,
+ struct kvm_kernel_irqfd *irqfd,
+ struct amd_iommu_pi_data *pi)
{
- int ret = 0;
unsigned long flags;
- struct amd_svm_iommu_ir *ir;
u64 entry;
if (WARN_ON_ONCE(!pi->ir_data))
struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
struct vcpu_svm *prev_svm;
- if (!prev_vcpu) {
- ret = -EINVAL;
- goto out;
- }
+ if (!prev_vcpu)
+ return -EINVAL;
prev_svm = to_svm(prev_vcpu);
- svm_ir_list_del(prev_svm, pi);
+ svm_ir_list_del(prev_svm, irqfd, pi);
}
- /**
- * Allocating new amd_iommu_pi_data, which will get
- * add to the per-vcpu ir_list.
- */
- ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
- if (!ir) {
- ret = -ENOMEM;
- goto out;
- }
- ir->data = pi->ir_data;
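+ /* Stash the IOMMU's IRTE metadata in the irqfd itself so that later IRTE updates and removal can find it. */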
+ irqfd->irq_bypass_data = pi->ir_data;
spin_lock_irqsave(&svm->ir_list_lock, flags);
amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
true, pi->ir_data);
- list_add(&ir->node, &svm->ir_list);
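+ /* Link the irqfd to the vCPU so that vCPU migration and AVIC (de)activation update this IRTE. */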
+ list_add(&irqfd->vcpu_list, &svm->ir_list);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-out:
- return ret;
+ return 0;
}
/*
* scheduling information in IOMMU irte.
*/
if (!ret && pi.is_guest_mode)
- svm_ir_list_add(svm, &pi);
+ svm_ir_list_add(svm, irqfd, &pi);
}
if (!ret && svm) {
vcpu = kvm_get_vcpu_by_id(kvm, id);
if (vcpu)
- svm_ir_list_del(to_svm(vcpu), &pi);
+ svm_ir_list_del(to_svm(vcpu), irqfd, &pi);
}
}
out:
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
int ret = 0;
- struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_kernel_irqfd *irqfd;
lockdep_assert_held(&svm->ir_list_lock);
if (list_empty(&svm->ir_list))
return 0;
- list_for_each_entry(ir, &svm->ir_list, node) {
- ret = amd_iommu_update_ga(cpu, r, ir->data);
+ list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+ ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data);
if (ret)
return ret;
}