avic_handle_ldr_update(vcpu);
}
-static void avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
-{
- int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
- unsigned long flags;
- struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_kernel_irqfd *irqfd;
-
- /*
- * Here, we go through the per-vcpu ir_list to update all existing
- * interrupt remapping table entry targeting this vcpu.
- */
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
- if (list_empty(&svm->ir_list))
- goto out;
-
- list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
- void *data = irqfd->irq_bypass_data;
-
- if (activate)
- WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
- else
- WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
- }
-out:
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
{
struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
bool activated = kvm_vcpu_apicv_active(vcpu);
+ int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_kernel_irqfd *irqfd;
+ unsigned long flags;
	if (!enable_apicv)
		return;

	if (activated)
		avic_vcpu_load(vcpu, vcpu->cpu);
	else
		avic_vcpu_put(vcpu);
- avic_set_pi_irte_mode(vcpu, activated);
+ /*
+ * Here, we go through the per-vcpu ir_list to update all existing
+	 * interrupt remapping table entries targeting this vcpu.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+ if (list_empty(&svm->ir_list))
+ goto out;
+
+ list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+ void *data = irqfd->irq_bypass_data;
+
+ if (activated)
+ WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
+ else
+ WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+ }
+out:
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)