AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
WRITE_ONCE(kvm_svm->avic_physical_id_table[id], new_entry);
- svm->avic_physical_id_cache = &kvm_svm->avic_physical_id_table[id];
-
return 0;
}
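For context, the pattern this patch adopts throughout is to index the per-VM Physical ID table directly by vcpu_id instead of dereferencing a cached per-vCPU pointer. A minimal sketch of the lockless read side (hypothetical helper name; assumes the kernel's one-page table of u64 entries and the to_kvm_svm() accessor used below):

static u64 avic_get_physical_id_entry(struct kvm_vcpu *vcpu)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

	/* The table is a single page of u64 entries, indexed by vcpu_id. */
	if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(u64) >= PAGE_SIZE))
		return 0;

	/* Pairs with the WRITE_ONCE() updates in avic_vcpu_{load,put}(). */
	return READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
}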
static int svm_ir_list_add(struct vcpu_svm *svm,
			   struct kvm_kernel_irqfd *irqfd,
struct amd_iommu_pi_data *pi)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
unsigned long flags;
u64 entry;
if (WARN_ON_ONCE(!pi->ir_data))
return -EINVAL;
- irqfd->irq_bypass_vcpu = &svm->vcpu;
+ irqfd->irq_bypass_vcpu = vcpu;
irqfd->irq_bypass_data = pi->ir_data;
spin_lock_irqsave(&svm->ir_list_lock, flags);
* will update the pCPU info when the vCPU is awakened and/or scheduled in.
* See also avic_vcpu_load().
*/
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
true, pi->ir_data);
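When the vCPU isn't running at attach time, the IRTE's target pCPU is instead pushed out later by avic_vcpu_load() via avic_update_iommu_vcpu_affinity(), which walks the vCPU's ir_list. A hedged sketch of that walker (the real one lives in arch/x86/kvm/svm/avic.c; the struct field names follow my reading of it and error handling is omitted):

static void example_update_affinity(struct vcpu_svm *svm, int cpu, bool run)
{
	struct amd_svm_iommu_ir *ir;

	lockdep_assert_held(&svm->ir_list_lock);

	/* Retarget every IRTE tracking this vCPU at the (new) pCPU. */
	list_for_each_entry(ir, &svm->ir_list, node)
		amd_iommu_update_ga(cpu, run, ir->data);
}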
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- u64 entry;
+ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
+ u64 entry;
lockdep_assert_preemption_disabled();
if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
- if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+ if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
return;
/*
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
- u64 entry;
+ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
+ u64 entry;
lockdep_assert_preemption_disabled();
- if (WARN_ON_ONCE(!svm->avic_physical_id_cache))
+ if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
return;
/*
* can't be scheduled out and thus avic_vcpu_{put,load}() can't run
* recursively.
*/
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ entry = READ_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id]);
/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
	return;
spin_lock_irqsave(&svm->ir_list_lock, flags);
avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
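A note on the new bounds check, spelled out under the assumption of 4 KiB pages: the single-page table holds PAGE_SIZE / sizeof(u64) = 512 entries, so vcpu_id 0..511 index valid slots and the WARN fires at 512 (512 * 8 == 4096). As a standalone predicate:

static bool avic_vcpu_id_in_bounds(u32 vcpu_id)
{
	/* Assumes 4 KiB pages, i.e. 512 u64 entries per table page. */
	BUILD_BUG_ON(PAGE_SIZE / sizeof(u64) != 512);
	return (u64)vcpu_id * sizeof(u64) < PAGE_SIZE;
}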