/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- int used_lrs;
-
/* If nesting, emulate the HW effect from L0 to L1 */
if (vgic_state_is_nested(vcpu)) {
vgic_v3_sync_nested(vcpu);
		return;
	}

if (vcpu_has_nv(vcpu))
vgic_v3_nested_update_mi(vcpu);
- /* An empty ap_list_head implies used_lrs == 0 */
- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
- return;
-
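+	/*
+	 * No more early return for an empty ap_list_head: this assumes
+	 * vgic_save_state(), vgic_fold_lr_state() and vgic_prune_ap_list()
+	 * all cope with an empty state on their own.
+	 */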
if (can_access_vgic_from_kernel())
vgic_save_state(vcpu);
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
- used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
- else
- used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
-
- if (used_lrs)
- vgic_fold_lr_state(vcpu);
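+	/*
+	 * The fold helpers only walk used_lrs LRs, so the unconditional
+	 * call is harmless when no LRs were in use.
+	 */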
+ vgic_fold_lr_state(vcpu);
vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
- /*
- * If there are no virtual interrupts active or pending for this
- * VCPU, then there is no work to do and we can bail out without
- * taking any lock. There is a potential race with someone injecting
- * interrupts to the VCPU, but it is a benign race as the VCPU will
- * either observe the new interrupt before or after doing this check,
- * and introducing additional synchronization mechanism doesn't change
- * this.
- *
- * Note that we still need to go through the whole thing if anything
- * can be directly injected (GICv4).
- */
- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
- !vgic_supports_direct_irqs(vcpu->kvm))
- return;
-
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
- raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
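+	/*
+	 * scoped_guard() releases ap_list_lock once the guarded statement
+	 * completes, replacing the explicit lock/unlock pair.
+	 */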
+ scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
vgic_flush_lr_state(vcpu);
- raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
- }
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);