From dd598fc1139f7181118719574a4e270e51e0a0eb Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 20 Nov 2025 17:25:09 +0000
Subject: [PATCH] KVM: arm64: Compute vgic state irrespective of the number
 of interrupts

As we are going to rely on the [G]ICH_HCR{,_EL2} register to be
programmed with MI information at all times, slightly de-optimise the
flush/sync code to always be called. This is rather lightweight when
no interrupts are in flight.

Tested-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Tested-by: Mark Brown
Link: https://msgid.link/20251120172540.2267180-20-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/vgic/vgic.c | 35 ++---------------------------------
 1 file changed, 2 insertions(+), 33 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 7ee253a9fb77e..39346baa2677c 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -985,8 +985,6 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	int used_lrs;
-
 	/* If nesting, emulate the HW effect from L0 to L1 */
 	if (vgic_state_is_nested(vcpu)) {
 		vgic_v3_sync_nested(vcpu);
@@ -996,20 +994,10 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu))
 		vgic_v3_nested_update_mi(vcpu);
 
-	/* An empty ap_list_head implies used_lrs == 0 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
-		return;
-
 	if (can_access_vgic_from_kernel())
 		vgic_save_state(vcpu);
 
-	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
-		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
-	else
-		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
-
-	if (used_lrs)
-		vgic_fold_lr_state(vcpu);
+	vgic_fold_lr_state(vcpu);
 
 	vgic_prune_ap_list(vcpu);
 }
@@ -1053,29 +1041,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu))
 		vgic_v3_nested_update_mi(vcpu);
 
-	/*
-	 * If there are no virtual interrupts active or pending for this
-	 * VCPU, then there is no work to do and we can bail out without
-	 * taking any lock. There is a potential race with someone injecting
-	 * interrupts to the VCPU, but it is a benign race as the VCPU will
-	 * either observe the new interrupt before or after doing this check,
-	 * and introducing additional synchronization mechanism doesn't change
-	 * this.
-	 *
-	 * Note that we still need to go through the whole thing if anything
-	 * can be directly injected (GICv4).
-	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
-	    !vgic_supports_direct_irqs(vcpu->kvm))
-		return;
-
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
-		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
 		vgic_flush_lr_state(vcpu);
-		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
-- 
2.47.3
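
A note on the "rather lightweight" claim in the commit message: with
no interrupts in flight, used_lrs is 0 and the per-LR loop in the fold
path never executes, so the now-unconditional call costs little more
than a few loads. Below is a minimal standalone model of that shape;
the struct and function names are simplified stand-ins for
illustration, not the kernel's actual vgic_fold_lr_state():

#include <stdint.h>
#include <stdio.h>

#define MAX_LRS	16

struct cpuif_model {
	uint64_t lr[MAX_LRS];	/* saved list registers */
	unsigned int used_lrs;	/* LRs live on the last guest run */
};

/* Mirrors the shape of the fold path: walk only the LRs that were
 * actually in use, then mark them all free again. */
static void fold_lr_state(struct cpuif_model *cpuif)
{
	for (unsigned int i = 0; i < cpuif->used_lrs; i++)
		printf("fold LR%u: %#llx\n", i,
		       (unsigned long long)cpuif->lr[i]);
	cpuif->used_lrs = 0;
}

int main(void)
{
	struct cpuif_model cpuif = { .used_lrs = 0 };

	fold_lr_state(&cpuif);	/* nothing in flight: loop body never runs */
	return 0;
}

The price of dropping the list_empty() early-outs is thus an empty
loop on the idle path, rather than skipping the calls entirely.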
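
For readers unfamiliar with the flush-side conversion: scoped_guard()
comes from include/linux/cleanup.h and ties the unlock to scope exit
via the compiler's cleanup attribute, replacing the explicit
raw_spin_lock()/raw_spin_unlock() pair. Here is a minimal userspace
sketch of the mechanism, with a pthread mutex standing in for the raw
spinlock; the guard_t/guard_init/guard_cleanup names and the
single-argument macro are invented for illustration (the kernel's
macro takes a lock class name plus arguments):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t *lock;
} guard_t;

static guard_t guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (guard_t){ .lock = lock };
}

static void guard_cleanup(guard_t *g)
{
	pthread_mutex_unlock(g->lock);
}

/* The for-loop trick mirrors the kernel macro: the guard lives only
 * for the following statement/block, and its cleanup handler (the
 * unlock) runs when it goes out of scope, on any exit path. */
#define scoped_guard(lock)						\
	for (guard_t __g __attribute__((cleanup(guard_cleanup))) =	\
	     guard_init(lock), *__done = NULL; !__done;			\
	     __done = (void *)1)

static pthread_mutex_t ap_list_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	scoped_guard(&ap_list_lock)
		printf("lock held here, dropped at end of statement\n");
	return 0;
}

The benefit in the patch is that vgic_flush_lr_state() now runs
unconditionally under the lock, with no explicit unlock path left to
keep in sync once the surrounding conditionals are gone.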