From: Marc Zyngier
Date: Thu, 20 Nov 2025 17:25:17 +0000 (+0000)
Subject: KVM: arm64: GICv3: Handle LR overflow when EOImode==0
X-Git-Tag: v6.19-rc1~103^2~1^2~1^2~23
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3cfd59f81e0f3fbdf8a1b2f576bdc63ab6cc3277;p=thirdparty%2Fkernel%2Flinux.git

KVM: arm64: GICv3: Handle LR overflow when EOImode==0

Now that we can identify interrupts that have not made it into the LRs,
it becomes relatively easy to use EOIcount to walk the overflow list.

What is a bit odd is that we compute a fake LR for the original state of
the interrupt, clear the active bit, and feed it into the existing logic
for processing. In a way, this is what would have happened if the
interrupt had been in an LR.

Tested-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Tested-by: Mark Brown
Link: https://msgid.link/20251120172540.2267180-28-maz@kernel.org
Signed-off-by: Oliver Upton
---

diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 312226cc2565d..d4f27f451c8fb 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -112,16 +112,62 @@ static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
 	vgic_put_irq(vcpu->kvm, irq);
 }
 
+static u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);
+
+static void vgic_v3_deactivate_phys(u32 intid)
+{
+	if (cpus_have_final_cap(ARM64_HAS_GICV5_LEGACY))
+		gic_insn(intid | FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, 1), CDDI);
+	else
+		gic_write_dir(intid);
+}
+
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
+	u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr);
+	struct vgic_irq *irq;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 	for (int lr = 0; lr < cpuif->used_lrs; lr++)
 		vgic_v3_fold_lr(vcpu, cpuif->vgic_lr[lr]);
 
+	/*
+	 * EOIMode=0: use EOIcount to emulate deactivation. We are
+	 * guaranteed to deactivate in reverse order of the activation, so
+	 * just pick one active interrupt after the other in the ap_list,
+	 * and replay the deactivation as if the CPU was doing it. We also
+	 * rely on priority drop to have taken place, and the list to be
+	 * sorted by priority.
+	 */
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		u64 lr;
+
+		/*
+		 * I would have loved to write this using a scoped_guard(),
+		 * but using 'continue' here is a total train wreck.
+		 */
+		if (!eoicount) {
+			break;
+		} else {
+			guard(raw_spinlock)(&irq->irq_lock);
+
+			if (!(likely(vgic_target_oracle(irq) == vcpu) &&
+			      irq->active))
+				continue;
+
+			lr = vgic_v3_compute_lr(vcpu, irq) & ~ICH_LR_ACTIVE_BIT;
+		}
+
+		if (lr & ICH_LR_HW)
+			vgic_v3_deactivate_phys(FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+
+		vgic_v3_fold_lr(vcpu, lr);
+		eoicount--;
+	}
+
 	cpuif->used_lrs = 0;
 }
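
Editor's note: for readers who want to play with the idea outside the kernel, below is a
minimal user-space sketch of the EOIcount-driven walk, under simplified assumptions. It is
not KVM code: mock_irq, replay_deactivations and the sample data are hypothetical, the list
is modelled as a priority-sorted array, and the real patch above additionally recomputes the
LR, checks vgic_target_oracle(), and deactivates the physical interrupt for HW-backed
entries.

/*
 * Illustrative sketch only (hypothetical names, not kernel code): each EOI
 * counted in EOIcount retires the next active entry of a priority-sorted
 * list, mimicking the order in which the CPU would have deactivated them.
 */
#include <stdio.h>
#include <stdbool.h>

struct mock_irq {
	unsigned int intid;
	bool active;		/* active, but never made it into an LR */
};

/* Walk the priority-sorted list and retire one active entry per EOI. */
static void replay_deactivations(struct mock_irq *list, int nr,
				 unsigned int eoicount)
{
	for (int i = 0; i < nr && eoicount; i++) {
		if (!list[i].active)
			continue;

		/* Roughly what folding an LR with the active bit cleared does. */
		list[i].active = false;
		eoicount--;
		printf("deactivated INTID %u\n", list[i].intid);
	}
}

int main(void)
{
	struct mock_irq ap_list[] = {
		{ .intid = 27, .active = true },
		{ .intid = 34, .active = true },
		{ .intid = 50, .active = false },
	};

	/* Pretend the guest performed two EOIs that overflowed the LRs. */
	replay_deactivations(ap_list, 3, 2);
	return 0;
}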