git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: GICv3: Extract LR folding primitive
author: Marc Zyngier <maz@kernel.org>
Thu, 20 Nov 2025 17:25:03 +0000 (17:25 +0000)
committer: Oliver Upton <oupton@kernel.org>
Mon, 24 Nov 2025 22:29:12 +0000 (14:29 -0800)
As we are going to need to handle deactivation for interrupts that
are not in the LRs, split vgic_v3_fold_lr_state() into a helper
that deals with a single interrupt, and the function that loops
over the used LRs.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-14-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/kvm/vgic/vgic-v3.c

index 81d22f615fa6609e1ed8dcd85a621bfdc19ce970..6b7d7b4048f0363651e80a81fb0043a9ac166915 100644 (file)
@@ -33,78 +33,76 @@ static bool lr_signals_eoi_mi(u64 lr_val)
               !(lr_val & ICH_LR_HW);
 }
 
-void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
-       u32 model = vcpu->kvm->arch.vgic.vgic_model;
-       int lr;
-
-       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
-
-       cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;
-
-       for (lr = 0; lr < cpuif->used_lrs; lr++) {
-               u64 val = cpuif->vgic_lr[lr];
-               u32 intid, cpuid;
-               struct vgic_irq *irq;
-               bool is_v2_sgi = false;
-               bool deactivated;
-
-               cpuid = val & GICH_LR_PHYSID_CPUID;
-               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
-
-               if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-                       intid = val & ICH_LR_VIRTUAL_ID_MASK;
-               } else {
-                       intid = val & GICH_LR_VIRTUALID;
-                       is_v2_sgi = vgic_irq_is_sgi(intid);
-               }
+       struct vgic_irq *irq;
+       bool is_v2_sgi = false;
+       bool deactivated;
+       u32 intid;
 
-               /* Notify fds when the guest EOI'ed a level-triggered IRQ */
-               if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
-                       kvm_notify_acked_irq(vcpu->kvm, 0,
-                                            intid - VGIC_NR_PRIVATE_IRQS);
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+               intid = val & ICH_LR_VIRTUAL_ID_MASK;
+       } else {
+               intid = val & GICH_LR_VIRTUALID;
+               is_v2_sgi = vgic_irq_is_sgi(intid);
+       }
 
-               irq = vgic_get_vcpu_irq(vcpu, intid);
-               if (!irq)       /* An LPI could have been unmapped. */
-                       continue;
+       irq = vgic_get_vcpu_irq(vcpu, intid);
+       if (!irq)       /* An LPI could have been unmapped. */
+               return;
 
-               raw_spin_lock(&irq->irq_lock);
+       /* Notify fds when the guest EOI'ed a level-triggered IRQ */
+       if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+               kvm_notify_acked_irq(vcpu->kvm, 0,
+                                    intid - VGIC_NR_PRIVATE_IRQS);
 
+       scoped_guard(raw_spinlock, &irq->irq_lock) {
                /* Always preserve the active bit for !LPIs, note deactivation */
                if (irq->intid >= VGIC_MIN_LPI)
                        val &= ~ICH_LR_ACTIVE_BIT;
                deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
 
-               if (irq->active && is_v2_sgi)
-                       irq->active_source = cpuid;
-
                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
-                   (val & ICH_LR_PENDING_BIT)) {
+                   (val & ICH_LR_PENDING_BIT))
                        irq->pending_latch = true;
 
-                       if (is_v2_sgi)
-                               irq->source |= (1 << cpuid);
-               }
-
                /*
                 * Clear soft pending state when level irqs have been acked.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
                        irq->pending_latch = false;
 
+               if (is_v2_sgi) {
+                       u8 cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val);
+
+                       if (irq->active)
+                               irq->active_source = cpuid;
+
+                       if (val & ICH_LR_PENDING_BIT)
+                               irq->source |= BIT(cpuid);
+               }
+
                /* Handle resampling for mapped interrupts if required */
                vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
 
                irq->on_lr = false;
-
-               raw_spin_unlock(&irq->irq_lock);
-               vgic_put_irq(vcpu->kvm, irq);
        }
 
+       vgic_put_irq(vcpu->kvm, irq);
+}
+
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
+
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
+       for (int lr = 0; lr < cpuif->used_lrs; lr++)
+               vgic_v3_fold_lr(vcpu, cpuif->vgic_lr[lr]);
+
        cpuif->used_lrs = 0;
 }