git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: GICv2: Extract LR folding primitive
author: Marc Zyngier <maz@kernel.org>
Thu, 20 Nov 2025 17:25:07 +0000 (17:25 +0000)
committer: Oliver Upton <oupton@kernel.org>
Mon, 24 Nov 2025 22:29:12 +0000 (14:29 -0800)
As we are going to need to handle deactivation for interrupts that
are not in the LRs, split vgic_v2_fold_lr_state() into a helper
that deals with a single interrupt, and the function that loops
over the used LRs.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-18-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/kvm/vgic/vgic-v2.c

index a0d803c5b08ae859a5e001e8009950a5f6ef5cc6..fb8efdd4196b192c62001aee7634c40bc66a7a13 100644 (file)
@@ -39,43 +39,23 @@ static bool lr_signals_eoi_mi(u32 lr_val)
               !(lr_val & GICH_LR_HW);
 }
 
-/*
- * transfer the content of the LRs back into the corresponding ap_list:
- * - active bit is transferred as is
- * - pending bit is
- *   - transferred as is in case of edge sensitive IRQs
- *   - set to the line-level (resample time) for level sensitive IRQs
- */
-void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+static void vgic_v2_fold_lr(struct kvm_vcpu *vcpu, u32 val)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
-       int lr;
-
-       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
-
-       cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+       u32 cpuid, intid = val & GICH_LR_VIRTUALID;
+       struct vgic_irq *irq;
+       bool deactivated;
 
-       for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
-               u32 val = cpuif->vgic_lr[lr];
-               u32 cpuid, intid = val & GICH_LR_VIRTUALID;
-               struct vgic_irq *irq;
-               bool deactivated;
+       /* Extract the source vCPU id from the LR */
+       cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val) & 7;
 
-               /* Extract the source vCPU id from the LR */
-               cpuid = val & GICH_LR_PHYSID_CPUID;
-               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
-               cpuid &= 7;
+       /* Notify fds when the guest EOI'ed a level-triggered SPI */
+       if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+               kvm_notify_acked_irq(vcpu->kvm, 0,
+                                    intid - VGIC_NR_PRIVATE_IRQS);
 
-               /* Notify fds when the guest EOI'ed a level-triggered SPI */
-               if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
-                       kvm_notify_acked_irq(vcpu->kvm, 0,
-                                            intid - VGIC_NR_PRIVATE_IRQS);
-
-               irq = vgic_get_vcpu_irq(vcpu, intid);
-
-               raw_spin_lock(&irq->irq_lock);
+       irq = vgic_get_vcpu_irq(vcpu, intid);
 
+       scoped_guard(raw_spinlock, &irq->irq_lock) {
                /* Always preserve the active bit, note deactivation */
                deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -102,11 +82,28 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
 
                irq->on_lr = false;
-
-               raw_spin_unlock(&irq->irq_lock);
-               vgic_put_irq(vcpu->kvm, irq);
        }
 
+       vgic_put_irq(vcpu->kvm, irq);
+}
+
+/*
+ * transfer the content of the LRs back into the corresponding ap_list:
+ * - active bit is transferred as is
+ * - pending bit is
+ *   - transferred as is in case of edge sensitive IRQs
+ *   - set to the line-level (resample time) for level sensitive IRQs
+ */
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
+
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
+       for (int lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++)
+               vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]);
+
        cpuif->used_lrs = 0;
 }