KVM: arm64: Spin off release helper from vgic_put_irq()
author		Oliver Upton <oliver.upton@linux.dev>
		Fri, 5 Sep 2025 10:05:28 +0000 (03:05 -0700)
committer	Oliver Upton <oliver.upton@linux.dev>
		Wed, 10 Sep 2025 09:56:20 +0000 (02:56 -0700)
Spin off the release implementation from vgic_put_irq() to prepare for a
more involved fix for lock ordering, such that it may be unnested from
raw spinlocks. This has the minor functional change of doing call_rcu()
behind the xa_lock, although it shouldn't be consequential.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250905100531.282980-4-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/vgic/vgic.c

index a1d6fab895c4530ced5720eef2028d95b7c2b003..ec4d70936a5bd9646412716096247ce226ae907e 100644
@@ -114,22 +114,32 @@ struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
        return vgic_get_irq(vcpu->kvm, intid);
 }
 
+static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
+{
+       lockdep_assert_held(&dist->lpi_xa.xa_lock);
+       __xa_erase(&dist->lpi_xa, irq->intid);
+       kfree_rcu(irq, rcu);
+}
+
+static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+       if (irq->intid < VGIC_MIN_LPI)
+               return false;
+
+       return refcount_dec_and_test(&irq->refcount);
+}
+
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;
 
-       if (irq->intid < VGIC_MIN_LPI)
-               return;
-
-       if (!refcount_dec_and_test(&irq->refcount))
+       if (!__vgic_put_irq(kvm, irq))
                return;
 
        xa_lock_irqsave(&dist->lpi_xa, flags);
-       __xa_erase(&dist->lpi_xa, irq->intid);
+       vgic_release_lpi_locked(dist, irq);
        xa_unlock_irqrestore(&dist->lpi_xa, flags);
-
-       kfree_rcu(irq, rcu);
 }
 
 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
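
With the release path split out, a path that already holds the LPI xarray's
xa_lock could drop its reference and, on the final put, release the LPI
without the lock/unlock sequence in vgic_put_irq(). The following is only a
minimal sketch of such a hypothetical caller inside vgic.c (the function name
example_put_irq_xa_locked is invented for illustration and is not part of
this series):

/*
 * Hypothetical illustration only, not code from this commit: a caller in
 * vgic.c that already holds dist->lpi_xa.xa_lock drops its reference and,
 * when it was the last one, releases the LPI without re-taking the lock.
 */
static void example_put_irq_xa_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	lockdep_assert_held(&dist->lpi_xa.xa_lock);

	/* __vgic_put_irq() returns true only when the last reference is dropped */
	if (!__vgic_put_irq(kvm, irq))
		return;

	/* Erase the LPI from the xarray and free it after an RCU grace period */
	vgic_release_lpi_locked(dist, irq);
}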