irqchip/gic-v4: Fix ordering between vmapp and vpe locks
author		Marc Zyngier <maz@kernel.org>
		Sun, 18 Aug 2024 17:16:25 +0000 (18:16 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
		Tue, 20 Aug 2024 14:57:13 +0000 (16:57 +0200)
The recently established lock ordering mandates that the per-VM
vmapp_lock be acquired before the per-VPE lock.

As it turns out, its_vpe_set_affinity() takes the VPE lock, and
then calls into its_send_vmovp(), which itself takes the vmapp
lock. Obviously, this is a lock order violation.

As its_send_vmovp() is only called from its_vpe_set_affinity(),
hoist the vmapp locking from the former into the latter, restoring
the expected order.
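
For illustration, a minimal sketch of the mandated order, using the
identifiers from drivers/irqchip/irq-gic-v3-its.c (the sketch is not
part of the patch and omits the its_list_map condition added below):

	/* Per-VM mapping lock first... */
	raw_spin_lock(&vpe->its_vm->vmapp_lock);
	/* ...then the per-VPE lock, which guards vpe->col_idx. */
	from = vpe_to_cpuid_lock(vpe, &flags);

	/* ...move the VPE, possibly emitting a VMOVP... */

	vpe_to_cpuid_unlock(vpe, flags);
	raw_spin_unlock(&vpe->its_vm->vmapp_lock);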

Fixes: f0eb154c39471 ("irqchip/gic-v4: Substitute vmovp_lock for a per-VM lock")
Reported-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20240818171625.3030584-1-maz@kernel.org
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 9b34596b3542ed7fb0d1c7ea603c6bd7d83e0c3a..fdec478ba5e70a772daee24c332d5bb1167fda00 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1329,12 +1329,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
                return;
        }
 
-       /*
-        * Protect against concurrent updates of the mapping state on
-        * individual VMs.
-        */
-       guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
-
        /*
         * Yet another marvel of the architecture. If using the
         * its_list "feature", we need to make sure that all ITSs
@@ -3824,7 +3818,14 @@ static int its_vpe_set_affinity(struct irq_data *d,
         * protect us, and that we must ensure nobody samples vpe->col_idx
         * during the update, hence the lock below which must also be
         * taken on any vLPI handling path that evaluates vpe->col_idx.
+        *
+        * Finally, we must protect ourselves against concurrent updates of
+        * the mapping state on this VM should the ITS list be in use (see
+        * the shortcut in its_send_vmovp() otherwise).
         */
+       if (its_list_map)
+               raw_spin_lock(&vpe->its_vm->vmapp_lock);
+
        from = vpe_to_cpuid_lock(vpe, &flags);
        table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
 
@@ -3854,6 +3855,9 @@ out:
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
        vpe_to_cpuid_unlock(vpe, flags);
 
+       if (its_list_map)
+               raw_spin_unlock(&vpe->its_vm->vmapp_lock);
+
        return IRQ_SET_MASK_OK_DONE;
 }
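
Note on the shape of the fix: the removed line used the kernel's
scope-based guard() helper (linux/cleanup.h), placed below the
!its_list_map early return whose tail is visible at the top of the
first hunk, so the lock was only ever taken when the ITS list feature
is in use. The open-coded raw_spin_lock()/raw_spin_unlock() pair
preserves that behaviour via the its_list_map checks, presumably
because a conditional lock held across the whole affinity update does
not map cleanly onto a scope-based guard. A rough sketch of the
resulting control flow (illustrative only, not the upstream source):

	static int its_vpe_set_affinity(struct irq_data *d, ...)
	{
		/*
		 * Serialize mapping updates on this VM, but only when
		 * the its_list "feature" is in use; otherwise
		 * its_send_vmovp() takes its single-command shortcut
		 * and needs no cross-ITS serialization.
		 */
		if (its_list_map)
			raw_spin_lock(&vpe->its_vm->vmapp_lock);

		from = vpe_to_cpuid_lock(vpe, &flags);	/* per-VPE lock */

		/* ...update vpe->col_idx and send VMOVP under both locks... */

		vpe_to_cpuid_unlock(vpe, flags);

		if (its_list_map)
			raw_spin_unlock(&vpe->its_vm->vmapp_lock);

		return IRQ_SET_MASK_OK_DONE;
	}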