genirq/resend: Switch to lock guards
author    Thomas Gleixner <tglx@linutronix.de>
          Tue, 29 Apr 2025 06:54:55 +0000 (08:54 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Wed, 7 May 2025 07:08:11 +0000 (09:08 +0200)
Convert all lock/unlock pairs to guards and tidy up the code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20250429065420.312487167@linutronix.de
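
For reference, guard() comes from the cleanup.h infrastructure: it acquires the lock through a guard class and releases it automatically when the guard variable goes out of scope, so the explicit unlock (and any duplicated unlock on error paths) disappears. A minimal sketch of the before/after shape, using a stand-in lock rather than the code below:

    #include <linux/cleanup.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);

    /* Before: explicit lock/unlock pair. */
    static void demo_before(void)
    {
            raw_spin_lock_irq(&demo_lock);
            /* critical section */
            raw_spin_unlock_irq(&demo_lock);
    }

    /* After: the guard unlocks automatically when the scope ends. */
    static void demo_after(void)
    {
            guard(raw_spinlock_irq)(&demo_lock);
            /* critical section */
    }
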
kernel/irq/resend.c

index 1b7fa72968bd64a2ae0e1b4e3510705043b8f4cf..ca9cc1b806a9db19d60c08b2ce574b295be7b7b6 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -30,18 +30,17 @@ static DEFINE_RAW_SPINLOCK(irq_resend_lock);
  */
 static void resend_irqs(struct tasklet_struct *unused)
 {
-       struct irq_desc *desc;
-
-       raw_spin_lock_irq(&irq_resend_lock);
+       guard(raw_spinlock_irq)(&irq_resend_lock);
        while (!hlist_empty(&irq_resend_list)) {
-               desc = hlist_entry(irq_resend_list.first, struct irq_desc,
-                                  resend_node);
+               struct irq_desc *desc;
+
+               desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node);
                hlist_del_init(&desc->resend_node);
+
                raw_spin_unlock(&irq_resend_lock);
                desc->handle_irq(desc);
                raw_spin_lock(&irq_resend_lock);
        }
-       raw_spin_unlock_irq(&irq_resend_lock);
 }
 
 /* Tasklet to handle resend: */
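
Note the shape of resend_irqs() above: the guard owns irq_resend_lock for the whole function, but the loop body still open-codes raw_spin_unlock()/raw_spin_lock() around desc->handle_irq() so the handler runs without the lock held (interrupts stay disabled, since the plain unlock does not re-enable them). When the loop terminates with the lock held, the guard performs the final unlock and re-enables interrupts. A condensed, illustrative sketch of that lock-break pattern (demo_lock and the helpers are stand-ins):

    static void demo_lock_break(void)
    {
            guard(raw_spinlock_irq)(&demo_lock);    /* lock + disable irqs */
            while (demo_more_work()) {
                    /* ...dequeue one item under the lock... */
                    raw_spin_unlock(&demo_lock);    /* drop only the lock */
                    demo_handle_item();             /* runs unlocked, irqs off */
                    raw_spin_lock(&demo_lock);      /* retake before next check */
            }
    }       /* guard cleanup: raw_spin_unlock_irq(&demo_lock) */
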
@@ -75,19 +74,18 @@ static int irq_sw_resend(struct irq_desc *desc)
        }
 
        /* Add to resend_list and activate the softirq: */
-       raw_spin_lock(&irq_resend_lock);
-       if (hlist_unhashed(&desc->resend_node))
-               hlist_add_head(&desc->resend_node, &irq_resend_list);
-       raw_spin_unlock(&irq_resend_lock);
+       scoped_guard(raw_spinlock, &irq_resend_lock) {
+               if (hlist_unhashed(&desc->resend_node))
+                       hlist_add_head(&desc->resend_node, &irq_resend_list);
+       }
        tasklet_schedule(&resend_tasklet);
        return 0;
 }
 
 void clear_irq_resend(struct irq_desc *desc)
 {
-       raw_spin_lock(&irq_resend_lock);
+       guard(raw_spinlock)(&irq_resend_lock);
        hlist_del_init(&desc->resend_node);
-       raw_spin_unlock(&irq_resend_lock);
 }
 
 void irq_resend_init(struct irq_desc *desc)
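
Two guard flavors appear above: scoped_guard() in irq_sw_resend() confines the lock to an explicit block, so tasklet_schedule() still runs after the lock has been dropped, exactly as before; the plain guard() in clear_irq_resend() holds the lock until the function returns. A small illustrative sketch of the scoped form (names are stand-ins):

    static void demo_scoped(void)
    {
            scoped_guard(raw_spinlock, &demo_lock) {
                    /* short critical section; unlocked at the closing brace */
                    demo_enqueue_item();
            }
            /* lock already released here */
            demo_kick_worker();
    }
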
@@ -172,30 +170,24 @@ int check_irq_resend(struct irq_desc *desc, bool inject)
  */
 int irq_inject_interrupt(unsigned int irq)
 {
-       struct irq_desc *desc;
-       unsigned long flags;
-       int err;
+       int err = -EINVAL;
 
        /* Try the state injection hardware interface first */
        if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
                return 0;
 
        /* That failed, try via the resend mechanism */
-       desc = irq_get_desc_buslock(irq, &flags, 0);
-       if (!desc)
-               return -EINVAL;
+       scoped_irqdesc_get_and_buslock(irq, 0) {
+               struct irq_desc *desc = scoped_irqdesc;
 
-       /*
-        * Only try to inject when the interrupt is:
-        *  - not NMI type
-        *  - activated
-        */
-       if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data))
-               err = -EINVAL;
-       else
-               err = check_irq_resend(desc, true);
-
-       irq_put_desc_busunlock(desc, flags);
+               /*
+                * Only try to inject when the interrupt is:
+                *  - not NMI type
+                *  - activated
+                */
+               if (!irq_is_nmi(desc) && irqd_is_activated(&desc->irq_data))
+                       err = check_irq_resend(desc, true);
+       }
        return err;
 }
 EXPORT_SYMBOL_GPL(irq_inject_interrupt);
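
In irq_inject_interrupt(), scoped_irqdesc_get_and_buslock() replaces the irq_get_desc_buslock()/irq_put_desc_busunlock() pair: the body runs only when the descriptor lookup succeeds, scoped_irqdesc names the descriptor inside the scope, and the bus unlock and descriptor release happen on scope exit. That is why err is now pre-initialized to -EINVAL: a failed lookup simply skips the scope and falls through to return err, and the same value covers the NMI/not-activated case, which lets the condition be written positively. Conceptually, such a guard can be modeled as a run-at-most-once for-loop scope along these lines (an illustrative sketch with hypothetical helpers, not the kernel's actual macro):

    /*
     * The body runs at most once, and only if the lookup succeeds;
     * the release step runs when control leaves the scope normally.
     */
    #define scoped_demo_desc(irq)                                           \
            for (struct irq_desc *scoped_irqdesc = demo_get_locked(irq);    \
                 scoped_irqdesc;                                            \
                 demo_put_unlock(scoped_irqdesc), scoped_irqdesc = NULL)

The kernel builds these scopes on cleanup.h guard classes rather than a bare for-loop, so the release also runs on early returns out of the scope.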