git.ipfire.org Git - thirdparty/linux.git/commitdiff
genirq: Move irq_wait_for_poll() to call site
author Thomas Gleixner <tglx@linutronix.de>
Fri, 18 Jul 2025 18:54:08 +0000 (20:54 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 22 Jul 2025 12:30:42 +0000 (14:30 +0200)
Move it to the call site so that the waiting for the INPROGRESS flag can be
reused by an upcoming mitigation for a potential live lock in the edge type
handler.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Liangyan <liangyan.peng@bytedance.com>
Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
Link: https://lore.kernel.org/all/20250718185311.948555026@linutronix.de
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/spurious.c

index 5bb26fc5368ba978d2aeff759c2f8aec7d9fdc54..290244ca28dd4b2abb0943bd94dc87322e8cf31b 100644 (file)
@@ -457,11 +457,21 @@ void unmask_threaded_irq(struct irq_desc *desc)
        unmask_irq(desc);
 }
 
-static bool irq_check_poll(struct irq_desc *desc)
-{
-       if (!(desc->istate & IRQS_POLL_INPROGRESS))
-               return false;
-       return irq_wait_for_poll(desc);
+/* Busy wait until INPROGRESS is cleared */
+static bool irq_wait_on_inprogress(struct irq_desc *desc)
+{
+       if (IS_ENABLED(CONFIG_SMP)) {
+               do {
+                       raw_spin_unlock(&desc->lock);
+                       while (irqd_irq_inprogress(&desc->irq_data))
+                               cpu_relax();
+                       raw_spin_lock(&desc->lock);
+               } while (irqd_irq_inprogress(&desc->irq_data));
+
+               /* Might have been disabled in meantime */
+               return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+       }
+       return false;
 }
 
 static bool irq_can_handle_pm(struct irq_desc *desc)
@@ -481,10 +491,15 @@ static bool irq_can_handle_pm(struct irq_desc *desc)
        if (irq_pm_check_wakeup(desc))
                return false;
 
-       /*
-        * Handle a potential concurrent poll on a different core.
-        */
-       return irq_check_poll(desc);
+       /* Check whether the interrupt is polled on another CPU */
+       if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) {
+               if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+                             "irq poll in progress on cpu %d for irq %d\n",
+                             smp_processor_id(), desc->irq_data.irq))
+                       return false;
+               return irq_wait_on_inprogress(desc);
+       }
+       return false;
 }
 
 static inline bool irq_can_handle_actions(struct irq_desc *desc)
index aebfe225c9a6d4abceefc6465e55daf4719b149d..82b0d67022c4610524e2ea5aa5d2b3dba8684efd 100644 (file)
@@ -20,6 +20,7 @@
 #define istate core_internal_state__do_not_mess_with_it
 
 extern bool noirqdebug;
+extern int irq_poll_cpu;
 
 extern struct irqaction chained_action;
 
@@ -112,7 +113,6 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
 int check_irq_resend(struct irq_desc *desc, bool inject);
 void clear_irq_resend(struct irq_desc *desc);
 void irq_resend_init(struct irq_desc *desc);
-bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 
 void wake_threads_waitq(struct irq_desc *desc);
index 8f26982e730075f9fdf6c19329f8d1ec8449cad3..73280ccb74b0856385b3a4ef994382ed74d8ef9f 100644 (file)
@@ -19,44 +19,9 @@ static int irqfixup __read_mostly;
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(struct timer_list *unused);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
-static int irq_poll_cpu;
+int irq_poll_cpu;
 static atomic_t irq_poll_active;
 
-/*
- * We wait here for a poller to finish.
- *
- * If the poll runs on this CPU, then we yell loudly and return
- * false. That will leave the interrupt line disabled in the worst
- * case, but it should never happen.
- *
- * We wait until the poller is done and then recheck disabled and
- * action (about to be disabled). Only if it's still active, we return
- * true and let the handler run.
- */
-bool irq_wait_for_poll(struct irq_desc *desc)
-{
-       lockdep_assert_held(&desc->lock);
-
-       if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
-                     "irq poll in progress on cpu %d for irq %d\n",
-                     smp_processor_id(), desc->irq_data.irq))
-               return false;
-
-#ifdef CONFIG_SMP
-       do {
-               raw_spin_unlock(&desc->lock);
-               while (irqd_irq_inprogress(&desc->irq_data))
-                       cpu_relax();
-               raw_spin_lock(&desc->lock);
-       } while (irqd_irq_inprogress(&desc->irq_data));
-       /* Might have been disabled in meantime */
-       return !irqd_irq_disabled(&desc->irq_data) && desc->action;
-#else
-       return false;
-#endif
-}
-
-
 /*
  * Recovery handler for misrouted interrupts.
  */