genirq/chip: Prepare for code reduction
author    Thomas Gleixner <tglx@linutronix.de>    Tue, 29 Apr 2025 06:55:04 +0000 (08:55 +0200)
committer Thomas Gleixner <tglx@linutronix.de>    Wed, 7 May 2025 07:08:12 +0000 (09:08 +0200)
The interrupt flow handlers have similar patterns to decide whether to
handle an interrupt or not.

Provide common helper functions to allow removal of duplicated code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20250429065420.682547546@linutronix.de
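
For context, the duplicated pattern that the new helpers capture looks roughly like this in each flow handler. This is a simplified sketch drawn from the hunks below, not literal handler code; the function name example_flow_handler() is hypothetical.

/*
 * Simplified sketch (not literal kernel code) of the sequence each
 * flow handler repeats and that the new helpers factor out.
 */
static void example_flow_handler(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* PM / in-progress / wakeup checks, formerly irq_may_run() */
	if (!irq_can_handle_pm(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/* No action or interrupt disabled: mark pending and bail out */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}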
kernel/irq/chip.c

index 36cf1b09cc847cff2665f81b1d47366fbba79341..4b4ce38cb30f49d7c8d918b71270ad40ef023ea8 100644
@@ -499,7 +499,7 @@ static bool irq_check_poll(struct irq_desc *desc)
        return irq_wait_for_poll(desc);
 }
 
-static bool irq_may_run(struct irq_desc *desc)
+static bool irq_can_handle_pm(struct irq_desc *desc)
 {
        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 
@@ -524,6 +524,25 @@ static bool irq_may_run(struct irq_desc *desc)
        return irq_check_poll(desc);
 }
 
+static inline bool irq_can_handle_actions(struct irq_desc *desc)
+{
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
+               return false;
+       }
+       return true;
+}
+
+static inline bool irq_can_handle(struct irq_desc *desc)
+{
+       if (!irq_can_handle_pm(desc))
+               return false;
+
+       return irq_can_handle_actions(desc);
+}
+
 /**
  *     handle_simple_irq - Simple and software-decoded IRQs.
  *     @desc:  the interrupt description structure for this irq
@@ -539,7 +558,7 @@ void handle_simple_irq(struct irq_desc *desc)
 {
        raw_spin_lock(&desc->lock);
 
-       if (!irq_may_run(desc))
+       if (!irq_can_handle_pm(desc))
                goto out_unlock;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -574,7 +593,7 @@ void handle_untracked_irq(struct irq_desc *desc)
 {
        raw_spin_lock(&desc->lock);
 
-       if (!irq_may_run(desc))
+       if (!irq_can_handle_pm(desc))
                goto out_unlock;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -630,7 +649,7 @@ void handle_level_irq(struct irq_desc *desc)
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);
 
-       if (!irq_may_run(desc))
+       if (!irq_can_handle_pm(desc))
                goto out_unlock;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -695,7 +714,7 @@ void handle_fasteoi_irq(struct irq_desc *desc)
         * can arrive on the new CPU before the original CPU has completed
         * handling the previous one - it may need to be resent.
         */
-       if (!irq_may_run(desc)) {
+       if (!irq_can_handle_pm(desc)) {
                if (irqd_needs_resend_when_in_progress(&desc->irq_data))
                        desc->istate |= IRQS_PENDING;
                goto out;
@@ -790,7 +809,7 @@ void handle_edge_irq(struct irq_desc *desc)
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-       if (!irq_may_run(desc)) {
+       if (!irq_can_handle_pm(desc)) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
@@ -1166,7 +1185,7 @@ void handle_fasteoi_ack_irq(struct irq_desc *desc)
 
        raw_spin_lock(&desc->lock);
 
-       if (!irq_may_run(desc))
+       if (!irq_can_handle_pm(desc))
                goto out;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -1218,7 +1237,7 @@ void handle_fasteoi_mask_irq(struct irq_desc *desc)
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);
 
-       if (!irq_may_run(desc))
+       if (!irq_can_handle_pm(desc))
                goto out;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
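
With the helpers in place, the "code reduction" this change prepares for can fold the repeated sequence into a single irq_can_handle() call. Below is a hypothetical sketch of a converted handler; the actual conversion is left to follow-up patches and is not part of this commit.

/*
 * Hypothetical sketch only: how handle_simple_irq() could look once
 * the duplicated checks are replaced by the combined irq_can_handle()
 * helper introduced above. The real conversion happens in later patches.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* PM/in-progress check plus action/disabled check in one call */
	if (!irq_can_handle(desc))
		goto out_unlock;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}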