From: Thomas Gleixner
Date: Tue, 29 Apr 2025 06:55:04 +0000 (+0200)
Subject: genirq/chip: Prepare for code reduction
X-Git-Tag: v6.16-rc1~189^2~44
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a6d8d0d12e1942a9403a0e79c87c161aa801d1a7;p=thirdparty%2Flinux.git

genirq/chip: Prepare for code reduction

The interrupt flow handlers have similar patterns to decide whether to
handle an interrupt or not. Provide common helper functions to allow
removal of duplicated code.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/all/20250429065420.682547546@linutronix.de
---

diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 36cf1b09cc847..4b4ce38cb30f4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -499,7 +499,7 @@ static bool irq_check_poll(struct irq_desc *desc)
 	return irq_wait_for_poll(desc);
 }
 
-static bool irq_may_run(struct irq_desc *desc)
+static bool irq_can_handle_pm(struct irq_desc *desc)
 {
 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 
@@ -524,6 +524,25 @@ static bool irq_may_run(struct irq_desc *desc)
 	return irq_check_poll(desc);
 }
 
+static inline bool irq_can_handle_actions(struct irq_desc *desc)
+{
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		return false;
+	}
+	return true;
+}
+
+static inline bool irq_can_handle(struct irq_desc *desc)
+{
+	if (!irq_can_handle_pm(desc))
+		return false;
+
+	return irq_can_handle_actions(desc);
+}
+
 /**
  * handle_simple_irq - Simple and software-decoded IRQs.
  * @desc:	the interrupt description structure for this irq
@@ -539,7 +558,7 @@ void handle_simple_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -574,7 +593,7 @@ void handle_untracked_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -630,7 +649,7 @@ void handle_level_irq(struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -695,7 +714,7 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 	 * can arrive on the new CPU before the original CPU has completed
 	 * handling the previous one - it may need to be resent.
 	 */
-	if (!irq_may_run(desc)) {
+	if (!irq_can_handle_pm(desc)) {
 		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
 			desc->istate |= IRQS_PENDING;
 		goto out;
@@ -790,7 +809,7 @@ void handle_edge_irq(struct irq_desc *desc)
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-	if (!irq_may_run(desc)) {
+	if (!irq_can_handle_pm(desc)) {
 		desc->istate |= IRQS_PENDING;
 		mask_ack_irq(desc);
 		goto out_unlock;
@@ -1166,7 +1185,7 @@ void handle_fasteoi_ack_irq(struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -1218,7 +1237,7 @@ void handle_fasteoi_mask_irq(struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
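
For illustration only (not part of this patch): once a flow handler is
converted to the combined helper, the irq_can_handle_pm() check, the clearing
of IRQS_REPLAY/IRQS_WAITING and the "no action or interrupt disabled" test
collapse into a single irq_can_handle() call. A minimal sketch for
handle_simple_irq(), assuming the existing lock/goto structure is kept; the
actual conversions happen in follow-up patches of this series and may differ
in detail:

/* Sketch only - shows the intended code reduction, not the final change. */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/*
	 * irq_can_handle() covers the in-progress/wakeup-armed check,
	 * clears IRQS_REPLAY/IRQS_WAITING and marks the interrupt
	 * pending when there is no action or it is disabled.
	 */
	if (!irq_can_handle(desc))
		goto out_unlock;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}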