git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: macb: Remove dedicated IRQ handler for WoL
author: Kevin Hao <haokexin@gmail.com>
Thu, 2 Apr 2026 13:41:25 +0000 (21:41 +0800)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 3 Apr 2026 22:51:05 +0000 (15:51 -0700)
In the current implementation, the suspend/resume path frees the
existing IRQ handler and sets up a dedicated WoL IRQ handler, then
restores the original handler upon resume. This approach is not used
by any other Ethernet driver and unnecessarily complicates the
suspend/resume process. After adjusting the IRQ handler in the previous
patches, we can now handle WoL interrupts without introducing any
overhead in the TX/RX hot path. Therefore, the dedicated WoL IRQ
handler is removed.

I have verified WoL functionality on my AMD ZynqMP board using the
following steps:
  root@amd-zynqmp:~# ifconfig end0 192.168.3.3
  root@amd-zynqmp:~# ethtool -s end0 wol a
  root@amd-zynqmp:~# echo mem >/sys/power/state

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Link: https://patch.msgid.link/20260402-macb-irq-v2-4-942d98ab1154@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/cadence/macb_main.c

index 4fa75e6ed6d6876b405a808a8e2ccfeb00453b3c..6ec7b110dc7219ca337de4c6f0cbe38bb11558b5 100644 (file)
@@ -71,7 +71,8 @@ struct sifive_fu540_macb_mgmt {
                                        | MACB_BIT(TXUBR))
 
 #define MACB_INT_MISC_FLAGS    (MACB_TX_ERR_FLAGS | MACB_BIT(RXUBR) | \
-                                MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP))
+                                MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP) | \
+                                GEM_BIT(WOL) | MACB_BIT(WOL))
 
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN      8
@@ -2025,60 +2026,30 @@ static void macb_hresp_error_task(struct work_struct *work)
        netif_tx_start_all_queues(dev);
 }
 
-static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+static void macb_wol_interrupt(struct macb_queue *queue, u32 status)
 {
-       struct macb_queue *queue = dev_id;
        struct macb *bp = queue->bp;
-       u32 status;
-
-       status = queue_readl(queue, ISR);
-
-       if (unlikely(!status))
-               return IRQ_NONE;
-
-       spin_lock(&bp->lock);
-
-       if (status & MACB_BIT(WOL)) {
-               queue_writel(queue, IDR, MACB_BIT(WOL));
-               macb_writel(bp, WOL, 0);
-               netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
-                           (unsigned int)(queue - bp->queues),
-                           (unsigned long)status);
-               macb_queue_isr_clear(bp, queue, MACB_BIT(WOL));
-               pm_wakeup_event(&bp->pdev->dev, 0);
-       }
-
-       spin_unlock(&bp->lock);
 
-       return IRQ_HANDLED;
+       queue_writel(queue, IDR, MACB_BIT(WOL));
+       macb_writel(bp, WOL, 0);
+       netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+                   (unsigned int)(queue - bp->queues),
+                   (unsigned long)status);
+       macb_queue_isr_clear(bp, queue, MACB_BIT(WOL));
+       pm_wakeup_event(&bp->pdev->dev, 0);
 }
 
-static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+static void gem_wol_interrupt(struct macb_queue *queue, u32 status)
 {
-       struct macb_queue *queue = dev_id;
        struct macb *bp = queue->bp;
-       u32 status;
-
-       status = queue_readl(queue, ISR);
-
-       if (unlikely(!status))
-               return IRQ_NONE;
-
-       spin_lock(&bp->lock);
-
-       if (status & GEM_BIT(WOL)) {
-               queue_writel(queue, IDR, GEM_BIT(WOL));
-               gem_writel(bp, WOL, 0);
-               netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
-                           (unsigned int)(queue - bp->queues),
-                           (unsigned long)status);
-               macb_queue_isr_clear(bp, queue, GEM_BIT(WOL));
-               pm_wakeup_event(&bp->pdev->dev, 0);
-       }
-
-       spin_unlock(&bp->lock);
 
-       return IRQ_HANDLED;
+       queue_writel(queue, IDR, GEM_BIT(WOL));
+       gem_writel(bp, WOL, 0);
+       netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+                   (unsigned int)(queue - bp->queues),
+                   (unsigned long)status);
+       macb_queue_isr_clear(bp, queue, GEM_BIT(WOL));
+       pm_wakeup_event(&bp->pdev->dev, 0);
 }
 
 static int macb_interrupt_misc(struct macb_queue *queue, u32 status)
@@ -2132,6 +2103,14 @@ static int macb_interrupt_misc(struct macb_queue *queue, u32 status)
                macb_queue_isr_clear(bp, queue, MACB_BIT(HRESP));
        }
 
+       if (macb_is_gem(bp)) {
+               if (status & GEM_BIT(WOL))
+                       gem_wol_interrupt(queue, status);
+       } else {
+               if (status & MACB_BIT(WOL))
+                       macb_wol_interrupt(queue, status);
+       }
+
        return 0;
 }
 
@@ -6004,7 +5983,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
        unsigned long flags;
        u32 tmp, ifa_local;
        unsigned int q;
-       int err;
 
        if (!device_may_wakeup(&bp->dev->dev))
                phy_exit(bp->phy);
@@ -6067,39 +6045,15 @@ static int __maybe_unused macb_suspend(struct device *dev)
                        /* write IP address into register */
                        tmp |= MACB_BFEXT(IP, ifa_local);
                }
-               spin_unlock_irqrestore(&bp->lock, flags);
 
-               /* Change interrupt handler and
-                * Enable WoL IRQ on queue 0
-                */
-               devm_free_irq(dev, bp->queues[0].irq, bp->queues);
                if (macb_is_gem(bp)) {
-                       err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
-                                              IRQF_SHARED, netdev->name, bp->queues);
-                       if (err) {
-                               dev_err(dev,
-                                       "Unable to request IRQ %d (error %d)\n",
-                                       bp->queues[0].irq, err);
-                               return err;
-                       }
-                       spin_lock_irqsave(&bp->lock, flags);
                        queue_writel(bp->queues, IER, GEM_BIT(WOL));
                        gem_writel(bp, WOL, tmp);
-                       spin_unlock_irqrestore(&bp->lock, flags);
                } else {
-                       err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
-                                              IRQF_SHARED, netdev->name, bp->queues);
-                       if (err) {
-                               dev_err(dev,
-                                       "Unable to request IRQ %d (error %d)\n",
-                                       bp->queues[0].irq, err);
-                               return err;
-                       }
-                       spin_lock_irqsave(&bp->lock, flags);
                        queue_writel(bp->queues, IER, MACB_BIT(WOL));
                        macb_writel(bp, WOL, tmp);
-                       spin_unlock_irqrestore(&bp->lock, flags);
                }
+               spin_unlock_irqrestore(&bp->lock, flags);
 
                enable_irq_wake(bp->queues[0].irq);
        }
@@ -6141,7 +6095,6 @@ static int __maybe_unused macb_resume(struct device *dev)
        struct macb_queue *queue;
        unsigned long flags;
        unsigned int q;
-       int err;
 
        if (!device_may_wakeup(&bp->dev->dev))
                phy_init(bp->phy);
@@ -6167,17 +6120,6 @@ static int __maybe_unused macb_resume(struct device *dev)
                macb_queue_isr_clear(bp, bp->queues, -1);
                spin_unlock_irqrestore(&bp->lock, flags);
 
-               /* Replace interrupt handler on queue 0 */
-               devm_free_irq(dev, bp->queues[0].irq, bp->queues);
-               err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
-                                      IRQF_SHARED, netdev->name, bp->queues);
-               if (err) {
-                       dev_err(dev,
-                               "Unable to request IRQ %d (error %d)\n",
-                               bp->queues[0].irq, err);
-                       return err;
-               }
-
                disable_irq_wake(bp->queues[0].irq);
 
                /* Now make sure we disable phy before moving