net: macb: Fix tx_ptr_lock locking
author     Sean Anderson <sean.anderson@linux.dev>
           Fri, 29 Aug 2025 14:35:21 +0000 (10:35 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 9 Sep 2025 16:58:08 +0000 (18:58 +0200)
[ Upstream commit 6bc8a5098bf4a365c4086a4a4130bfab10a58260 ]

macb_start_xmit and macb_tx_poll can be called with bottom-halves
disabled (e.g. from softirq) as well as with interrupts disabled (with
netpoll). Because of this, all other functions taking tx_ptr_lock must
use spin_lock_irqsave.

Fixes: 138badbc21a0 ("net: macb: use NAPI for TX completion path")
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250829143521.1686062-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
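
For reference, a minimal sketch of the locking pattern the patch converges on
(not part of the commit; example_tx_path is a hypothetical caller, the other
identifiers follow the driver, and the driver's internal headers are assumed):

/* Every tx_ptr_lock user may run in process context, in softirq context with
 * bottom-halves disabled, or via netpoll with interrupts disabled, so the
 * outer lock must save and restore the IRQ state.
 */
static void example_tx_path(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_ptr_lock, flags);

	/* IRQs are already off here, so the nested bp->lock uses a plain
	 * spin_lock() instead of the old spin_lock_irq(), whose unlock would
	 * have unconditionally re-enabled IRQs inside the irqsave region.
	 */
	spin_lock(&bp->lock);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
	spin_unlock(&bp->lock);

	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}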
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6c2d69ef1a8dbd9721232a24cd1856a38cce477a..f7e8c08d84415959755ded77c3962d769e16c10e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1234,11 +1234,12 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 {
        struct macb *bp = queue->bp;
        u16 queue_index = queue - bp->queues;
+       unsigned long flags;
        unsigned int tail;
        unsigned int head;
        int packets = 0;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
        head = queue->tx_head;
        for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
                struct macb_tx_skb      *tx_skb;
@@ -1297,7 +1298,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
            CIRC_CNT(queue->tx_head, queue->tx_tail,
                     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
                netif_wake_subqueue(bp->dev, queue_index);
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
        return packets;
 }
@@ -1713,8 +1714,9 @@ static void macb_tx_restart(struct macb_queue *queue)
 {
        struct macb *bp = queue->bp;
        unsigned int head_idx, tbqp;
+       unsigned long flags;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
        if (queue->tx_head == queue->tx_tail)
                goto out_tx_ptr_unlock;
@@ -1726,19 +1728,20 @@ static void macb_tx_restart(struct macb_queue *queue)
        if (tbqp == head_idx)
                goto out_tx_ptr_unlock;
 
-       spin_lock_irq(&bp->lock);
+       spin_lock(&bp->lock);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-       spin_unlock_irq(&bp->lock);
+       spin_unlock(&bp->lock);
 
 out_tx_ptr_unlock:
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 }
 
 static bool macb_tx_complete_pending(struct macb_queue *queue)
 {
        bool retval = false;
+       unsigned long flags;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
        if (queue->tx_head != queue->tx_tail) {
                /* Make hw descriptor updates visible to CPU */
                rmb();
@@ -1746,7 +1749,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
                if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
                        retval = true;
        }
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
        return retval;
 }
 
@@ -2314,6 +2317,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct macb_queue *queue = &bp->queues[queue_index];
        unsigned int desc_cnt, nr_frags, frag_size, f;
        unsigned int hdrlen;
+       unsigned long flags;
        bool is_lso;
        netdev_tx_t ret = NETDEV_TX_OK;
 
@@ -2374,7 +2378,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
        }
 
-       spin_lock_bh(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
        /* This is a hard error, log it. */
        if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2396,15 +2400,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        wmb();
        skb_tx_timestamp(skb);
 
-       spin_lock_irq(&bp->lock);
+       spin_lock(&bp->lock);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-       spin_unlock_irq(&bp->lock);
+       spin_unlock(&bp->lock);
 
        if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
                netif_stop_subqueue(dev, queue_index);
 
 unlock:
-       spin_unlock_bh(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
        return ret;
 }