git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()
author: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Wed, 18 Mar 2026 18:26:54 +0000 (18:26 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 20 Mar 2026 00:18:52 +0000 (17:18 -0700)
STMMAC_GET_ENTRY() doesn't describe what this macro is doing - it is
incrementing the provided index for the circular array of descriptors.
Replace "GET" with "NEXT" as this better describes the action here.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Link: https://patch.msgid.link/E1w2vba-0000000DbWo-1oL5@rmk-PC.armlinux.org.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

index 120a009c99929914049ebe7bef42644577f56392..dc7df4208c538f65ed755380479c220966d11653 100644 (file)
@@ -46,7 +46,7 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
 
        while (len != 0) {
                tx_q->tx_skbuff[entry] = NULL;
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
                desc = tx_q->dma_tx + entry;
 
                if (len > bmax) {
index f1628de8ed18dfb4e43cf13dd7cc7474d646039f..8166389c853f0fe932b6a10a1fdc42858ab51f87 100644 (file)
@@ -63,7 +63,7 @@ static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
 #define DMA_MIN_RX_SIZE                64
 #define DMA_MAX_RX_SIZE                1024
 #define DMA_DEFAULT_RX_SIZE    512
-#define STMMAC_GET_ENTRY(x, size)      ((x + 1) & (size - 1))
+#define STMMAC_NEXT_ENTRY(x, size)     ((x + 1) & (size - 1))
 
 #undef FRAME_FILTER_DEBUG
 /* #define FRAME_FILTER_DEBUG */
index 382d94a3b97209a3787122aa17537985aa55f4e8..78fc6aa5bbe954436d53c3b05a7323bc009987a5 100644 (file)
@@ -51,7 +51,7 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
                stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
                                STMMAC_RING_MODE, 0, false, skb->len);
                tx_q->tx_skbuff[entry] = NULL;
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 
                if (priv->extend_desc)
                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
index 3c02062780817d9006d04fe533d416b739589449..5062537f79e9c054892369f788f0c43c921bf690 100644 (file)
@@ -2780,7 +2780,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
                xsk_tx_metadata_to_compl(meta,
                                         &tx_q->tx_skbuff_dma[entry].xsk_meta);
 
-               tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+               tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
                entry = tx_q->cur_tx;
        }
        u64_stats_update_begin(&txq_stats->napi_syncp);
@@ -2948,7 +2948,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
 
                stmmac_release_tx_desc(priv, p, priv->descriptor_mode);
 
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
        }
        tx_q->dirty_tx = entry;
 
@@ -4303,7 +4303,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
                return false;
 
        stmmac_set_tx_owner(priv, p);
-       tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
        return true;
 }
 
@@ -4331,7 +4331,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
        while (tmp_len > 0) {
                dma_addr_t curr_addr;
 
-               tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+               tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
                                                priv->dma_conf.dma_tx_size);
                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 
@@ -4473,7 +4473,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
                stmmac_set_mss(priv, mss_desc, mss);
                tx_q->mss = mss;
-               tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+               tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
                                                priv->dma_conf.dma_tx_size);
                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
        }
@@ -4573,7 +4573,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         * ndo_start_xmit will fill this descriptor the next time it's
         * called and stmmac_tx_clean may clean up to this descriptor.
         */
-       tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 
        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4777,7 +4777,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                unsigned int frag_size = skb_frag_size(frag);
                bool last_segment = (i == (nfrags - 1));
 
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
                WARN_ON(tx_q->tx_skbuff[entry]);
 
                desc = stmmac_get_tx_desc(priv, tx_q, entry);
@@ -4833,7 +4833,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * ndo_start_xmit will fill this descriptor the next time it's
         * called and stmmac_tx_clean may clean up to this descriptor.
         */
-       entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
        tx_q->cur_tx = entry;
 
        if (netif_msg_pktdata(priv)) {
@@ -4998,7 +4998,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                dma_wmb();
                stmmac_set_rx_owner(priv, p, use_rx_wd);
 
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
        }
        rx_q->dirty_rx = entry;
        stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
@@ -5139,7 +5139,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
 
-       entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
        tx_q->cur_tx = entry;
 
        return STMMAC_XDP_TX;
@@ -5370,7 +5370,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
                dma_wmb();
                stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
 
-               entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+               entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
        }
 
        if (rx_desc) {
@@ -5454,7 +5454,7 @@ read_again:
                        break;
 
                /* Prefetch the next RX descriptor */
-               rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+               rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
                                                priv->dma_conf.dma_rx_size);
                next_entry = rx_q->cur_rx;
 
@@ -5638,7 +5638,7 @@ read_again:
                if (unlikely(status & dma_own))
                        break;
 
-               rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+               rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
                                                priv->dma_conf.dma_rx_size);
                next_entry = rx_q->cur_rx;