net: fec: use switch statement to check the type of tx_buf
author		Wei Fang <wei.fang@nxp.com>
Thu, 5 Feb 2026 08:57:37 +0000 (16:57 +0800)
committer	Paolo Abeni <pabeni@redhat.com>
Tue, 10 Feb 2026 09:58:20 +0000 (10:58 +0100)
The tx_buf has three types: FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO and
FEC_TXBUF_T_XDP_TX. Currently, the driver uses 'if...else...' statements
to check the type and perform the corresponding processing, which scales
poorly. To support AF_XDP zero-copy mode, two new types will be added in
the future, and extending the 'if...else...' chains further would hurt
readability. So replace the 'if...else...' statements in the driver with
switch statements.

Signed-off-by: Wei Fang <wei.fang@nxp.com>
Link: https://patch.msgid.link/20260205085742.2685134-11-wei.fang@nxp.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
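
For readers skimming the diff, the shape of the conversion is easiest to see
in isolation. The following standalone sketch is not driver code: it mirrors
the FEC_TXBUF_T_* dispatch with hypothetical print statements standing in for
dev_kfree_skb_any(), xdp_return_frame() and page_pool_put_page().

/* Minimal sketch of the if/else -> switch conversion; builds standalone. */
#include <stdio.h>

enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,
	FEC_TXBUF_T_XDP_NDO,
	FEC_TXBUF_T_XDP_TX,
};

static void reclaim_tx_buf(enum fec_txbuf_type type)
{
	switch (type) {
	case FEC_TXBUF_T_SKB:
		puts("unmap DMA, free the skb");         /* dev_kfree_skb_any() */
		break;
	case FEC_TXBUF_T_XDP_NDO:
		puts("unmap DMA, return the xdp_frame"); /* xdp_return_frame() */
		break;
	case FEC_TXBUF_T_XDP_TX:
		puts("recycle the page");                /* page_pool_put_page() */
		break;
	default:
		/* room for the planned AF_XDP zero-copy types */
		break;
	}
}

int main(void)
{
	reclaim_tx_buf(FEC_TXBUF_T_SKB);
	reclaim_tx_buf(FEC_TXBUF_T_XDP_TX);
	return 0;
}

With the switch, each planned AF_XDP zero-copy type becomes one new case
label rather than another chained else-if.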
drivers/net/ethernet/freescale/fec_main.c

index 1b4ce004ae46850b4c924da855a807192213403c..80fcac0f342f2d63e698ce04a3f4e15091d5ab0f 100644
@@ -1023,9 +1023,13 @@ static void fec_enet_bd_init(struct net_device *dev)
                txq->bd.cur = bdp;
 
                for (i = 0; i < txq->bd.ring_size; i++) {
+                       struct page *page;
+
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
-                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+
+                       switch (txq->tx_buf[i].type) {
+                       case FEC_TXBUF_T_SKB:
                                if (bdp->cbd_bufaddr &&
                                    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                                        dma_unmap_single(&fep->pdev->dev,
@@ -1033,18 +1037,21 @@ static void fec_enet_bd_init(struct net_device *dev)
                                                         fec16_to_cpu(bdp->cbd_datlen),
                                                         DMA_TO_DEVICE);
                                dev_kfree_skb_any(txq->tx_buf[i].buf_p);
-                       } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+                               break;
+                       case FEC_TXBUF_T_XDP_NDO:
                                dma_unmap_single(&fep->pdev->dev,
                                                 fec32_to_cpu(bdp->cbd_bufaddr),
                                                 fec16_to_cpu(bdp->cbd_datlen),
                                                 DMA_TO_DEVICE);
-
                                xdp_return_frame(txq->tx_buf[i].buf_p);
-                       } else {
-                               struct page *page = txq->tx_buf[i].buf_p;
-
+                               break;
+                       case FEC_TXBUF_T_XDP_TX:
+                               page = txq->tx_buf[i].buf_p;
                                page_pool_put_page(pp_page_to_nmdesc(page)->pp,
                                                   page, 0, false);
+                               break;
+                       default:
+                               break;
                        }
 
                        txq->tx_buf[i].buf_p = NULL;
@@ -1510,39 +1517,69 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                        break;
 
                index = fec_enet_get_bd_index(bdp, &txq->bd);
+               frame_len = fec16_to_cpu(bdp->cbd_datlen);
 
-               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-                       skb = txq->tx_buf[index].buf_p;
+               switch (txq->tx_buf[index].type) {
+               case FEC_TXBUF_T_SKB:
                        if (bdp->cbd_bufaddr &&
                            !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                                dma_unmap_single(&fep->pdev->dev,
                                                 fec32_to_cpu(bdp->cbd_bufaddr),
-                                                fec16_to_cpu(bdp->cbd_datlen),
-                                                DMA_TO_DEVICE);
+                                                frame_len, DMA_TO_DEVICE);
+
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       skb = txq->tx_buf[index].buf_p;
                        if (!skb)
                                goto tx_buf_done;
-               } else {
+
+                       frame_len = skb->len;
+
+                       /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+                        * are to time stamp the packet, so we still need to check time
+                        * stamping enabled flag.
+                        */
+                       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+                                    fep->hwts_tx_en) && fep->bufdesc_ex) {
+                               struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                               struct skb_shared_hwtstamps shhwtstamps;
+
+                               fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+                               skb_tstamp_tx(skb, &shhwtstamps);
+                       }
+
+                       /* Free the sk buffer associated with this last transmit */
+                       napi_consume_skb(skb, budget);
+                       break;
+               case FEC_TXBUF_T_XDP_NDO:
                        /* Tx processing cannot call any XDP (or page pool) APIs if
                         * the "budget" is 0. Because NAPI is called with budget of
                         * 0 (such as netpoll) indicates we may be in an IRQ context,
                         * however, we can't use the page pool from IRQ context.
                         */
                        if (unlikely(!budget))
-                               break;
+                               goto out;
 
-                       if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-                               xdpf = txq->tx_buf[index].buf_p;
-                               dma_unmap_single(&fep->pdev->dev,
-                                                fec32_to_cpu(bdp->cbd_bufaddr),
-                                                fec16_to_cpu(bdp->cbd_datlen),
-                                                DMA_TO_DEVICE);
-                       } else {
-                               page = txq->tx_buf[index].buf_p;
-                       }
+                       xdpf = txq->tx_buf[index].buf_p;
+                       dma_unmap_single(&fep->pdev->dev,
+                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                        frame_len, DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       xdp_return_frame_rx_napi(xdpf);
+                       break;
+               case FEC_TXBUF_T_XDP_TX:
+                       if (unlikely(!budget))
+                               goto out;
 
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
-                       frame_len = fec16_to_cpu(bdp->cbd_datlen);
+                       page = txq->tx_buf[index].buf_p;
+                       /* The dma_sync_size = 0 as XDP_TX has already synced
+                        * DMA for_device
+                        */
+                       page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+                                          0, true);
+                       break;
+               default:
+                       break;
                }
 
                /* Check for errors. */
@@ -1562,11 +1599,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
-
-                       if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
-                               ndev->stats.tx_bytes += skb->len;
-                       else
-                               ndev->stats.tx_bytes += frame_len;
+                       ndev->stats.tx_bytes += frame_len;
                }
 
                /* Deferred means some collisions occurred during transmit,
@@ -1575,30 +1608,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;
 
-               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-                       /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-                        * are to time stamp the packet, so we still need to check time
-                        * stamping enabled flag.
-                        */
-                       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-                                    fep->hwts_tx_en) && fep->bufdesc_ex) {
-                               struct skb_shared_hwtstamps shhwtstamps;
-                               struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-                               fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-                               skb_tstamp_tx(skb, &shhwtstamps);
-                       }
-
-                       /* Free the sk buffer associated with this last transmit */
-                       napi_consume_skb(skb, budget);
-               } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-                       xdp_return_frame_rx_napi(xdpf);
-               } else { /* recycle pages of XDP_TX frames */
-                       /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
-                       page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
-                                          0, true);
-               }
-
                txq->tx_buf[index].buf_p = NULL;
                /* restore default tx buffer type: FEC_TXBUF_T_SKB */
                txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
@@ -1622,6 +1631,8 @@ tx_buf_done:
                }
        }
 
+out:
+
        /* ERR006358: Keep the transmitter going */
        if (bdp != txq->bd.cur &&
            readl(txq->bd.reg_desc_active) == 0)
@@ -3415,6 +3426,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        unsigned int i;
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
+       struct page *page;
        unsigned int q;
 
        for (q = 0; q < fep->num_rx_queues; q++) {
@@ -3438,20 +3450,20 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                        kfree(txq->tx_bounce[i]);
                        txq->tx_bounce[i] = NULL;
 
-                       if (!txq->tx_buf[i].buf_p) {
-                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
-                               continue;
-                       }
-
-                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+                       switch (txq->tx_buf[i].type) {
+                       case FEC_TXBUF_T_SKB:
                                dev_kfree_skb(txq->tx_buf[i].buf_p);
-                       } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+                               break;
+                       case FEC_TXBUF_T_XDP_NDO:
                                xdp_return_frame(txq->tx_buf[i].buf_p);
-                       } else {
-                               struct page *page = txq->tx_buf[i].buf_p;
-
+                               break;
+                       case FEC_TXBUF_T_XDP_TX:
+                               page = txq->tx_buf[i].buf_p;
                                page_pool_put_page(pp_page_to_nmdesc(page)->pp,
                                                   page, 0, false);
+                               break;
+                       default:
+                               break;
                        }
 
                        txq->tx_buf[i].buf_p = NULL;
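
One behavioral detail of the fec_enet_tx_queue() hunk above is worth a note:
the XDP cases now 'goto out' instead of 'break' when budget is 0, because a
zero budget (e.g. netpoll) can mean IRQ context, where page-pool and XDP
return APIs must not be used, and a plain 'break' would now only leave the
switch rather than the cleanup loop. A standalone sketch of that guard, with
hypothetical helper names (poll_tx, recycle_buffer) under a much simplified
flow:

/* Sketch of the budget-0 bail-out pattern; builds standalone. */
#include <stdio.h>

static void recycle_buffer(int index)
{
	/* stand-in for xdp_return_frame_rx_napi()/page_pool_put_page() */
	printf("recycled buffer %d\n", index);
}

static int poll_tx(int budget)
{
	int cleaned = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (!budget)	/* budget 0 may mean IRQ context: do not recycle */
			goto out;
		recycle_buffer(i);
		cleaned++;
	}
out:
	return cleaned;
}

int main(void)
{
	printf("cleaned %d with budget\n", poll_tx(64));
	printf("cleaned %d without budget\n", poll_tx(0));
	return 0;
}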