eth: bnxt: add netmem TX support
author Taehee Yoo <ap420073@gmail.com>
Thu, 19 Jun 2025 14:40:58 +0000 (14:40 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 21 Jun 2025 14:53:07 +0000 (07:53 -0700)
Use the netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
With this change, all bnxt devices support netmem TX.
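
For reference, here is a minimal sketch of the behaviour these helpers are
assumed to provide (a hedged reconstruction, not the exact definitions in
include/net/netmem.h): record a zero unmap address for net_iov-backed frags,
and skip the unmap when the recorded address is zero, because the dmabuf
binding rather than the driver owns those DMA mappings.

  /* Hedged sketch of the netmem_dma_*() semantics assumed by this patch;
   * the real definitions live in include/net/netmem.h.
   */
  #define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL)       \
          do {                                                         \
                  if (netmem_is_net_iov(NETMEM))                       \
                          dma_unmap_addr_set(PTR, ADDR_NAME, 0);       \
                  else                                                 \
                          dma_unmap_addr_set(PTR, ADDR_NAME, VAL);     \
          } while (0)

  static inline void netmem_dma_unmap_page_attrs(struct device *dev,
                                                 dma_addr_t dma, size_t size,
                                                 enum dma_data_direction dir,
                                                 unsigned long attrs)
  {
          if (!dma)       /* net_iov frag: the dmabuf binding owns it */
                  return;
          dma_unmap_page_attrs(dev, dma, size, dir, attrs);
  }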

Unreadable skbs cannot be handled by the TX push logic, so check whether
the skb is readable before taking the TX push path.
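
As a rough illustration of the resulting guard (the exact change is in the
first hunk below), the push fast path is only taken when every frag can be
copied by the CPU:

  /* Sketch: the push path memcpy()s the header and frag payloads into the
   * push buffer, which is impossible for unreadable (net_iov/devmem) frags,
   * so such skbs fall through to the normal DMA-mapped TX path.
   */
  if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
      skb_frags_readable(skb) && !lflags) {
          /* ... copy into txr->tx_push and write the push doorbell ... */
  }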

netmem TX can be tested with ncdevmem.c.

Acked-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Link: https://patch.msgid.link/20250619144058.147051-1-ap420073@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c

index 07e749fa1910d9e7bbbcb9ab47b4c7da969a631e..c5026fa7e6e6799e47b79c1e89e183c1d5c667a3 100644
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;
+       skb_frag_t *frag;
 
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
 
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
-           !lflags) {
+           skb_frags_readable(skb) && !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;
 
+                       frag = &skb_shinfo(skb)->frags[j];
                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;
@@ -708,8 +709,7 @@ normal_tx:
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        txbd0 = txbd;
        for (i = 0; i < last_frag; i++) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+               frag = &skb_shinfo(skb)->frags[i];
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
 
@@ -721,7 +721,8 @@ normal_tx:
                        goto tx_dma_error;
 
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_addr_set(tx_buf, mapping, mapping);
+               netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+                                         mapping, mapping);
 
                txbd->tx_bd_haddr = cpu_to_le64(mapping);
 
@@ -778,9 +779,11 @@ tx_dma_error:
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                              skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              DMA_TO_DEVICE);
+               frag = &skb_shinfo(skb)->frags[i];
+               netmem_dma_unmap_page_attrs(&pdev->dev,
+                                           dma_unmap_addr(tx_buf, mapping),
+                                           skb_frag_size(frag),
+                                           DMA_TO_DEVICE, 0);
        }
 
 tx_free:
@@ -809,6 +812,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
        u16 hw_cons = txr->tx_hw_cons;
        unsigned int tx_bytes = 0;
        u16 cons = txr->tx_cons;
+       skb_frag_t *frag;
        int tx_pkts = 0;
        bool rc = false;
 
@@ -848,13 +852,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                last = tx_buf->nr_frags;
 
                for (j = 0; j < last; j++) {
+                       frag = &skb_shinfo(skb)->frags[j];
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
-                       dma_unmap_page(
-                               &pdev->dev,
-                               dma_unmap_addr(tx_buf, mapping),
-                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
-                               DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                if (unlikely(is_ts_pkt)) {
                        if (BNXT_CHIP_P5(bp)) {
@@ -3422,9 +3427,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
 
                        tx_buf = &txr->tx_buf_ring[ring_idx];
-                       dma_unmap_page(&pdev->dev,
-                                      dma_unmap_addr(tx_buf, mapping),
-                                      skb_frag_size(frag), DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                dev_kfree_skb(skb);
        }
@@ -16754,6 +16761,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (BNXT_SUPPORTS_QUEUE_API(bp))
                dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
        dev->request_ops_lock = true;
+       dev->netmem_tx = true;
 
        rc = register_netdev(dev);
        if (rc)