net: bnxt: Add TX inline buffer infrastructure
author Joe Damato <joe@dama.to>
Wed, 8 Apr 2026 23:05:54 +0000 (16:05 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Sun, 12 Apr 2026 17:54:32 +0000 (10:54 -0700)
Add per-ring pre-allocated inline buffer fields (tx_inline_buf,
tx_inline_dma, tx_inline_size) to bnxt_tx_ring_info and helpers to
allocate and free them. A producer and consumer (tx_inline_prod,
tx_inline_cons) are added to track which slot(s) of the inline buffer
are in use.

The inline buffer will be used by the SW USO path for pre-allocated,
pre-DMA-mapped per-segment header copies. In the future, this
could be extended to support TX copybreak.

The allocation helper is marked __maybe_unused in this commit because it
will be wired up in a later patch.
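
To make the producer/consumer bookkeeping concrete, here is a minimal
sketch (not part of this patch) of how tx_inline_prod/tx_inline_cons
could hand out fixed-size slots from the pre-allocated buffer; the slot
size, slot count, and helper name below are hypothetical:

/* Hypothetical illustration only. Assumes a power-of-two number of
 * fixed-size slots so the u16 producer/consumer indices wrap
 * consistently.
 */
#define TX_INLINE_SLOT_SZ	256	/* hypothetical per-segment header slot */
#define TX_INLINE_NR_SLOTS	64	/* hypothetical slot count per ring */

static void *tx_inline_get_slot(struct bnxt_tx_ring_info *txr)
{
	u16 prod = txr->tx_inline_prod;

	/* In-use count is the producer/consumer distance (wrap-safe u16 math). */
	if ((u16)(prod - txr->tx_inline_cons) >= TX_INLINE_NR_SLOTS)
		return NULL;		/* all slots busy */

	txr->tx_inline_prod = prod + 1;
	return txr->tx_inline_buf +
	       (prod & (TX_INLINE_NR_SLOTS - 1)) * TX_INLINE_SLOT_SZ;
}

On TX completion the consumer side would advance txr->tx_inline_cons to
release the oldest slot.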

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Joe Damato <joe@dama.to>
Link: https://patch.msgid.link/20260408230607.2019402-6-joe@dama.to
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h

index bc2dac2f137d30923792e4ee1515f3292c8de6f7..bd93edb09ee0b8aef1157aac9dd19d5741dcf8b1 100644 (file)
@@ -3979,6 +3979,39 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
        return rc;
 }
 
+static void bnxt_free_tx_inline_buf(struct bnxt_tx_ring_info *txr,
+                                   struct pci_dev *pdev)
+{
+       if (!txr->tx_inline_buf)
+               return;
+
+       dma_unmap_single(&pdev->dev, txr->tx_inline_dma,
+                        txr->tx_inline_size, DMA_TO_DEVICE);
+       kfree(txr->tx_inline_buf);
+       txr->tx_inline_buf = NULL;
+       txr->tx_inline_size = 0;
+}
+
+static int __maybe_unused bnxt_alloc_tx_inline_buf(struct bnxt_tx_ring_info *txr,
+                                                  struct pci_dev *pdev,
+                                                  unsigned int size)
+{
+       txr->tx_inline_buf = kmalloc(size, GFP_KERNEL);
+       if (!txr->tx_inline_buf)
+               return -ENOMEM;
+
+       txr->tx_inline_dma = dma_map_single(&pdev->dev, txr->tx_inline_buf,
+                                           size, DMA_TO_DEVICE);
+       if (dma_mapping_error(&pdev->dev, txr->tx_inline_dma)) {
+               kfree(txr->tx_inline_buf);
+               txr->tx_inline_buf = NULL;
+               return -ENOMEM;
+       }
+       txr->tx_inline_size = size;
+
+       return 0;
+}
+
 static void bnxt_free_tx_rings(struct bnxt *bp)
 {
        int i;
@@ -3997,6 +4030,8 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
                        txr->tx_push = NULL;
                }
 
+               bnxt_free_tx_inline_buf(txr, pdev);
+
                ring = &txr->tx_ring_struct;
 
                bnxt_free_ring(bp, &ring->ring_mem);
index 83b4136ccd31d42d9f4d695087556dc037bebf77..d98a58aa30f60398bc629d0b20c43200087a1418 100644 (file)
@@ -996,6 +996,12 @@ struct bnxt_tx_ring_info {
        dma_addr_t              tx_push_mapping;
        __le64                  data_mapping;
 
+       void                    *tx_inline_buf;
+       dma_addr_t              tx_inline_dma;
+       unsigned int            tx_inline_size;
+       u16                     tx_inline_prod;
+       u16                     tx_inline_cons;
+
 #define BNXT_DEV_STATE_CLOSING 0x1
        u32                     dev_state;
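
For orientation beyond this patch, a hedged sketch of how the allocation
helper might later be wired into per-ring TX setup; the function name,
call site, and size calculation are assumptions, not part of this commit:

/* Assumed wiring, for illustration only: the commit defers the actual
 * call site to a later patch. Sizes one slot per TX ring entry using
 * the hypothetical TX_INLINE_SLOT_SZ from the sketch above.
 */
static int bnxt_alloc_tx_inline_bufs(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i, rc;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		unsigned int size = bp->tx_ring_size * TX_INLINE_SLOT_SZ;

		rc = bnxt_alloc_tx_inline_buf(txr, pdev, size);
		if (rc)
			return rc;	/* bnxt_free_tx_rings() unwinds */
	}

	return 0;
}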