From: Joe Damato
Date: Wed, 8 Apr 2026 23:05:53 +0000 (-0700)
Subject: net: bnxt: Use dma_unmap_len for TX completion unmapping
X-Git-Tag: v7.1-rc1~173^2~30^2~6
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=3cb430e62c83823b76fc7dc1aec8dc7bbf81a729;p=thirdparty%2Fkernel%2Flinux.git

net: bnxt: Use dma_unmap_len for TX completion unmapping

Store the DMA mapping length in each TX buffer descriptor via
dma_unmap_len_set at submit time, and use dma_unmap_len at completion
time. This is a no-op for normal packets but prepares for software
USO, where header BDs set dma_unmap_len to 0 because the header buffer
is unmapped collectively rather than per-segment.

Suggested-by: Jakub Kicinski
Reviewed-by: Pavan Chebbi
Signed-off-by: Joe Damato
Link: https://patch.msgid.link/20260408230607.2019402-5-joe@dama.to
Signed-off-by: Jakub Kicinski
---

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d1f0969b781c3..bc2dac2f137d3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -656,6 +656,7 @@ normal_tx:
 		goto tx_free;
 
 	dma_unmap_addr_set(tx_buf, mapping, mapping);
+	dma_unmap_len_set(tx_buf, len, len);
 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
 		TX_BD_CNT(last_frag + 2);
 
@@ -720,6 +721,7 @@ normal_tx:
 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
 		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
 					  mapping, mapping);
+		dma_unmap_len_set(tx_buf, len, len);
 
 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
 
@@ -809,7 +811,8 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 	u16 hw_cons = txr->tx_hw_cons;
 	unsigned int tx_bytes = 0;
 	u16 cons = txr->tx_cons;
-	skb_frag_t *frag;
+	unsigned int dma_len;
+	dma_addr_t dma_addr;
 	int tx_pkts = 0;
 	bool rc = false;
 
@@ -844,19 +847,27 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 			goto next_tx_int;
 		}
 
-		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-				 skb_headlen(skb), DMA_TO_DEVICE);
+		if (dma_unmap_len(tx_buf, len)) {
+			dma_addr = dma_unmap_addr(tx_buf, mapping);
+			dma_len = dma_unmap_len(tx_buf, len);
+
+			dma_unmap_single(&pdev->dev, dma_addr, dma_len,
+					 DMA_TO_DEVICE);
+		}
+
 		last = tx_buf->nr_frags;
 
 		for (j = 0; j < last; j++) {
-			frag = &skb_shinfo(skb)->frags[j];
 			cons = NEXT_TX(cons);
 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
-			netmem_dma_unmap_page_attrs(&pdev->dev,
-						    dma_unmap_addr(tx_buf,
-								   mapping),
-						    skb_frag_size(frag),
-						    DMA_TO_DEVICE, 0);
+			if (dma_unmap_len(tx_buf, len)) {
+				dma_addr = dma_unmap_addr(tx_buf, mapping);
+				dma_len = dma_unmap_len(tx_buf, len);
+
+				netmem_dma_unmap_page_attrs(&pdev->dev,
+							    dma_addr, dma_len,
+							    DMA_TO_DEVICE, 0);
+			}
 		}
 		if (unlikely(is_ts_pkt)) {
 			if (BNXT_CHIP_P5(bp)) {
@@ -3394,6 +3405,8 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
 {
 	int i, max_idx;
 	struct pci_dev *pdev = bp->pdev;
+	unsigned int dma_len;
+	dma_addr_t dma_addr;
 
 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
 
@@ -3404,9 +3417,10 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
 
 		if (idx < bp->tx_nr_rings_xdp &&
 		    tx_buf->action == XDP_REDIRECT) {
-			dma_unmap_single(&pdev->dev,
-					 dma_unmap_addr(tx_buf, mapping),
-					 dma_unmap_len(tx_buf, len),
+			dma_addr = dma_unmap_addr(tx_buf, mapping);
+			dma_len = dma_unmap_len(tx_buf, len);
+
+			dma_unmap_single(&pdev->dev, dma_addr, dma_len,
 					 DMA_TO_DEVICE);
 			xdp_return_frame(tx_buf->xdpf);
 			tx_buf->action = 0;
@@ -3429,23 +3443,28 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
 			continue;
 		}
 
-		dma_unmap_single(&pdev->dev,
-				 dma_unmap_addr(tx_buf, mapping),
-				 skb_headlen(skb),
-				 DMA_TO_DEVICE);
+		if (dma_unmap_len(tx_buf, len)) {
+			dma_addr = dma_unmap_addr(tx_buf, mapping);
+			dma_len = dma_unmap_len(tx_buf, len);
+
+			dma_unmap_single(&pdev->dev, dma_addr, dma_len,
+					 DMA_TO_DEVICE);
+		}
 
 		last = tx_buf->nr_frags;
 		i += 2;
 		for (j = 0; j < last; j++, i++) {
 			int ring_idx = i & bp->tx_ring_mask;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
 
 			tx_buf = &txr->tx_buf_ring[ring_idx];
-			netmem_dma_unmap_page_attrs(&pdev->dev,
-						    dma_unmap_addr(tx_buf,
-								   mapping),
-						    skb_frag_size(frag),
-						    DMA_TO_DEVICE, 0);
+			if (dma_unmap_len(tx_buf, len)) {
+				dma_addr = dma_unmap_addr(tx_buf, mapping);
+				dma_len = dma_unmap_len(tx_buf, len);
+
+				netmem_dma_unmap_page_attrs(&pdev->dev,
+							    dma_addr, dma_len,
+							    DMA_TO_DEVICE, 0);
+			}
 		}
 		dev_kfree_skb(skb);
 	}
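
For reference, a minimal sketch of the pattern applied above: record the
length with dma_unmap_len_set() when the buffer is mapped, and unmap at
completion only when dma_unmap_len() is non-zero. The struct and helper
names (my_sw_tx_bd, my_map_head, my_tx_complete) are hypothetical and for
illustration only, not bnxt code; the dma_unmap_* helpers and
DEFINE_DMA_UNMAP_* macros are the standard kernel DMA API.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical software TX buffer; bnxt's real counterpart is
 * struct bnxt_sw_tx_bd.
 */
struct my_sw_tx_bd {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Submit path: map the skb head and remember both the DMA address and
 * the length, so the completion path can unmap without consulting the skb.
 */
static int my_map_head(struct device *dev, struct my_sw_tx_bd *tx_buf,
		       struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, skb_headlen(skb));
	return 0;
}

/* Completion path: unmap only if a non-zero length was recorded.  A
 * buffer with dma_unmap_len() == 0 (e.g. a software-USO header BD in
 * the real driver) is skipped here and unmapped collectively elsewhere.
 */
static void my_tx_complete(struct device *dev, struct my_sw_tx_bd *tx_buf)
{
	if (dma_unmap_len(tx_buf, len))
		dma_unmap_single(dev, dma_unmap_addr(tx_buf, mapping),
				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);

	dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}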