bnxt_en: handle tpa_info in queue API implementation
Author:     David Wei <dw@davidwei.uk>
AuthorDate: Wed, 4 Dec 2024 04:10:22 +0000 (20:10 -0800)
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: Thu, 5 Dec 2024 03:23:35 +0000 (19:23 -0800)

Commit 7ed816be35ab ("eth: bnxt: use page pool for head frags") added a
page pool for header frags, which may be distinct from the existing pool
for the aggregation ring. Prior to that change, frags used in the TPA
ring rx_tpa were allocated from system memory, e.g. via
napi_alloc_frag(), meaning their lifetimes were not tied to any page
pool. They could be returned at any time, so the queue API did not
allocate or free rx_tpa.
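
For context, a minimal sketch of that legacy lifetime, using the generic
allocators rather than the bnxt internals (the helper names here are
hypothetical):

    /* Hypothetical helpers sketching the pre-7ed816be35ab behaviour: a
     * TPA header frag was plain system memory, so freeing it never
     * touched any page pool's accounting.
     */
    #include <linux/skbuff.h>

    static void *tpa_frag_get(unsigned int fragsz)
    {
            return napi_alloc_frag(fragsz); /* page allocator, no pool */
    }

    static void tpa_frag_put(void *data)
    {
            skb_free_frag(data);    /* straight back to the page allocator */
    }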

But now frags come from a separate head_pool, which may be different
from page_pool. Without allocating and freeing rx_tpa, frags allocated
from the old head_pool may be returned to a different, new head_pool,
which causes a mismatch between the pp hold/release counts.
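
To see why the counts diverge, here is a hedged sketch of the invariant
using the generic page pool API (not bnxt code): a page's hold is
counted against the pool that allocated it, and the release must be
accounted against that same pool.

    #include <net/page_pool/helpers.h>

    static void pp_hold_release_example(struct page_pool *pool)
    {
            /* The allocation counts a hold against this pool ... */
            struct page *page = page_pool_dev_alloc_pages(pool);

            if (!page)
                    return;

            /* ... and the release must balance it against the same pool. */
            page_pool_put_full_page(pool, page, false);
    }

When rx_tpa outlives a queue restart, its frags were counted as holds on
the old head_pool but end up accounted against the new one, so neither
pool's counters ever balance.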

Fix this problem by properly freeing and allocating rx_tpa in the queue
API implementation.

Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Link: https://patch.msgid.link/20241204041022.56512-4-dw@davidwei.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
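
In outline, the diff below threads the TPA state through each queue API
callback; roughly (a comment sketch using the function names visible in
the hunks, not literal code):

    /*
     * bnxt_queue_mem_alloc():
     *   bnxt_alloc_one_tpa_info(bp, clone)       - rx_tpa / rx_tpa_idx_map
     *   bnxt_alloc_one_tpa_info_data(bp, clone)  - header frags for the clone
     * bnxt_queue_start():
     *   rxr->rx_tpa    = clone->rx_tpa;          - adopt the fresh TPA state
     *   rxr->head_pool = clone->head_pool;
     * bnxt_queue_mem_free():
     *   bnxt_free_one_rx_ring_skbs(bp, rxr);     - drops the TPA frags too
     *   page_pool_destroy(rxr->head_pool);       - when separate from page_pool
     */
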
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8031ff31f837..6b963086c1d3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3710,7 +3710,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
                        xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
                page_pool_destroy(rxr->page_pool);
-               if (rxr->page_pool != rxr->head_pool)
+               if (bnxt_separate_head_pool())
                        page_pool_destroy(rxr->head_pool);
                rxr->page_pool = rxr->head_pool = NULL;
 
@@ -15388,15 +15388,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
                        goto err_free_rx_agg_ring;
        }
 
+       if (bp->flags & BNXT_FLAG_TPA) {
+               rc = bnxt_alloc_one_tpa_info(bp, clone);
+               if (rc)
+                       goto err_free_tpa_info;
+       }
+
        bnxt_init_one_rx_ring_rxbd(bp, clone);
        bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
 
        bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+       if (bp->flags & BNXT_FLAG_TPA)
+               bnxt_alloc_one_tpa_info_data(bp, clone);
 
        return 0;
 
+err_free_tpa_info:
+       bnxt_free_one_tpa_info(bp, clone);
 err_free_rx_agg_ring:
        bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
 err_free_rx_ring:
@@ -15404,9 +15414,11 @@ err_free_rx_ring:
 err_rxq_info_unreg:
        xdp_rxq_info_unreg(&clone->xdp_rxq);
 err_page_pool_destroy:
-       clone->page_pool->p.napi = NULL;
        page_pool_destroy(clone->page_pool);
+       if (bnxt_separate_head_pool())
+               page_pool_destroy(clone->head_pool);
        clone->page_pool = NULL;
+       clone->head_pool = NULL;
        return rc;
 }
 
@@ -15416,13 +15428,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ring_struct *ring;
 
-       bnxt_free_one_rx_ring(bp, rxr);
-       bnxt_free_one_rx_agg_ring(bp, rxr);
+       bnxt_free_one_rx_ring_skbs(bp, rxr);
 
        xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
        page_pool_destroy(rxr->page_pool);
+       if (bnxt_separate_head_pool())
+               page_pool_destroy(rxr->head_pool);
        rxr->page_pool = NULL;
+       rxr->head_pool = NULL;
 
        ring = &rxr->rx_ring_struct;
        bnxt_free_ring(bp, &ring->ring_mem);
@@ -15504,7 +15518,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
        rxr->rx_agg_prod = clone->rx_agg_prod;
        rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
        rxr->rx_next_cons = clone->rx_next_cons;
+       rxr->rx_tpa = clone->rx_tpa;
+       rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
        rxr->page_pool = clone->page_pool;
+       rxr->head_pool = clone->head_pool;
        rxr->xdp_rxq = clone->xdp_rxq;
 
        bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15563,6 +15580,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
        bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
        rxr->rx_next_cons = 0;
        page_pool_disable_direct_recycling(rxr->page_pool);
+       if (bnxt_separate_head_pool())
+               page_pool_disable_direct_recycling(rxr->head_pool);
 
        memcpy(qmem, rxr, sizeof(*rxr));
        bnxt_init_rx_ring_struct(bp, qmem);
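
A note on the recurring guard: bnxt_separate_head_pool() reports whether
head_pool really is a second pool. A hedged paraphrase of the helper as
defined in bnxt.c around this commit (approximate; verify against the
tree) reduces it to a page size check:

    /* Approximate definition: header frags need their own pool only when
     * the aggregation pool hands out sub-page fragments, i.e. when
     * PAGE_SIZE exceeds the HW rx page size.
     */
    static bool bnxt_separate_head_pool(void)
    {
            return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
    }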