git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
eth: bnxt: take page size into account for page pool recycling rings
authorJakub Kicinski <kuba@kernel.org>
Thu, 26 Jun 2025 16:54:41 +0000 (09:54 -0700)
committerJakub Kicinski <kuba@kernel.org>
Fri, 27 Jun 2025 22:38:41 +0000 (15:38 -0700)
The Rx rings are filled with Rx buffers, which are supposed to fit
packet headers (or MTU if HW-GRO is disabled). The aggregation buffers
are filled with "device pages". Adjust the sizes of the page pool
recycling ring appropriately, based on ratio of the size of the
buffer on given ring vs system page size. Otherwise on a system
with 64kB pages we end up with >700MB of memory sitting in every
single page pool cache.

Correct the size calculation for the head_pool. Since the buffers
there are always small I'm pretty sure I meant to cap the size
at 1k, rather than make it the lowest possible size. With 64k pages
1k cache with a 1k ring is 64x larger than we need.

Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Link: https://patch.msgid.link/20250626165441.4125047-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c

index f391e63aa79df174a406468ac73adf4773abf654..f621a5bab1ead424221b9dfa3a6f3a2bd533be86 100644 (file)
@@ -3810,12 +3810,14 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr,
                                   int numa_node)
 {
+       const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+       const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
        struct page_pool_params pp = { 0 };
        struct page_pool *pool;
 
-       pp.pool_size = bp->rx_agg_ring_size;
+       pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
        if (BNXT_RX_PAGE_MODE(bp))
-               pp.pool_size += bp->rx_ring_size;
+               pp.pool_size += bp->rx_ring_size / rx_size_fac;
        pp.nid = numa_node;
        pp.napi = &rxr->bnapi->napi;
        pp.netdev = bp->dev;
@@ -3833,7 +3835,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 
        rxr->need_head_pool = page_pool_is_unreadable(pool);
        if (bnxt_separate_head_pool(rxr)) {
-               pp.pool_size = max(bp->rx_ring_size, 1024);
+               pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
                pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pool = page_pool_create(&pp);
                if (IS_ERR(pool))