net: mana: Switch to page pool for jumbo frames
author    Haiyang Zhang <haiyangz@microsoft.com>
          Tue, 25 Mar 2025 16:32:37 +0000 (09:32 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 28 Mar 2025 11:43:18 +0000 (04:43 -0700)
Frag allocators, such as netdev_alloc_frag(), were not designed to
work for fragsz > PAGE_SIZE.

So, switch to page pool for jumbo frames instead of using page frag
allocators. This driver already uses page pool for smaller MTUs.
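
For context, the key to the fix is page pool's "order" parameter: a pool
created with a non-zero order hands out compound pages large enough for
the whole jumbo buffer, so no fallback check is needed. Below is a
minimal sketch, not part of this patch; create_jumbo_pool and the
pool_size value are illustrative assumptions.

#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

/* Hypothetical helper, not from this patch: create a pool whose
 * pages can hold an alloc_size > PAGE_SIZE buffer.
 */
static struct page_pool *create_jumbo_pool(struct net_device *ndev,
					   struct napi_struct *napi,
					   u32 alloc_size, int nid)
{
	struct page_pool_params pprm = {};

	pprm.pool_size = 256;	/* illustrative ring depth */
	pprm.nid = nid;
	pprm.napi = napi;
	pprm.netdev = ndev;
	/* Non-zero order => compound pages sized for jumbo frames */
	pprm.order = get_order(alloc_size);

	return page_pool_create(&pprm);	/* ERR_PTR() on failure */
}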

Cc: stable@vger.kernel.org
Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Reviewed-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
Link: https://patch.msgid.link/1742920357-27263-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index e190d5ee51544090a703c14c76abdc23797f864a..3ac73d97337e3a110097fb86d33fd76f41abe773 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -661,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
        mpc->rxbpre_total = 0;
 
        for (i = 0; i < num_rxb; i++) {
-               if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-                       va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-                       if (!va)
-                               goto error;
-
-                       page = virt_to_head_page(va);
-                       /* Check if the frag falls back to single page */
-                       if (compound_order(page) <
-                           get_order(mpc->rxbpre_alloc_size)) {
-                               put_page(page);
-                               goto error;
-                       }
-               } else {
-                       page = dev_alloc_page();
-                       if (!page)
-                               goto error;
+               page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+               if (!page)
+                       goto error;
 
-                       va = page_to_virt(page);
-               }
+               va = page_to_virt(page);
 
                da = dma_map_single(dev, va + mpc->rxbpre_headroom,
                                    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, da)) {
-                       put_page(virt_to_head_page(va));
+                       put_page(page);
                        goto error;
                }
 
@@ -1676,7 +1662,7 @@ drop:
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-                            dma_addr_t *da, bool *from_pool, bool is_napi)
+                            dma_addr_t *da, bool *from_pool)
 {
        struct page *page;
        void *va;
@@ -1687,21 +1673,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
        if (rxq->xdp_save_va) {
                va = rxq->xdp_save_va;
                rxq->xdp_save_va = NULL;
-       } else if (rxq->alloc_size > PAGE_SIZE) {
-               if (is_napi)
-                       va = napi_alloc_frag(rxq->alloc_size);
-               else
-                       va = netdev_alloc_frag(rxq->alloc_size);
-
-               if (!va)
-                       return NULL;
-
-               page = virt_to_head_page(va);
-               /* Check if the frag falls back to single page */
-               if (compound_order(page) < get_order(rxq->alloc_size)) {
-                       put_page(page);
-                       return NULL;
-               }
        } else {
                page = page_pool_dev_alloc_pages(rxq->page_pool);
                if (!page)
@@ -1734,7 +1705,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
        dma_addr_t da;
        void *va;
 
-       va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+       va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
        if (!va)
                return;
 
@@ -2176,7 +2147,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
        if (mpc->rxbufs_pre)
                va = mana_get_rxbuf_pre(rxq, &da);
        else
-               va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+               va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
        if (!va)
                return -ENOMEM;
@@ -2262,6 +2233,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
        pprm.nid = gc->numa_node;
        pprm.napi = &rxq->rx_cq.napi;
        pprm.netdev = rxq->ndev;
+       pprm.order = get_order(rxq->alloc_size);
 
        rxq->page_pool = page_pool_create(&pprm);
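
To round the picture out, a hedged sketch of the consumer side under the
same assumptions (pool_get_rxbuf and its headroom/datasize parameters
are illustrative): it mirrors the post-patch mana_get_rxfrag() flow.
Note that buffers obtained from the pool go back to the pool via
page_pool_put_full_page(), not put_page().

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

/* Hypothetical helper, not from this patch: allocate one RX buffer
 * from the pool and DMA-map its data area.
 */
static void *pool_get_rxbuf(struct page_pool *pool, struct device *dev,
			    u32 headroom, u32 datasize, dma_addr_t *da)
{
	struct page *page = page_pool_dev_alloc_pages(pool);
	void *va;

	if (!page)
		return NULL;

	va = page_to_virt(page);
	*da = dma_map_single(dev, va + headroom, datasize, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		/* Hand the compound page back to the pool for reuse. */
		page_pool_put_full_page(pool, page, false);
		return NULL;
	}
	return va;
}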