From: Dragos Tatulea
Date: Mon, 23 Feb 2026 20:41:49 +0000 (+0200)
Subject: net/mlx5e: Set page_pool order based on calculated page_shift
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3a145cf492a3a154afb288cd460adf6721614eab;p=thirdparty%2Flinux.git

net/mlx5e: Set page_pool order based on calculated page_shift

Instead of unconditionally setting the page_pool order to 0, calculate it
from page_shift for the MPWQE case.

Signed-off-by: Dragos Tatulea
Reviewed-by: Cosmin Ratiu
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/20260223204155.1783580-10-tariqt@nvidia.com
Signed-off-by: Paolo Abeni
---

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index aca88fed2ac71..6344dbb6335e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -857,6 +857,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 {
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
 	struct mlx5_core_dev *mdev = rq->mdev;
+	u32 pool_order = 0;
 	u32 pool_size;
 	int wq_sz;
 	int err;
@@ -905,6 +906,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,

 		pool_size = rq->mpwqe.pages_per_wqe <<
 			mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
+		pool_order = rq->mpwqe.page_shift - PAGE_SHIFT;

 		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
 		    params->xdp_prog)
@@ -960,7 +962,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		/* Create a page_pool and register it with rxq */
 		struct page_pool_params pp_params = { 0 };

-		pp_params.order = 0;
+		pp_params.order = pool_order;
 		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = pool_size;
 		pp_params.nid = node;
@@ -968,7 +970,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		pp_params.napi = rq->cq.napi;
 		pp_params.netdev = rq->netdev;
 		pp_params.dma_dir = rq->buff.map_dir;
-		pp_params.max_len = PAGE_SIZE;
+		pp_params.max_len = BIT(PAGE_SHIFT + pool_order);
 		pp_params.queue_idx = rq->ix;

 		/* Shampo header data split allow for unreadable netmem */