From: Dragos Tatulea Date: Mon, 23 Feb 2026 20:41:51 +0000 (+0200) Subject: net/mlx5e: RX, Make page frag bias more robust X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8611660778bf5db9f5f063c9bd58d41012801cb8;p=thirdparty%2Flinux.git net/mlx5e: RX, Make page frag bias more robust The formula uses the system page size but does not account for high order pages. One way to fix this would be to adapt the formula to take into account the pool order. This would require calculating it for every allocation or adding an additional rq struct member to hold the bias max. However, the above is not really needed as the driver doesn't check the bias value. It has other means to calculate the expected number of fragments based on context. This patch simply sets the value to the max possible value. A sanity check is added during the queue init phase to prevent really big pages from using more fragments than the type can fit. Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20260223204155.1783580-12-tariqt@nvidia.com Signed-off-by: Paolo Abeni --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 5181d6ab39ae8..c7ac6ebe82906 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -80,6 +80,7 @@ struct page_pool; #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define MLX5E_PAGECNT_BIAS_MAX U16_MAX #define MLX5E_RX_MAX_HEAD (256) #define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2d3d89707246b..cf977273f7534 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -969,6 +969,12 @@ static int mlx5e_alloc_rq(struct 
mlx5e_params *params, /* Create a page_pool and register it with rxq */ struct page_pool_params pp_params = { 0 }; + if (WARN_ON(BIT(PAGE_SHIFT + pool_order) / 64 > + MLX5E_PAGECNT_BIAS_MAX)) { + err = -E2BIG; + goto err_free_by_rq_type; + } + pp_params.order = pool_order; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = pool_size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index fc95ea00666b4..8fb57a4f36dd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -272,8 +272,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem); } -#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64) - static int mlx5e_page_alloc_fragmented(struct page_pool *pp, struct mlx5e_frag_page *frag_page) {