net/mlx5e: XDP, Use a single linear page per rq
author Dragos Tatulea <dtatulea@nvidia.com>
Fri, 3 Apr 2026 09:09:26 +0000 (12:09 +0300)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 7 Apr 2026 11:34:04 +0000 (13:34 +0200)
Currently, in striding rq mode, there is one mlx5e_frag_page member per
WQE for the linear page. This linear page is used only in XDP
multi-buffer mode. This is wasteful because only one linear page is
needed per rq: the page gets refreshed on every packet, regardless of
which WQE it belongs to. Furthermore, no linear page is needed at all
in the other modes (non-XDP, XDP single-buffer).

This change moves the linear page out of struct mlx5e_mpw_info and into
its own structure (struct mlx5e_mpw_linear_info), allocated only when
necessary.

A dedicated structure is introduced because an upcoming patch will
extend it to support fragmentation of the linear page.

This patch has no functional changes.
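
For a sense of the memory effect, here is a minimal userspace model of
the layout change (illustrative only: the struct members are simplified
and the ring size of 256 WQEs is an assumption, not a value taken from
the driver):

#include <stdio.h>

/* Simplified stand-in for struct mlx5e_frag_page: a netmem handle plus
 * a fragment refcount. */
struct frag_page_model {
	void *netmem;
	unsigned short frags;
};

int main(void)
{
	unsigned int wqes = 256; /* assumed striding-RQ ring size */

	/* Before: every per-WQE mlx5e_mpw_info embeds a linear page
	 * descriptor, whether or not XDP multi-buffer ever uses it. */
	printf("before: %zu bytes of linear-page state per rq\n",
	       wqes * sizeof(struct frag_page_model));

	/* After: at most one descriptor per rq, and none at all in
	 * non-XDP or XDP single-buffer mode. */
	printf("after:  %zu bytes (or 0 when not needed)\n",
	       sizeof(struct frag_page_model));
	return 0;
}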

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260403090927.139042-5-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c7ac6ebe829069db7c09a4249586813c309bbd21..592234780f2b800902c0337ef35b14fa82a52844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -591,10 +591,13 @@ union mlx5e_alloc_units {
 struct mlx5e_mpw_info {
        u16 consumed_strides;
        DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
-       struct mlx5e_frag_page linear_page;
        union mlx5e_alloc_units alloc_units;
 };
 
+struct mlx5e_mpw_linear_info {
+       struct mlx5e_frag_page frag_page;
+};
+
 #define MLX5E_MAX_RX_FRAGS 4
 
 struct mlx5e_rq;
@@ -689,6 +692,7 @@ struct mlx5e_rq {
                        u8                     umr_wqebbs;
                        u8                     mtts_per_wqe;
                        u8                     umr_mode;
+                       struct mlx5e_mpw_linear_info *linear_info;
                        struct mlx5e_shampo_hd *shampo;
                } mpwqe;
        };
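
Note that the new linear_info member is a pointer rather than an
embedded struct, so it can stay NULL in the modes that never use it.
The rx path dereferences it without a NULL check, which is safe only
because the allocation condition (see mlx5e_rq_alloc_mpwqe_linear_info()
in the en_main.c hunk below) mirrors the condition under which the
nonlinear XDP path runs. A minimal sketch of that invariant, with a
hypothetical helper name:

#include <stdbool.h>

/* Hypothetical predicate, not part of the patch: linear_info must be
 * allocated exactly when an XDP program is attached and the MPWQE SKB
 * path is nonlinear. */
static inline bool mpwqe_needs_linear_info(bool linear_skb, bool has_xdp_prog)
{
	return has_xdp_prog && !linear_skb;
}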
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1238e53560122e96caeaf70728727a4824fb1fc0..aa8359a48b12c35c08620c3ba228342e0d86f1b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -369,6 +369,29 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
        return 0;
 }
 
+static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
+                                           struct mlx5e_params *params,
+                                           struct mlx5e_rq_opt_param *rqo,
+                                           u32 *pool_size)
+{
+       struct mlx5_core_dev *mdev = rq->mdev;
+       struct mlx5e_mpw_linear_info *li;
+
+       if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) ||
+           !params->xdp_prog)
+               return 0;
+
+       li = kvzalloc_node(sizeof(*li), GFP_KERNEL, node);
+       if (!li)
+               return -ENOMEM;
+
+       rq->mpwqe.linear_info = li;
+
+       /* additional page per packet for the linear part */
+       *pool_size *= 2;
+
+       return 0;
+}
 
 static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
 {
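
A note on the *pool_size *= 2 above: in XDP multi-buffer mode, every
received packet may claim one extra page from the page pool for its
linear part, on top of its regular data pages. As an illustrative
worked example (the numbers are assumed, not taken from the driver), a
pool sized for 1024 in-flight packets grows to 1024 x 2 = 2048 pages so
that the linear copies cannot starve the data path. This is the same
doubling that mlx5e_alloc_rq previously applied inline, as the removal
in the next hunk shows; the patch only moves it next to the allocation
it pays for.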
@@ -915,10 +938,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                        mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
                pool_order = rq->mpwqe.page_shift - PAGE_SHIFT;
 
-               if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
-                   params->xdp_prog)
-                       pool_size *= 2; /* additional page per packet for the linear part */
-
                rq->mpwqe.log_stride_sz =
                                mlx5e_mpwqe_get_log_stride_size(mdev, params,
                                                                rqo);
@@ -936,10 +955,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                if (err)
                        goto err_rq_mkey;
 
-               err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+               err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo,
+                                                      &pool_size);
                if (err)
                        goto err_free_mpwqe_info;
 
+               err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+               if (err)
+                       goto err_free_mpwqe_linear_info;
+
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
                err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
@@ -1054,6 +1078,8 @@ err_free_by_rq_type:
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_linear_info:
+               kvfree(rq->mpwqe.linear_info);
 err_free_mpwqe_info:
                kvfree(rq->mpwqe.info);
 err_rq_mkey:
@@ -1081,6 +1107,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_shampo(rq);
+               kvfree(rq->mpwqe.linear_info);
                kvfree(rq->mpwqe.info);
                mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
                mlx5e_free_mpwqe_rq_drop_page(rq);
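
Both teardown paths call kvfree(rq->mpwqe.linear_info) unconditionally:
like kfree(), kvfree() is a no-op on a NULL pointer, so no flag is
needed to remember whether the XDP multi-buffer allocation ever
happened. A userspace analogue of the same idiom:

#include <stdlib.h>

int main(void)
{
	void *linear_info = NULL; /* never allocated, e.g. non-XDP mode */

	free(linear_info); /* free(NULL), like kvfree(NULL), is a no-op */
	return 0;
}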
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f5c0e2a0ada9acc9bb9525a395013c14ed4a35fb..feb042d84b8e2aee81c95d110c863472002d4d27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1869,6 +1869,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        struct mlx5e_frag_page *head_page = frag_page;
+       struct mlx5e_frag_page *linear_page = NULL;
        struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
        u32 page_size = BIT(rq->mpwqe.page_shift);
        u32 frag_offset    = head_offset;
@@ -1897,13 +1898,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        if (prog) {
                /* area for bpf_xdp_[store|load]_bytes */
                net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+
+               linear_page = &rq->mpwqe.linear_info->frag_page;
                if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
-                                                        &wi->linear_page))) {
+                                                        linear_page))) {
                        rq->stats->buff_alloc_err++;
                        return NULL;
                }
 
-               va = netmem_address(wi->linear_page.netmem);
+               va = netmem_address(linear_page->netmem);
                net_prefetchw(va); /* xdp_frame data area */
                linear_hr = XDP_PACKET_HEADROOM;
                linear_data_len = 0;
@@ -1966,10 +1969,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                                for (pfp = head_page; pfp < frag_page; pfp++)
                                        pfp->frags++;
 
-                               wi->linear_page.frags++;
+                               linear_page->frags++;
                        }
                        mlx5e_page_release_fragmented(rq->page_pool,
-                                                     &wi->linear_page);
+                                                     linear_page);
                        return NULL; /* page/packet was consumed by XDP */
                }
 
@@ -1988,13 +1991,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                        mxbuf->xdp.data - mxbuf->xdp.data_meta);
                if (unlikely(!skb)) {
                        mlx5e_page_release_fragmented(rq->page_pool,
-                                                     &wi->linear_page);
+                                                     linear_page);
                        return NULL;
                }
 
                skb_mark_for_recycle(skb);
-               wi->linear_page.frags++;
-               mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
+               linear_page->frags++;
+               mlx5e_page_release_fragmented(rq->page_pool, linear_page);
 
                if (xdp_buff_has_frags(&mxbuf->xdp)) {
                        struct mlx5e_frag_page *pagep;
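
One subtlety in the exit paths above: the code bumps linear_page->frags
before calling mlx5e_page_release_fragmented(). The release drops the
rq's own hold on the fragmented page, while the extra fragment
reference keeps the page alive for whoever still needs it (the built
skb, or an XDP transmit still in flight). A deliberately simplified
userspace model of the pattern (the real driver uses page_pool bias
accounting, not a plain counter):

#include <stdio.h>

struct frag_page_model {
	int frags; /* outstanding fragment references */
};

/* Simplified stand-in for mlx5e_page_release_fragmented(): the page
 * can only be recycled once no fragment reference remains. */
static void release_fragmented_model(struct frag_page_model *fp)
{
	if (fp->frags == 0)
		printf("page recycled to the pool\n");
	else
		printf("page kept alive (%d fragment ref(s) remain)\n",
		       fp->frags);
}

int main(void)
{
	struct frag_page_model linear = { .frags = 0 };

	linear.frags++;                    /* consumer takes a reference */
	release_fragmented_model(&linear); /* rq drops its hold: page survives */

	linear.frags--;                    /* consumer is done with the page */
	release_fragmented_model(&linear); /* now it can actually be recycled */
	return 0;
}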