git.ipfire.org Git - thirdparty/linux.git/commitdiff
net/mlx5e: Extract max_xsk_wqebbs into its own function
authorDragos Tatulea <dtatulea@nvidia.com>
Mon, 23 Feb 2026 20:41:43 +0000 (22:41 +0200)
committerPaolo Abeni <pabeni@redhat.com>
Thu, 26 Feb 2026 09:54:23 +0000 (10:54 +0100)
The calculation of max_xsk_wqebbs seems large enough to deserve its own
function. Extracting it will make upcoming changes easier.

This patch has no functional changes.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260223204155.1783580-4-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/mellanox/mlx5/core/en/params.c

index 07d75a85ee7fcbc3ce5e124e55de23a3ad776f44..be1aa37531deb64c76ec551253d835174d431dcc 100644 (file)
@@ -1116,18 +1116,15 @@ static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
        return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
 }
 
-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
-                                     struct mlx5e_params *params,
-                                     struct mlx5e_rq_param *rq_param)
+static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
+                               struct mlx5e_params *params)
 {
-       u32 wqebbs, total_pages, useful_space;
-
-       /* MLX5_WQ_TYPE_CYCLIC */
-       if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-               return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+       struct mlx5e_xsk_param xsk = {0};
+       u32 max_xsk_wqebbs = 0;
+       u8 frame_shift;
 
-       /* UMR WQEs for the regular RQ. */
-       wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
+       if (!params->xdp_prog)
+               return 0;
 
        /* If XDP program is attached, XSK may be turned on at any time without
         * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
@@ -1139,41 +1136,54 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
         * from capabilities. Hence, we have to try all valid values of XSK
         * frame size (and page_shift) to find the maximum.
         */
-       if (params->xdp_prog) {
-               u32 max_xsk_wqebbs = 0;
-               u8 frame_shift;
-
-               for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
-                    frame_shift <= PAGE_SHIFT; frame_shift++) {
-                       /* The headroom doesn't affect the calculation. */
-                       struct mlx5e_xsk_param xsk = {
-                               .chunk_size = 1 << frame_shift,
-                               .unaligned = false,
-                       };
-
-                       /* XSK aligned mode. */
-                       max_xsk_wqebbs = max(max_xsk_wqebbs,
-                               mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
-                       /* XSK unaligned mode, frame size is a power of two. */
-                       xsk.unaligned = true;
-                       max_xsk_wqebbs = max(max_xsk_wqebbs,
-                               mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
-                       /* XSK unaligned mode, frame size is not equal to stride size. */
-                       xsk.chunk_size -= 1;
-                       max_xsk_wqebbs = max(max_xsk_wqebbs,
-                               mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
-                       /* XSK unaligned mode, frame size is a triple power of two. */
-                       xsk.chunk_size = (1 << frame_shift) / 4 * 3;
-                       max_xsk_wqebbs = max(max_xsk_wqebbs,
-                               mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-               }
+       for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
+            frame_shift <= PAGE_SHIFT; frame_shift++) {
+               u32 total_wqebbs;
 
-               wqebbs += max_xsk_wqebbs;
+               /* The headroom doesn't affect the calculations below. */
+
+               /* XSK aligned mode. */
+               xsk.chunk_size = 1 << frame_shift;
+               xsk.unaligned = false;
+               total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+               max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+               /* XSK unaligned mode, frame size is a power of two. */
+               xsk.unaligned = true;
+               total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+               max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+               /* XSK unaligned mode, frame size is not equal to stride
+                * size.
+                */
+               xsk.chunk_size -= 1;
+               total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+               max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+               /* XSK unaligned mode, frame size is a triple power of two. */
+               xsk.chunk_size = (1 << frame_shift) / 4 * 3;
+               total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+               max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
        }
 
+       return max_xsk_wqebbs;
+}
+
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
+                                     struct mlx5e_params *params,
+                                     struct mlx5e_rq_param *rq_param)
+{
+       u32 wqebbs, total_pages, useful_space;
+
+       /* MLX5_WQ_TYPE_CYCLIC */
+       if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+
+       /* UMR WQEs for the regular RQ. */
+       wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
+
+       wqebbs += mlx5e_max_xsk_wqebbs(mdev, params);
+
        /* UMR WQEs don't cross the page boundary, they are padded with NOPs.
         * This padding is always smaller than the max WQE size. That gives us
         * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes