net/mlx5e: SHAMPO: Reorganize mlx5_rq_shampo_alloc
author Saeed Mahameed <saeedm@nvidia.com>
Mon, 16 Jun 2025 14:14:33 +0000 (17:14 +0300)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 18 Jun 2025 01:34:12 +0000 (18:34 -0700)
Drop redundant SHAMPO structure alloc/free functions.

Gather together the function calls pertaining to the header split info and
pass the number of headers per WQ (hd_per_wq) to those functions as a
parameter, to avoid future use-before-initialization mistakes.

Allocate the HW GRO related info outside of the header split info scope.
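
For illustration, the sketch below shows the resulting allocation pattern as
a self-contained userspace analogue: the header-info helper takes the header
count as an explicit parameter instead of reading a struct field that may not
be set yet, the one-line alloc/free wrappers are folded into the caller, and
the GRO-only allocation is a separate step with error unwinding in reverse
order. All names (shampo_info, rq_ctx, shampo_hd_info_alloc, ...) are
hypothetical stand-ins, and calloc()/free() replace the kernel's
kvzalloc_node()/bitmap_zalloc_node(); this is not the mlx5e code itself.

#include <stdlib.h>

/* Hypothetical stand-ins for the mlx5e structures; illustration only. */
struct shampo_info {
	unsigned int hd_per_wq;	/* set from the explicit parameter */
	unsigned long *bitmap;
	void **pages;
};

struct rq_ctx {
	struct shampo_info *shampo;
	void *hw_gro_data;
};

/*
 * The helper receives hd_per_wq as a parameter, mirroring the reworked
 * mlx5e_rq_shampo_hd_info_alloc(): it cannot read the count from a field
 * that has not been initialized yet, because the caller must supply it.
 */
static int shampo_hd_info_alloc(struct shampo_info *shampo, unsigned int hd_per_wq)
{
	shampo->hd_per_wq = hd_per_wq;
	shampo->bitmap = calloc((hd_per_wq + 63) / 64, sizeof(*shampo->bitmap));
	shampo->pages = calloc(hd_per_wq, sizeof(*shampo->pages));
	if (!shampo->bitmap || !shampo->pages) {
		free(shampo->pages);
		free(shampo->bitmap);
		return -1;
	}
	return 0;
}

static void shampo_hd_info_free(struct shampo_info *shampo)
{
	free(shampo->pages);
	free(shampo->bitmap);
}

/*
 * The caller open-codes the former alloc/free wrappers, keeps the
 * header-split setup together, allocates the GRO-only data afterwards,
 * and unwinds in reverse order on failure.
 */
static int rq_shampo_alloc(struct rq_ctx *rq, unsigned int hd_per_wq)
{
	rq->shampo = calloc(1, sizeof(*rq->shampo));
	if (!rq->shampo)
		return -1;

	/* split headers data structures */
	if (shampo_hd_info_alloc(rq->shampo, hd_per_wq))
		goto err_hd_info;

	/* gro only data structures */
	rq->hw_gro_data = calloc(1, 64);
	if (!rq->hw_gro_data)
		goto err_hw_gro;

	return 0;

err_hw_gro:
	shampo_hd_info_free(rq->shampo);
err_hd_info:
	free(rq->shampo);
	rq->shampo = NULL;
	return -1;
}

int main(void)
{
	struct rq_ctx rq = { 0 };

	if (rq_shampo_alloc(&rq, 128))
		return 1;

	/* teardown mirrors mlx5e_rq_free_shampo(): GRO data, header info, struct */
	free(rq.hw_gro_data);
	shampo_hd_info_free(rq.shampo);
	free(rq.shampo);
	return 0;
}

As in the patch, each error label undoes exactly the steps completed before
the failing one, so adding or reordering an allocation only requires touching
its matching label.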

Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Link: https://patch.msgid.link/20250616141441.1243044-5-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5b0d03b3efe8284f0594d9c622f240b0a803e8dc..211ea429ea89399639ead5c0f5d6baf5345eb268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -638,7 +638,6 @@ struct mlx5e_shampo_hd {
        struct mlx5e_frag_page *pages;
        u32 hd_per_wq;
        u16 hd_per_wqe;
-       u16 pages_per_wq;
        unsigned long *bitmap;
        u16 pi;
        u16 ci;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea822c69d137b24d8801ec57d28783da110c5c6e..3d11c9f87171f7e8bcb1e102b52d6786e4c57c32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -331,47 +331,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 }
 
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
-       rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
-                                        GFP_KERNEL, node);
-       if (!rq->mpwqe.shampo)
-               return -ENOMEM;
-       return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
-       kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
-       struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
-       shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
-                                           node);
-       shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
-                                                sizeof(*shampo->pages)),
-                                    GFP_KERNEL, node);
-       if (!shampo->bitmap || !shampo->pages)
-               goto err_nomem;
-
-       return 0;
-
-err_nomem:
-       bitmap_free(shampo->bitmap);
-       kvfree(shampo->pages);
-
-       return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
-       bitmap_free(rq->mpwqe.shampo->bitmap);
-       kvfree(rq->mpwqe.shampo->pages);
-}
-
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 {
        int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -584,19 +543,18 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 }
 
 static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
-                                      struct mlx5e_rq *rq)
+                                      u16 hd_per_wq, u32 *umr_mkey)
 {
        u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
 
-       if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+       if (max_ksm_size < hd_per_wq) {
                mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
-                             max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
+                             max_ksm_size, hd_per_wq);
                return -EINVAL;
        }
-
-       return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+       return mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
                                         MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
-                                        &rq->mpwqe.shampo->mkey);
+                                        umr_mkey);
 }
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -758,6 +716,35 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
                                  xdp_frag_size);
 }
 
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+                                        int node)
+{
+       struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+       shampo->hd_per_wq = hd_per_wq;
+
+       shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+       shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+                                                sizeof(*shampo->pages)),
+                                     GFP_KERNEL, node);
+       if (!shampo->bitmap || !shampo->pages)
+               goto err_nomem;
+
+       return 0;
+
+err_nomem:
+       kvfree(shampo->pages);
+       bitmap_free(shampo->bitmap);
+
+       return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+       kvfree(rq->mpwqe.shampo->pages);
+       bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params,
                                struct mlx5e_rq_param *rqp,
@@ -765,42 +752,52 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
                                u32 *pool_size,
                                int node)
 {
+       void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+       u16 hd_per_wq;
+       int wq_size;
        int err;
 
        if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
                return 0;
-       err = mlx5e_rq_shampo_hd_alloc(rq, node);
-       if (err)
-               goto out;
-       rq->mpwqe.shampo->hd_per_wq =
-               mlx5e_shampo_hd_per_wq(mdev, params, rqp);
-       err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+       rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+                                        GFP_KERNEL, node);
+       if (!rq->mpwqe.shampo)
+               return -ENOMEM;
+
+       /* split headers data structures */
+       hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+       err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
        if (err)
-               goto err_shampo_hd;
-       err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+               goto err_shampo_hd_info_alloc;
+
+       err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+                                         &rq->mpwqe.shampo->mkey);
        if (err)
-               goto err_shampo_info;
+               goto err_umr_mkey;
+
+       rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey);
+       rq->mpwqe.shampo->hd_per_wqe =
+               mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+       wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+       *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+                    MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+       /* gro only data structures */
        rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
        if (!rq->hw_gro_data) {
                err = -ENOMEM;
                goto err_hw_gro_data;
        }
-       rq->mpwqe.shampo->key =
-               cpu_to_be32(rq->mpwqe.shampo->mkey);
-       rq->mpwqe.shampo->hd_per_wqe =
-               mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
-       rq->mpwqe.shampo->pages_per_wq =
-               rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
-       *pool_size += rq->mpwqe.shampo->pages_per_wq;
+
        return 0;
 
 err_hw_gro_data:
-       mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
        mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
-       mlx5e_rq_shampo_hd_free(rq);
-out:
+err_umr_mkey:
+       mlx5e_rq_shampo_hd_info_free(rq);
+err_shampo_hd_info_alloc:
+       kvfree(rq->mpwqe.shampo);
        return err;
 }
 
@@ -812,7 +809,7 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
        kvfree(rq->hw_gro_data);
        mlx5e_rq_shampo_hd_info_free(rq);
        mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
-       mlx5e_rq_shampo_hd_free(rq);
+       kvfree(rq->mpwqe.shampo);
 }
 
 static int mlx5e_alloc_rq(struct mlx5e_params *params,