git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net/mlx5: E-switch, Fix error handling for enabling roce
authorChris Mi <cmi@nvidia.com>
Wed, 23 Apr 2025 08:36:11 +0000 (11:36 +0300)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 9 May 2025 07:39:38 +0000 (09:39 +0200)
[ Upstream commit 90538d23278a981e344d364e923162fce752afeb ]

The cited commit assumes that enabling RoCE always succeeds, but that is
not always the case. Add error handling for it.

Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Link: https://patch.msgid.link/20250423083611.324567-6-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.h

index 829f703233a9eaaecc3d6f95f9f8385d0827186d..766a05f557fba5ac13f24cd8e245a40bb31c5689 100644 (file)
@@ -3138,7 +3138,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        int err;
 
        mutex_init(&esw->offloads.termtbl_mutex);
-       mlx5_rdma_enable_roce(esw->dev);
+       err = mlx5_rdma_enable_roce(esw->dev);
+       if (err)
+               goto err_roce;
 
        err = mlx5_esw_host_number_init(esw);
        if (err)
@@ -3198,6 +3200,7 @@ err_vport_metadata:
        esw_offloads_metadata_uninit(esw);
 err_metadata:
        mlx5_rdma_disable_roce(esw->dev);
+err_roce:
        mutex_destroy(&esw->offloads.termtbl_mutex);
        return err;
 }
index ab5afa6c5e0fd1050708342a135864aa736053bc..e61a4fa46d77223bf066caaf54cb4d876710f08d 100644 (file)
@@ -152,17 +152,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
        mlx5_nic_vport_disable_roce(dev);
 }
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
        int err;
 
        if (!MLX5_CAP_GEN(dev, roce))
-               return;
+               return 0;
 
        err = mlx5_nic_vport_enable_roce(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
-               return;
+               return err;
        }
 
        err = mlx5_rdma_add_roce_addr(dev);
@@ -177,10 +177,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
                goto del_roce_addr;
        }
 
-       return;
+       return err;
 
 del_roce_addr:
        mlx5_rdma_del_roce_addr(dev);
 disable_roce:
        mlx5_nic_vport_disable_roce(dev);
+       return err;
 }
index 750cff2a71a4bb2addd9c7617b2b5354c2d573d2..3d9e76c3d42fb1f2b10aee8a3a19c097ff148e9a 100644 (file)
@@ -8,12 +8,12 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
 
 #endif /* CONFIG_MLX5_ESWITCH */