git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net/mlx5: E-switch, Fix error handling for enabling roce
authorChris Mi <cmi@nvidia.com>
Wed, 23 Apr 2025 08:36:11 +0000 (11:36 +0300)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 9 May 2025 07:43:57 +0000 (09:43 +0200)
[ Upstream commit 90538d23278a981e344d364e923162fce752afeb ]

The cited commit assumes that enabling RoCE always succeeds. But that is
not always the case. Add error handling for it.

Fixes: 80f09dfc237f ("net/mlx5: Eswitch, enable RoCE loopback traffic")
Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Link: https://patch.msgid.link/20250423083611.324567-6-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.h

index 7eba3a5bb97caefa69599727212e8b3bd9489561..326c72b3df86710cbbc482f195bd8bfa26d4c083 100644 (file)
@@ -3499,7 +3499,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        int err;
 
        mutex_init(&esw->offloads.termtbl_mutex);
-       mlx5_rdma_enable_roce(esw->dev);
+       err = mlx5_rdma_enable_roce(esw->dev);
+       if (err)
+               goto err_roce;
 
        err = mlx5_esw_host_number_init(esw);
        if (err)
@@ -3560,6 +3562,7 @@ err_vport_metadata:
        esw_offloads_metadata_uninit(esw);
 err_metadata:
        mlx5_rdma_disable_roce(esw->dev);
+err_roce:
        mutex_destroy(&esw->offloads.termtbl_mutex);
        return err;
 }
index f585ef5a3424396ae9ce1cda1ed22497783d5cb8..5c552b71e371c5bdd42732b44382da828e6cb4e3 100644 (file)
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
        mlx5_nic_vport_disable_roce(dev);
 }
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
        int err;
 
        if (!MLX5_CAP_GEN(dev, roce))
-               return;
+               return 0;
 
        err = mlx5_nic_vport_enable_roce(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
-               return;
+               return err;
        }
 
        err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
                goto del_roce_addr;
        }
 
-       return;
+       return err;
 
 del_roce_addr:
        mlx5_rdma_del_roce_addr(dev);
 disable_roce:
        mlx5_nic_vport_disable_roce(dev);
+       return err;
 }
index 750cff2a71a4bb2addd9c7617b2b5354c2d573d2..3d9e76c3d42fb1f2b10aee8a3a19c097ff148e9a 100644 (file)
@@ -8,12 +8,12 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
 
 #endif /* CONFIG_MLX5_ESWITCH */