net/mlx5: Lag, use port selection tables when available
Author:     Mark Bloch <mbloch@nvidia.com>
AuthorDate: Wed, 19 Mar 2025 14:02:59 +0000 (16:02 +0200)
Committer:  Jakub Kicinski <kuba@kernel.org>
CommitDate: Tue, 25 Mar 2025 14:29:47 +0000 (07:29 -0700)
As queue affinity is being deprecated and will no longer be supported
in the future, always check for the presence of the port selection
namespace. When available, leverage it to distribute traffic across
the physical ports via steering, ensuring compatibility with future
NICs.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1742392983-153050-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c

index ba41dd149f53cd0adddce4daa4e0f895580c8052..7db5ca95d32284756970c6a2678e4fbab3396418 100644
@@ -583,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
        }
 }
 
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
-                                          unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+                                     enum mlx5_lag_mode mode,
+                                     unsigned long *flags)
 {
        int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
        struct mlx5_core_dev *dev0;
@@ -592,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
        if (first_idx < 0)
                return -EINVAL;
 
+       if (mode == MLX5_LAG_MODE_MPESW ||
+           mode == MLX5_LAG_MODE_MULTIPATH)
+               return 0;
+
        dev0 = ldev->pf[first_idx].dev;
+
        if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
                if (ldev->ports > 2)
                        return -EINVAL;
@@ -607,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
        return 0;
 }
 
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
-                                               struct lag_tracker *tracker,
-                                               enum mlx5_lag_mode mode,
-                                               unsigned long *flags)
-{
-       int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
-       struct lag_func *dev0;
-
-       if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
-               return;
-
-       dev0 = &ldev->pf[first_idx];
-       if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
-           tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
-               if (ldev->ports > 2)
-                       ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
-               set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
-       }
-}
-
 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
                              struct lag_tracker *tracker, bool shared_fdb,
                              unsigned long *flags)
 {
-       bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
        *flags = 0;
        if (shared_fdb) {
                set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -642,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
        if (mode == MLX5_LAG_MODE_MPESW)
                set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
 
-       if (roce_lag)
-               return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
-       mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
-       return 0;
+       return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
 }
 
 char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
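
For readers skimming the diff, here is a minimal userspace-style model of
the decision logic in the consolidated mlx5_lag_set_port_sel_mode(). The
enum, struct fields and return codes below are illustrative stand-ins,
not the kernel's mlx5 definitions; only the control flow mirrors the
patch: MPESW and multipath return early, devices without the
port_select_flow_table capability fall back to queue affinity (and
cannot support more than two ports), and capable devices get hash-based
steering.

/*
 * Illustrative sketch only: names and types are stand-ins for the
 * kernel's mlx5 structures, not the real driver API.
 */
#include <stdbool.h>
#include <stdio.h>

enum lag_mode { LAG_MODE_ROCE, LAG_MODE_SRIOV, LAG_MODE_MPESW, LAG_MODE_MULTIPATH };

struct lag_model {
	bool port_select_flow_table;	/* models MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) */
	int ports;			/* models ldev->ports */
	bool hash_based;		/* models MLX5_LAG_MODE_FLAG_HASH_BASED in *flags */
};

/* Mirrors the control flow of the consolidated function in the diff */
static int set_port_sel_mode(struct lag_model *m, enum lag_mode mode)
{
	/* MPESW and multipath keep their existing port handling */
	if (mode == LAG_MODE_MPESW || mode == LAG_MODE_MULTIPATH)
		return 0;

	if (!m->port_select_flow_table) {
		/* without the flow table, more than two ports cannot be supported */
		if (m->ports > 2)
			return -1;
		return 0;		/* fall back to queue affinity */
	}

	/* capability present: steer traffic via the port selection table */
	m->hash_based = true;
	return 0;
}

int main(void)
{
	struct lag_model m = { .port_select_flow_table = true, .ports = 2 };

	if (!set_port_sel_mode(&m, LAG_MODE_ROCE))
		printf("port selection: %s\n", m.hash_based ? "hash based" : "queue affinity");
	return 0;
}

In the real driver the capable path also widens the hash to
MLX5_LAG_MAX_HASH_BUCKETS when more than two ports are bonded, as the
body of the removed mlx5_lag_set_port_sel_mode_offloads() above shows;
the sketch omits that detail.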