From: Mark Bloch
Date: Wed, 19 Mar 2025 14:02:59 +0000 (+0200)
Subject: net/mlx5: Lag, use port selection tables when available
X-Git-Tag: v6.15-rc1~160^2~28^2~4
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=16ad8394bf310791d2ad171e266b639b6b265fc5;p=thirdparty%2Fkernel%2Flinux.git

net/mlx5: Lag, use port selection tables when available

As queue affinity is being deprecated and will no longer be supported
in the future, always check for the presence of the port selection
namespace. When available, leverage it to distribute traffic across the
physical ports via steering, ensuring compatibility with future NICs.

Signed-off-by: Mark Bloch
Reviewed-by: Maor Gottlieb
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1742392983-153050-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ba41dd149f53c..7db5ca95d3228 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -583,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
 	}
 }
 
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
-					   unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+				      enum mlx5_lag_mode mode,
+				      unsigned long *flags)
 {
 	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
 	struct mlx5_core_dev *dev0;
@@ -592,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
 	if (first_idx < 0)
 		return -EINVAL;
 
+	if (mode == MLX5_LAG_MODE_MPESW ||
+	    mode == MLX5_LAG_MODE_MULTIPATH)
+		return 0;
+
 	dev0 = ldev->pf[first_idx].dev;
+
 	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
 		if (ldev->ports > 2)
 			return -EINVAL;
@@ -607,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
 	return 0;
 }
 
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
-						struct lag_tracker *tracker,
-						enum mlx5_lag_mode mode,
-						unsigned long *flags)
-{
-	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
-	struct lag_func *dev0;
-
-	if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
-		return;
-
-	dev0 = &ldev->pf[first_idx];
-	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
-	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
-		if (ldev->ports > 2)
-			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
-		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
-	}
-}
-
 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
 			      struct lag_tracker *tracker, bool shared_fdb,
 			      unsigned long *flags)
 {
-	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
 	*flags = 0;
 	if (shared_fdb) {
 		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -642,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
 	if (mode == MLX5_LAG_MODE_MPESW)
 		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
 
-	if (roce_lag)
-		return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
-	mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
-	return 0;
+	return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
 }
 
 char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
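
For reference, the consolidated helper reads roughly as follows after
this patch. This is a sketch, not part of the diff: the body between
the second and third hunks is untouched by the patch, so the hash-based
section below is reconstructed from the pre-patch code and should be
treated as an assumption.

static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
				      enum mlx5_lag_mode mode,
				      unsigned long *flags)
{
	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_core_dev *dev0;

	if (first_idx < 0)
		return -EINVAL;

	/* MPESW and multipath handle port selection on their own. */
	if (mode == MLX5_LAG_MODE_MPESW ||
	    mode == MLX5_LAG_MODE_MULTIPATH)
		return 0;

	dev0 = ldev->pf[first_idx].dev;

	/* Without a port selection flow table, fall back to queue
	 * affinity, which cannot spread traffic across more than two
	 * ports.
	 */
	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
		if (ldev->ports > 2)
			return -EINVAL;
		return 0;
	}

	/* Assumed unchanged middle (carried over from the pre-patch
	 * code): prefer hash-based (steering) port selection whenever
	 * the capability is present.
	 */
	if (ldev->ports > 2)
		ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
	set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);

	return 0;
}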