Introduce mlx5_lag_get_dev_seq() which returns a device's sequence
number within the LAG: master is always 0, remaining devices numbered
sequentially. This provides a stable index for peer flow tracking and
vport ordering without depending on native_port_num.
Replace mlx5_get_dev_index() usage in en_tc.c (peer flow array
indexing) and ib_rep.c (vport index ordering) with the new API.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260309093435.1850724-7-tariqt@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
+#include <linux/mlx5/lag.h>
#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"
/* Only 1 ib port is the representor for all uplinks */
peer_n_ports--;
- if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev))
+ if (mlx5_lag_get_dev_seq(peer_dev) <
+ mlx5_lag_get_dev_seq(dev))
vport_index += peer_n_ports;
}
}
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/lag.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
mutex_unlock(&esw->offloads.peer_mutex);
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
- if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+ if (peer_index != mlx5_lag_get_dev_seq(peer_flow->priv->mdev))
continue;
list_del(&peer_flow->peer_flows);
devcom = flow->priv->mdev->priv.eswitch->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
- i = mlx5_get_dev_index(peer_esw->dev);
+ i = mlx5_lag_get_dev_seq(peer_esw->dev);
mlx5e_tc_del_fdb_peer_flow(flow, i);
}
}
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- int i = mlx5_get_dev_index(peer_esw->dev);
+ int i = mlx5_lag_get_dev_seq(peer_esw->dev);
struct mlx5e_rep_priv *peer_urpriv;
struct mlx5e_tc_flow *peer_flow;
struct mlx5_core_dev *in_mdev;
devcom = esw->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
- i = mlx5_get_dev_index(peer_esw->dev);
+ i = mlx5_lag_get_dev_seq(peer_esw->dev);
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
mlx5e_tc_del_fdb_peers_flow(flow);
}
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/lag.h>
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
return -ENOENT;
}
+/* Reverse of mlx5_lag_get_dev_index_by_seq(): given a device, return its
+ * sequence number within the LAG.  The LAG master is always seq 0; the
+ * remaining devices are numbered sequentially from 1, in ldev iteration
+ * order with the master's slot skipped.
+ *
+ * Return: non-negative sequence number on success, -ENOENT if the device
+ * is not part of a LAG, the LAG has no master, or the device cannot be
+ * found among the LAG functions.
+ */
+int mlx5_lag_get_dev_seq(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
+	int master_idx, i, num = 1;
+	struct lag_func *pf;
+
+	if (!ldev)
+		return -ENOENT;
+
+	master_idx = mlx5_lag_get_master_idx(ldev);
+	if (master_idx < 0)
+		return -ENOENT;
+
+	pf = mlx5_lag_pf(ldev, master_idx);
+	if (pf && pf->dev == dev)
+		return 0;
+
+	mlx5_ldev_for_each(i, 0, ldev) {
+		if (i == master_idx)
+			continue;
+		pf = mlx5_lag_pf(ldev, i);
+		/* NULL-check pf for consistency with the master lookup
+		 * above, which tolerates a NULL lag_func; num is still
+		 * advanced so valid slots keep their position.
+		 */
+		if (pf && pf->dev == dev)
+			return num;
+		num++;
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(mlx5_lag_get_dev_seq);
+
/* Devcom events for LAG master marking */
#define LAG_DEVCOM_PAIR (0)
#define LAG_DEVCOM_UNPAIR (1)
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LAG_API_H__
+#define __MLX5_LAG_API_H__
+
+struct mlx5_core_dev;
+
+/**
+ * mlx5_lag_get_dev_seq - get a device's sequence number within its LAG
+ * @dev: mlx5 core device to look up
+ *
+ * The LAG master is always sequence 0; the remaining devices are numbered
+ * sequentially starting from 1.
+ *
+ * Return: non-negative sequence number, or -ENOENT if @dev is not part of
+ * a LAG or cannot be found in it.
+ */
+int mlx5_lag_get_dev_seq(struct mlx5_core_dev *dev);
+
+#endif /* __MLX5_LAG_API_H__ */