--- /dev/null
+From db825feefc6868896fed5e361787ba3bee2fd906 Mon Sep 17 00:00:00 2001
+From: Vladyslav Tarasiuk <vladyslavt@nvidia.com>
+Date: Sun, 9 May 2021 09:43:18 +0300
+Subject: net/mlx4: Fix EEPROM dump support
+
+From: Vladyslav Tarasiuk <vladyslavt@nvidia.com>
+
+commit db825feefc6868896fed5e361787ba3bee2fd906 upstream.
+
+Fix SFP and QSFP* EEPROM queries by setting i2c_address, offset and page
+number correctly. For SFP set the following params:
+- I2C address for offsets 0-255 is 0x50. For 256-511 - 0x51.
+- Page number is zero.
+- Offset is 0-255.
+
+At the same time, QSFP* parameters are different:
+- I2C address is always 0x50.
+- Page number is not limited to zero.
+- Offset is 0-255 for page zero and 128-255 for others.
+
+To set parameters accordingly to cable used, implement function to query
+module ID and implement respective helper functions to set parameters
+correctly.
+
+Fixes: 135dd9594f12 ("net/mlx4_en: ethtool, Remove unsupported SFP EEPROM high pages query")
+Signed-off-by: Vladyslav Tarasiuk <vladyslavt@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4
+ drivers/net/ethernet/mellanox/mlx4/port.c | 107 +++++++++++++++++++++++-
+ 2 files changed, 104 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct ne
+ return ret;
+ }
+
+-#define MLX4_EEPROM_PAGE_LEN 256
+-
+ static int mlx4_en_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+ {
+@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struc
+ break;
+ case MLX4_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
++ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -EINVAL;
+--- a/drivers/net/ethernet/mellanox/mlx4/port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/port.c
+@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_sla
+ #define I2C_ADDR_LOW 0x50
+ #define I2C_ADDR_HIGH 0x51
+ #define I2C_PAGE_SIZE 256
++#define I2C_HIGH_PAGE_SIZE 128
+
+ /* Module Info Data */
+ struct mlx4_cable_info {
+@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad
+ return "Unknown Error";
+ }
+
++static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
++{
++ struct mlx4_cmd_mailbox *inbox, *outbox;
++ struct mlx4_mad_ifc *inmad, *outmad;
++ struct mlx4_cable_info *cable_info;
++ int ret;
++
++ inbox = mlx4_alloc_cmd_mailbox(dev);
++ if (IS_ERR(inbox))
++ return PTR_ERR(inbox);
++
++ outbox = mlx4_alloc_cmd_mailbox(dev);
++ if (IS_ERR(outbox)) {
++ mlx4_free_cmd_mailbox(dev, inbox);
++ return PTR_ERR(outbox);
++ }
++
++ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
++ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
++
++ inmad->method = 0x1; /* Get */
++ inmad->class_version = 0x1;
++ inmad->mgmt_class = 0x1;
++ inmad->base_version = 0x1;
++ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
++
++ cable_info = (struct mlx4_cable_info *)inmad->data;
++ cable_info->dev_mem_address = 0;
++ cable_info->page_num = 0;
++ cable_info->i2c_addr = I2C_ADDR_LOW;
++ cable_info->size = cpu_to_be16(1);
++
++ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
++ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
++ MLX4_CMD_NATIVE);
++ if (ret)
++ goto out;
++
++ if (be16_to_cpu(outmad->status)) {
++ /* Mad returned with bad status */
++ ret = be16_to_cpu(outmad->status);
++ mlx4_warn(dev,
++ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
++ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
++ cable_info_mad_err_str(ret));
++ ret = -ret;
++ goto out;
++ }
++ cable_info = (struct mlx4_cable_info *)outmad->data;
++ *module_id = cable_info->data[0];
++out:
++ mlx4_free_cmd_mailbox(dev, inbox);
++ mlx4_free_cmd_mailbox(dev, outbox);
++ return ret;
++}
++
++static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
++{
++ *i2c_addr = I2C_ADDR_LOW;
++ *page_num = 0;
++
++ if (*offset < I2C_PAGE_SIZE)
++ return;
++
++ *i2c_addr = I2C_ADDR_HIGH;
++ *offset -= I2C_PAGE_SIZE;
++}
++
++static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
++{
++ /* Offsets 0-255 belong to page 0.
++ * Offsets 256-639 belong to pages 01, 02, 03.
++ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
++ */
++ if (*offset < I2C_PAGE_SIZE)
++ *page_num = 0;
++ else
++ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
++ *i2c_addr = I2C_ADDR_LOW;
++ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
++}
++
+ /**
+ * mlx4_get_module_info - Read cable module eeprom data
+ * @dev: mlx4_dev.
+@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+- u16 i2c_addr;
++ u8 module_id, i2c_addr, page_num;
+ int ret;
+
+ if (size > MODULE_INFO_MAX_READ)
+ size = MODULE_INFO_MAX_READ;
+
++ ret = mlx4_get_module_id(dev, port, &module_id);
++ if (ret)
++ return ret;
++
++ switch (module_id) {
++ case MLX4_MODULE_ID_SFP:
++ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
++ break;
++ case MLX4_MODULE_ID_QSFP:
++ case MLX4_MODULE_ID_QSFP_PLUS:
++ case MLX4_MODULE_ID_QSFP28:
++ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
++ break;
++ default:
++ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
++ return -EINVAL;
++ }
++
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev
+ */
+ size -= offset + size - I2C_PAGE_SIZE;
+
+- i2c_addr = I2C_ADDR_LOW;
+-
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = cpu_to_be16(offset);
+- cable_info->page_num = 0;
++ cable_info->page_num = page_num;
+ cable_info->i2c_addr = i2c_addr;
+ cable_info->size = cpu_to_be16(size);
+
--- /dev/null
+From 442b3d7b671bcb779ebdad46edd08051eb8b28d9 Mon Sep 17 00:00:00 2001
+From: Jianbo Liu <jianbol@nvidia.com>
+Date: Fri, 30 Apr 2021 06:58:29 +0000
+Subject: net/mlx5: Set reformat action when needed for termination rules
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+commit 442b3d7b671bcb779ebdad46edd08051eb8b28d9 upstream.
+
+For remote mirroring, after the tunnel packets are received, they are
+decapsulated and sent to representor, then re-encapsulated and sent
+out over another tunnel. So reformat action is set only when the
+destination is required to do encapsulation.
+
+Fixes: 249ccc3c95bd ("net/mlx5e: Add support for offloading traffic from uplink to uplink")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Ariel Levkovich <lariel@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 31 +++-------
+ 1 file changed, 10 insertions(+), 21 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -171,19 +171,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_esw
+ }
+ }
+
+-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
+-{
+- switch (rt->reformat_type) {
+- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+ static void
+ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
+ struct mlx5_flow_act *dst)
+@@ -201,14 +188,6 @@ mlx5_eswitch_termtbl_actions_move(struct
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ }
+ }
+-
+- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
+- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+- dst->pkt_reformat = src->pkt_reformat;
+- src->pkt_reformat = NULL;
+- }
+ }
+
+ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+@@ -278,6 +257,14 @@ mlx5_eswitch_add_termtbl_rule(struct mlx
+ if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
++ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
++ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
++ } else {
++ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++ term_tbl_act.pkt_reformat = NULL;
++ }
++
+ /* get the terminating table for the action list */
+ tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
+ &dest[i], attr);
+@@ -299,6 +286,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx
+ goto revert_changes;
+
+ /* create the FTE */
++ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
++ flow_act->pkt_reformat = NULL;
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule))
+ goto revert_changes;
--- /dev/null
+From 6ff51ab8aa8fcbcddeeefce8ca705b575805d12b Mon Sep 17 00:00:00 2001
+From: Ariel Levkovich <lariel@nvidia.com>
+Date: Wed, 31 Mar 2021 10:09:02 +0300
+Subject: net/mlx5: Set term table as an unmanaged flow table
+
+From: Ariel Levkovich <lariel@nvidia.com>
+
+commit 6ff51ab8aa8fcbcddeeefce8ca705b575805d12b upstream.
+
+Termination tables are restricted to have the default miss action and
+cannot be set to forward to another table in case of a miss.
+If the fs prio of the termination table is not the last one in the
+list, fs_core will attempt to attach it to another table.
+
+Set the unmanaged ft flag when creating the termination table ft
+and select the tc offload prio for it to prevent fs_core from selecting
+the forwarding to next ft miss action and use the default one.
+
+In addition, set the flow that forwards to the termination table to
+ignore ft level restrictions since the ft level is not set by fs_core
+for unmanaged fts.
+
+Fixes: 249ccc3c95bd ("net/mlx5e: Add support for offloading traffic from uplink to uplink")
+Signed-off-by: Ariel Levkovich <lariel@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -76,10 +76,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_
+ /* As this is the terminating action then the termination table is the
+ * same prio as the slow path
+ */
+- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
++ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+- ft_attr.prio = FDB_SLOW_PATH;
++ ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.max_fte = 1;
++ ft_attr.level = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(tt->termtbl)) {
+@@ -216,6 +217,7 @@ mlx5_eswitch_termtbl_required(struct mlx
+ int i;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
++ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
+ attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ !mlx5_eswitch_offload_is_uplink_port(esw, spec))
+ return false;
+@@ -288,6 +290,7 @@ mlx5_eswitch_add_termtbl_rule(struct mlx
+ /* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
++ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule))
+ goto revert_changes;
--- /dev/null
+From 5e7923acbd86d0ff29269688d8a9c47ad091dd46 Mon Sep 17 00:00:00 2001
+From: Aya Levin <ayal@nvidia.com>
+Date: Wed, 21 Apr 2021 14:26:31 +0300
+Subject: net/mlx5e: Fix error path of updating netdev queues
+
+From: Aya Levin <ayal@nvidia.com>
+
+commit 5e7923acbd86d0ff29269688d8a9c47ad091dd46 upstream.
+
+Avoid division by zero in the error flow. In the driver TC number can be
+either 1 or 8. When TC count is set to 1, the driver zeroes netdev->num_tc.
+Hence, need to convert it back from 0 to 1 in the error flow.
+
+Fixes: fa3748775b92 ("net/mlx5e: Handle errors from netif_set_real_num_{tx,rx}_queues")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3027,7 +3027,7 @@ static int mlx5e_update_netdev_queues(st
+ int err;
+
+ old_num_txqs = netdev->real_num_tx_queues;
+- old_ntc = netdev->num_tc;
++ old_ntc = netdev->num_tc ? : 1;
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
--- /dev/null
+From 97817fcc684ed01497bd19d0cd4dea699665b9cf Mon Sep 17 00:00:00 2001
+From: Dima Chumak <dchumak@nvidia.com>
+Date: Tue, 13 Apr 2021 22:43:08 +0300
+Subject: net/mlx5e: Fix multipath lag activation
+
+From: Dima Chumak <dchumak@nvidia.com>
+
+commit 97817fcc684ed01497bd19d0cd4dea699665b9cf upstream.
+
+When handling FIB_EVENT_ENTRY_REPLACE event for a new multipath route,
+lag activation can be missed if a stale (struct lag_mp)->mfi pointer
+exists, which was associated with an older multipath route that had been
+removed.
+
+Normally, when a route is removed, it triggers mlx5_lag_fib_event(),
+which handles FIB_EVENT_ENTRY_DEL and clears mfi pointer. But, if
+mlx5_lag_check_prereq() condition isn't met, for example when eswitch is
+in legacy mode, the fib event is skipped and mfi pointer becomes stale.
+
+Fix by resetting mfi pointer to NULL every time mlx5_lag_mp_init() is
+called.
+
+Fixes: 544fe7c2e654 ("net/mlx5e: Activate HW multipath and handle port affinity based on FIB events")
+Signed-off-by: Dima Chumak <dchumak@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ld
+ struct lag_mp *mp = &ldev->lag_mp;
+ int err;
+
++ /* always clear mfi, as it might become stale when a route delete event
++ * has been missed
++ */
++ mp->mfi = NULL;
++
+ if (mp->fib_nb.notifier_call)
+ return 0;
+
+@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag
+ unregister_fib_notifier(&init_net, &mp->fib_nb);
+ destroy_workqueue(mp->wq);
+ mp->fib_nb.notifier_call = NULL;
++ mp->mfi = NULL;
+ }
--- /dev/null
+From 83026d83186bc48bb41ee4872f339b83f31dfc55 Mon Sep 17 00:00:00 2001
+From: Roi Dayan <roid@nvidia.com>
+Date: Mon, 3 May 2021 18:01:02 +0300
+Subject: net/mlx5e: Fix null deref accessing lag dev
+
+From: Roi Dayan <roid@nvidia.com>
+
+commit 83026d83186bc48bb41ee4872f339b83f31dfc55 upstream.
+
+It could be the lag dev is null so stop processing the event.
+In bond_enslave() the active/backup slave is set before setting the
+upper dev so first event is without an upper dev.
+After setting the upper dev with bond_master_upper_dev_link() there is
+a second event and in that event we have an upper dev.
+
+Fixes: 7e51891a237f ("net/mlx5e: Use netdev events to set/del egress acl forward-to-vport rule")
+Signed-off-by: Roi Dayan <roid@nvidia.com>
+Reviewed-by: Maor Dickman <maord@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_e
+ rpriv = priv->ppriv;
+ fwd_vport_num = rpriv->rep->vport;
+ lag_dev = netdev_master_upper_dev_get(netdev);
++ if (!lag_dev)
++ return;
+
+ netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
+ lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
--- /dev/null
+From dca59f4a791960ec73fa15803faa0abe0f92ece2 Mon Sep 17 00:00:00 2001
+From: Dima Chumak <dchumak@nvidia.com>
+Date: Mon, 26 Apr 2021 15:16:26 +0300
+Subject: net/mlx5e: Fix nullptr in add_vlan_push_action()
+
+From: Dima Chumak <dchumak@nvidia.com>
+
+commit dca59f4a791960ec73fa15803faa0abe0f92ece2 upstream.
+
+The result of dev_get_by_index_rcu() is not checked for NULL and then
+gets dereferenced immediately.
+
+Also, the RCU lock must be held by the caller of dev_get_by_index_rcu(),
+which isn't satisfied by the call stack.
+
+Fix by handling nullptr return value when iflink device is not found.
+Add RCU locking around dev_get_by_index_rcu() to avoid possible adverse
+effects while iterating over the net_device's hlist.
+
+It is safe not to increment reference count of the net_device pointer in
+case of a successful lookup, because it's already handled by VLAN code
+during VLAN device registration (see register_vlan_dev and
+netdev_upper_dev_link).
+
+Fixes: 278748a95aa3 ("net/mlx5e: Offload TC e-switch rules with egress VLAN device")
+Addresses-Coverity: ("Dereference null return value")
+Signed-off-by: Dima Chumak <dchumak@nvidia.com>
+Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3430,8 +3430,12 @@ static int add_vlan_push_action(struct m
+ if (err)
+ return err;
+
+- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+- dev_get_iflink(vlan_dev));
++ rcu_read_lock();
++ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
++ rcu_read_unlock();
++ if (!*out_dev)
++ return -ENODEV;
++
+ if (is_vlan_dev(*out_dev))
+ err = add_vlan_push_action(priv, attr, out_dev, action);
+
--- /dev/null
+From fe7738eb3ca3631a75844e790f6cb576c0fe7b00 Mon Sep 17 00:00:00 2001
+From: Dima Chumak <dchumak@nvidia.com>
+Date: Mon, 26 Apr 2021 15:16:26 +0300
+Subject: net/mlx5e: Fix nullptr in mlx5e_tc_add_fdb_flow()
+
+From: Dima Chumak <dchumak@nvidia.com>
+
+commit fe7738eb3ca3631a75844e790f6cb576c0fe7b00 upstream.
+
+The result of __dev_get_by_index() is not checked for NULL, which then
+passed to mlx5e_attach_encap() and gets dereferenced.
+
+Also, in case of a successful lookup, the net_device reference count is
+not incremented, which may result in net_device pointer becoming invalid
+at any time during mlx5e_attach_encap() execution.
+
+Fix by using dev_get_by_index(), which does proper reference counting on
+the net_device pointer. Also, handle nullptr return value when mirred
+device is not found.
+
+It's safe to call dev_put() on the mirred net_device pointer, right
+after mlx5e_attach_encap() call, because it's not being saved/copied
+down the call chain.
+
+Fixes: 3c37745ec614 ("net/mlx5e: Properly deal with encap flows add/del under neigh update")
+Addresses-Coverity: ("Dereference null return value")
+Signed-off-by: Dima Chumak <dchumak@nvidia.com>
+Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1276,10 +1276,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv
+ struct netlink_ext_ack *extack)
+ {
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+- struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool vf_tun = false, encap_valid = true;
++ struct net_device *encap_dev = NULL;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_fc *counter = NULL;
+ struct mlx5e_rep_priv *rpriv;
+@@ -1325,16 +1325,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv
+ esw_attr = attr->esw_attr;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
++ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+- out_dev = __dev_get_by_index(dev_net(priv->netdev),
+- mirred_ifindex);
++ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
++ if (!out_dev) {
++ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
++ err = -ENODEV;
++ goto err_out;
++ }
+ err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
+ extack, &encap_dev, &encap_valid);
++ dev_put(out_dev);
+ if (err)
+ goto err_out;
+
--- /dev/null
+From 77ecd10d0a8aaa6e4871d8c63626e4c9fc5e47db Mon Sep 17 00:00:00 2001
+From: Saeed Mahameed <saeedm@nvidia.com>
+Date: Thu, 25 Feb 2021 11:20:00 -0800
+Subject: net/mlx5e: reset XPS on error flow if netdev isn't registered yet
+
+From: Saeed Mahameed <saeedm@nvidia.com>
+
+commit 77ecd10d0a8aaa6e4871d8c63626e4c9fc5e47db upstream.
+
+mlx5e_attach_netdev can be called prior to registering the netdevice:
+Example stack:
+
+ipoib_new_child_link ->
+ipoib_intf_init->
+rdma_init_netdev->
+mlx5_rdma_setup_rn->
+
+mlx5e_attach_netdev->
+mlx5e_num_channels_changed ->
+mlx5e_set_default_xps_cpumasks ->
+netif_set_xps_queue ->
+__netif_set_xps_queue -> kmalloc
+
+If any later stage fails at any point after mlx5e_num_channels_changed()
+returns, XPS allocated maps will never be freed as they
+are only freed during netdev unregistration, which will never happen for
+yet to be registered netdevs.
+
+Fixes: 3909a12e7913 ("net/mlx5e: Fix configuration of XPS cpumasks and netdev queues in corner cases")
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5604,6 +5604,11 @@ static void mlx5e_update_features(struct
+ rtnl_unlock();
+ }
+
++static void mlx5e_reset_channels(struct net_device *netdev)
++{
++ netdev_reset_tc(netdev);
++}
++
+ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
+ {
+ const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+@@ -5658,6 +5663,7 @@ err_cleanup_tx:
+ profile->cleanup_tx(priv);
+
+ out:
++ mlx5e_reset_channels(priv->netdev);
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ cancel_work_sync(&priv->update_stats_work);
+ return err;
+@@ -5675,6 +5681,7 @@ void mlx5e_detach_netdev(struct mlx5e_pr
+
+ profile->cleanup_rx(priv);
+ profile->cleanup_tx(priv);
++ mlx5e_reset_channels(priv->netdev);
+ cancel_work_sync(&priv->update_stats_work);
+ }
+
--- /dev/null
+From 3410fbcd47dc6479af4309febf760ccaa5efb472 Mon Sep 17 00:00:00 2001
+From: Maor Gottlieb <maorg@nvidia.com>
+Date: Wed, 12 May 2021 13:52:27 +0300
+Subject: {net, RDMA}/mlx5: Fix override of log_max_qp by other device
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+commit 3410fbcd47dc6479af4309febf760ccaa5efb472 upstream.
+
+mlx5_core_dev holds pointer to static profile, hence when the
+log_max_qp of the profile is override by some device, then it
+effect all other mlx5 devices that share the same profile.
+Fix it by having a profile instance for every mlx5 device.
+
+Fixes: 883371c453b9 ("net/mlx5: Check FW limitations on log_max_qp before setting it")
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/mr.c | 4 +-
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 11 ++----
+ include/linux/mlx5/driver.h | 44 ++++++++++++-------------
+ 3 files changed, 29 insertions(+), 30 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -764,10 +764,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_de
+ ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
+ MLX5_IB_UMR_OCTOWORD;
+ ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+- if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
++ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
+ !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ mlx5_ib_can_load_pas_with_umr(dev, 0))
+- ent->limit = dev->mdev->profile->mr_cache[i].limit;
++ ent->limit = dev->mdev->profile.mr_cache[i].limit;
+ else
+ ent->limit = 0;
+ spin_lock_irq(&ent->lock);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx
+
+ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+ {
+- struct mlx5_profile *prof = dev->profile;
++ struct mlx5_profile *prof = &dev->profile;
+ void *set_hca_cap;
+ int err;
+
+@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_co
+ to_fw_pkey_sz(dev, 128));
+
+ /* Check log_max_qp from HCA caps to set in current profile */
+- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
++ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
+ mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+- profile[prof_sel].log_max_qp,
++ prof->log_max_qp,
+ MLX5_CAP_GEN_MAX(dev, log_max_qp));
+- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
++ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ }
+ if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+@@ -1335,8 +1335,7 @@ int mlx5_mdev_init(struct mlx5_core_dev
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+- dev->profile = &profile[profile_idx];
+-
++ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
+ mutex_init(&dev->intf_state_mutex);
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -698,6 +698,27 @@ struct mlx5_hv_vhca;
+ #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+ #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
++enum {
++ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
++ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
++};
++
++enum {
++ MR_CACHE_LAST_STD_ENTRY = 20,
++ MLX5_IMR_MTT_CACHE_ENTRY,
++ MLX5_IMR_KSM_CACHE_ENTRY,
++ MAX_MR_CACHE_ENTRIES
++};
++
++struct mlx5_profile {
++ u64 mask;
++ u8 log_max_qp;
++ struct {
++ int size;
++ int limit;
++ } mr_cache[MAX_MR_CACHE_ENTRIES];
++};
++
+ struct mlx5_core_dev {
+ struct device *device;
+ enum mlx5_coredev_type coredev_type;
+@@ -726,7 +747,7 @@ struct mlx5_core_dev {
+ struct mutex intf_state_mutex;
+ unsigned long intf_state;
+ struct mlx5_priv priv;
+- struct mlx5_profile *profile;
++ struct mlx5_profile profile;
+ u32 issi;
+ struct mlx5e_resources mlx5e_res;
+ struct mlx5_dm *dm;
+@@ -1073,18 +1094,6 @@ static inline u8 mlx5_mkey_variant(u32 m
+ return mkey & 0xff;
+ }
+
+-enum {
+- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+-};
+-
+-enum {
+- MR_CACHE_LAST_STD_ENTRY = 20,
+- MLX5_IMR_MTT_CACHE_ENTRY,
+- MLX5_IMR_KSM_CACHE_ENTRY,
+- MAX_MR_CACHE_ENTRIES
+-};
+-
+ /* Async-atomic event notifier used by mlx5 core to forward FW
+ * evetns recived from event queue to mlx5 consumers.
+ * Optimise event queue dipatching.
+@@ -1138,15 +1147,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
+
+-struct mlx5_profile {
+- u64 mask;
+- u8 log_max_qp;
+- struct {
+- int size;
+- int limit;
+- } mr_cache[MAX_MR_CACHE_ENTRIES];
+-};
+-
+ enum {
+ MLX5_PCI_DEV_IS_VF = 1 << 0,
+ };
--- /dev/null
+From 7c9f131f366ab414691907fa0407124ea2b2f3bc Mon Sep 17 00:00:00 2001
+From: Eli Cohen <elic@nvidia.com>
+Date: Thu, 22 Apr 2021 15:48:10 +0300
+Subject: {net,vdpa}/mlx5: Configure interface MAC into mpfs L2 table
+
+From: Eli Cohen <elic@nvidia.com>
+
+commit 7c9f131f366ab414691907fa0407124ea2b2f3bc upstream.
+
+net/mlx5: Expose MPFS configuration API
+
+MPFS is the multi physical function switch that bridges traffic between
+the physical port and any physical functions associated with it. The
+driver is required to add or remove MAC entries to properly forward
+incoming traffic to the correct physical function.
+
+We export the API to control MPFS so that other drivers, such as
+mlx5_vdpa are able to add MAC addresses of their network interfaces.
+
+The MAC address of the vdpa interface must be configured into the MPFS L2
+address. Failing to do so could cause, in some NIC configurations, failure
+to forward packets to the vdpa network device instance.
+
+Fix this by adding calls to update the MPFS table.
+
+CC: <mst@redhat.com>
+CC: <jasowang@redhat.com>
+CC: <virtualization@lists.linux-foundation.org>
+Fixes: 1a86b377aa21 ("vdpa/mlx5: Add VDPA driver for supported mlx5 devices")
+Signed-off-by: Eli Cohen <elic@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1 +
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1 +
+ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 3 +++
+ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h | 5 +----
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 19 ++++++++++++++++++-
+ include/linux/mlx5/mpfs.h | 18 ++++++++++++++++++
+ 6 files changed, 42 insertions(+), 5 deletions(-)
+ create mode 100644 include/linux/mlx5/mpfs.h
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -35,6 +35,7 @@
+ #include <linux/ipv6.h>
+ #include <linux/tcp.h>
+ #include <linux/mlx5/fs.h>
++#include <linux/mlx5/mpfs.h>
+ #include "en.h"
+ #include "lib/mpfs.h"
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -35,6 +35,7 @@
+ #include <linux/mlx5/mlx5_ifc.h>
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/fs.h>
++#include <linux/mlx5/mpfs.h>
+ #include "esw/acl/lgcy.h"
+ #include "mlx5_core.h"
+ #include "lib/eq.h"
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+@@ -33,6 +33,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/mlx5_ifc.h>
++#include <linux/mlx5/mpfs.h>
+ #include <linux/mlx5/eswitch.h>
+ #include "mlx5_core.h"
+ #include "lib/mpfs.h"
+@@ -175,6 +176,7 @@ out:
+ mutex_unlock(&mpfs->lock);
+ return err;
+ }
++EXPORT_SYMBOL(mlx5_mpfs_add_mac);
+
+ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
+ {
+@@ -206,3 +208,4 @@ unlock:
+ mutex_unlock(&mpfs->lock);
+ return err;
+ }
++EXPORT_SYMBOL(mlx5_mpfs_del_mac);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+@@ -84,12 +84,9 @@ struct l2addr_node {
+ #ifdef CONFIG_MLX5_MPFS
+ int mlx5_mpfs_init(struct mlx5_core_dev *dev);
+ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
+-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+ #else /* #ifndef CONFIG_MLX5_MPFS */
+ static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
+ static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
+-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+ #endif
++
+ #endif
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -15,6 +15,7 @@
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/fs.h>
+ #include <linux/mlx5/mlx5_ifc_vdpa.h>
++#include <linux/mlx5/mpfs.h>
+ #include "mlx5_vdpa.h"
+
+ MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+@@ -1854,11 +1855,16 @@ static int mlx5_vdpa_set_map(struct vdpa
+ static void mlx5_vdpa_free(struct vdpa_device *vdev)
+ {
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
++ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_net *ndev;
+
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ free_resources(ndev);
++ if (!is_zero_ether_addr(ndev->config.mac)) {
++ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
++ mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
++ }
+ mlx5_vdpa_free_resources(&ndev->mvdev);
+ mutex_destroy(&ndev->reslock);
+ }
+@@ -1980,6 +1986,7 @@ static int mlx5v_probe(struct auxiliary_
+ struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ struct virtio_net_config *config;
++ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ u32 max_vqs;
+@@ -2008,10 +2015,17 @@ static int mlx5v_probe(struct auxiliary_
+ if (err)
+ goto err_mtu;
+
++ if (!is_zero_ether_addr(config->mac)) {
++ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
++ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
++ if (err)
++ goto err_mtu;
++ }
++
+ mvdev->vdev.dma_dev = mdev->device;
+ err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ if (err)
+- goto err_mtu;
++ goto err_mpfs;
+
+ err = alloc_resources(ndev);
+ if (err)
+@@ -2028,6 +2042,9 @@ err_reg:
+ free_resources(ndev);
+ err_res:
+ mlx5_vdpa_free_resources(&ndev->mvdev);
++err_mpfs:
++ if (!is_zero_ether_addr(config->mac))
++ mlx5_mpfs_del_mac(pfmdev, config->mac);
+ err_mtu:
+ mutex_destroy(&ndev->reslock);
+ put_device(&mvdev->vdev.dev);
+--- /dev/null
++++ b/include/linux/mlx5/mpfs.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
++ * Copyright (c) 2021 Mellanox Technologies Ltd.
++ */
++
++#ifndef _MLX5_MPFS_
++#define _MLX5_MPFS_
++
++struct mlx5_core_dev;
++
++#ifdef CONFIG_MLX5_MPFS
++int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
++int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
++#else /* #ifndef CONFIG_MLX5_MPFS */
++static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
++static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
++#endif
++
++#endif
nfs-don-t-corrupt-the-value-of-pg_bytes_written-in-nfs_do_recoalesce.patch
nfsv4-fix-v4.0-v4.1-seek_data-return-enotsupp-when-set-nfs_v4_2-config.patch
drm-meson-fix-shutdown-crash-when-component-not-probed.patch
+net-mlx5e-reset-xps-on-error-flow-if-netdev-isn-t-registered-yet.patch
+net-mlx5e-fix-multipath-lag-activation.patch
+net-mlx5e-fix-error-path-of-updating-netdev-queues.patch
+net-vdpa-mlx5-configure-interface-mac-into-mpfs-l2-table.patch
+net-mlx5e-fix-nullptr-in-mlx5e_tc_add_fdb_flow.patch
+net-mlx5e-fix-nullptr-in-add_vlan_push_action.patch
+net-mlx5-set-reformat-action-when-needed-for-termination-rules.patch
+net-mlx5e-fix-null-deref-accessing-lag-dev.patch
+net-mlx4-fix-eeprom-dump-support.patch
+net-rdma-mlx5-fix-override-of-log_max_qp-by-other-device.patch
+net-mlx5-set-term-table-as-an-unmanaged-flow-table.patch