net/mlx5e: Don't cache tunnel offloads capability
author     Parav Pandit <parav@nvidia.com>
Fri, 12 Mar 2021 13:21:29 +0000 (07:21 -0600)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Mar 2023 12:37:50 +0000 (13:37 +0100)
[ Upstream commit 9a92fe1db9e57ea94388a1d768e8ee42af858377 ]

When mlx5e attaches again after device health recovery, the device
capabilities might have been changed by the eswitch manager.

For example, in one flow, when the ECPF changes the eswitch mode between
legacy and switchdev, it updates the flow table tunnel capability.

The cached value is used in only one place, so just check the capability
there instead.

Fixes: 5bef709d76a2 ("net/mlx5: Enable host PF HCA after eswitch is initialized")
Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2d77fb8a8a015903f5c6c1e5361cc21e52d770bc..ae73c9af8f2511fb840ec4670f34fab7aac01658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -313,7 +313,6 @@ struct mlx5e_params {
                } channel;
        } mqprio;
        bool rx_cqe_compress_def;
-       bool tunneled_offload_en;
        struct dim_cq_moder rx_cq_moderation;
        struct dim_cq_moder tx_cq_moderation;
        struct mlx5e_packet_merge_param packet_merge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c24f33a5ea5c9292d24fe1f1888fa02cbbc0c18..d6bcbc17151d788fc4f7e96ba63e896cc0a74184 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4923,8 +4923,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        /* TX inline */
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
-       params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
        /* AF_XDP */
        params->xsk = xsk;
 
@@ -5223,7 +5221,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        }
 
        features = MLX5E_RX_RES_FEATURE_PTP;
-       if (priv->channels.params.tunneled_offload_en)
+       if (mlx5_tunnel_inner_ft_supported(mdev))
                features |= MLX5E_RX_RES_FEATURE_INNER_FT;
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
                                priv->max_nch, priv->drop_rq.rqn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7d90e5b728548d947889071948be3540571f0da2..301a734b7c6a70cd8d21105bf3d4b76278449a29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -752,7 +752,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
        params->mqprio.num_tc       = 1;
-       params->tunneled_offload_en = false;
        if (rep->vport != MLX5_VPORT_UPLINK)
                params->vlan_strip_disable = true;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 911cf4d23964548692bb3d3c6b080b3a59ac7776..4285b31fee6c4d73354e02de46c7f7a5622233db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 
        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
-       params->tunneled_offload_en = false;
 
        /* CQE compression is not supported for IPoIB */
        params->rx_cqe_compress_def = false;
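
The pattern this commit applies, query a mutable device capability at its
single point of use instead of caching it at init time, can be sketched in
a few lines of self-contained C. This is an illustration only, assuming
invented names (struct device_caps, query_inner_ft_supported(),
build_rx_features(), RX_RES_FEATURE_INNER_FT); it is not the mlx5 driver
code, though the comments note what each piece stands in for.

#include <stdbool.h>

/* Stand-in for the device's live capability state; in mlx5 this is read
 * via mlx5_tunnel_inner_ft_supported(mdev). */
struct device_caps {
	bool inner_ft_supported;	/* may be toggled by the eswitch manager */
};

static bool query_inner_ft_supported(const struct device_caps *caps)
{
	return caps->inner_ft_supported;
}

/* Analogue of MLX5E_RX_RES_FEATURE_INNER_FT. */
#define RX_RES_FEATURE_INNER_FT	(1U << 0)

static unsigned int build_rx_features(const struct device_caps *caps)
{
	unsigned int features = 0;

	/* Read the capability at the point of use. A copy cached at init
	 * time could go stale across a device health recovery, which is
	 * exactly the bug this commit removes. */
	if (query_inner_ft_supported(caps))
		features |= RX_RES_FEATURE_INNER_FT;

	return features;
}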