git.ipfire.org Git - thirdparty/linux.git/commitdiff
net/mlx5e: Remove unused VLAN insertion logic in TX path
authorCarolina Jubran <cjubran@nvidia.com>
Tue, 8 Jul 2025 21:16:23 +0000 (00:16 +0300)
committerJakub Kicinski <kuba@kernel.org>
Thu, 10 Jul 2025 02:47:43 +0000 (19:47 -0700)
The VLAN insertion capability (`wqe_vlan_insert`) was never enabled on
any mlx5 device. When VLAN TX offload is advertised but this
capability is not supported, the driver uses inline headers to insert
the VLAN tag.

To support this, the driver used to set the
`MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE` bit to enforce L2 inline mode
when `wqe_vlan_insert` was not supported. Since the capability is
disabled on all devices, this logic was always active, and the SQ flag
has become redundant. L2 inline is enforced unconditionally for
VLAN-tagged packets.

The `skb_vlan_tag_present()` check in the else-if block of
`mlx5e_sq_xmit_wqe()` is never true by this point in the TX flow,
as the VLAN tag has already been inserted by the driver using inline
headers. As a result, this code is never executed.

Remove the redundant SQ state, dead VLAN insertion code block, and
related logic.

Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/1752009387-13300-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

index 65a73913b9a24fe38d58bdb98ead5ce74058f34a..64e69e616b1f76f211e6d43641561fb3f93aae01 100644 (file)
@@ -383,7 +383,6 @@ enum {
        MLX5E_SQ_STATE_RECOVERING,
        MLX5E_SQ_STATE_IPSEC,
        MLX5E_SQ_STATE_DIM,
-       MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
        MLX5E_SQ_STATE_PENDING_XSK_TX,
        MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
        MLX5E_NUM_SQ_STATES, /* Must be kept last */
index 5d0014129a7e01d86b5980a1add3978a50fd6690..391b4e9c9dc49121cb2d1af1c03887b30f902c18 100644 (file)
@@ -340,8 +340,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
        sq->stats     = &c->priv->ptp_stats.sq[tc];
        sq->ptpsq     = ptpsq;
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
-       if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
-               set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        sq->stop_room = param->stop_room;
        sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
 
index c3bda4612fa9c88d8c437e0488ab6ca2e782d35c..bd96988e102c33c8394ed62836ad70a992cce985 100644 (file)
@@ -13,7 +13,6 @@ static const char * const sq_sw_state_type_name[] = {
        [MLX5E_SQ_STATE_RECOVERING] = "recovering",
        [MLX5E_SQ_STATE_IPSEC] = "ipsec",
        [MLX5E_SQ_STATE_DIM] = "dim",
-       [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
        [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
        [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
 };
index e8e5b347f9b2d2275d5cfd95ce7f4ed15f78af8a..fee323ade522b4df5643b686418287968ffa392d 100644 (file)
@@ -1677,8 +1677,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
-       if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
-               set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        if (mlx5_ipsec_device_caps(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
        if (param->is_mpw)
index 55a8629f07923cb0d8ed0757282e7b1e3b891f37..e6a301ba32544926dc7a1f1e44155f018c3d5e18 100644 (file)
@@ -256,8 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
        mode = sq->min_inline_mode;
 
-       if (skb_vlan_tag_present(skb) &&
-           test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+       if (skb_vlan_tag_present(skb))
                mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
 
        return mode;
@@ -483,12 +482,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                }
                eseg->inline_hdr.sz |= cpu_to_be16(ihs);
                dseg += wqe_attr->ds_cnt_inl;
-       } else if (skb_vlan_tag_present(skb)) {
-               eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
-               if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
-                       eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
-               eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
-               stats->added_vlan_packets++;
        }
 
        dseg += wqe_attr->ds_cnt_ids;