git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net/mlx5: XDP, Enable TX side XDP multi-buffer support
author: Alexei Lazar <alazar@nvidia.com>
Sun, 9 Feb 2025 10:17:16 +0000 (12:17 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 29 May 2025 09:13:29 +0000 (11:13 +0200)
[ Upstream commit 1a9304859b3a4119579524c293b902a8927180f3 ]

In XDP scenarios, fragmented packets can occur if the MTU is larger
than the page size, even when the packet size fits within the linear
part.
If XDP multi-buffer support is disabled, the fragmented part won't be
handled in the TX flow, leading to packet drops.

Since XDP multi-buffer support is always available, this commit removes
the conditional check for enabling it.
This ensures that XDP multi-buffer support is always enabled,
regardless of the `is_xdp_mb` parameter, and guarantees the handling of
fragmented packets in such scenarios.

Signed-off-by: Alexei Lazar <alazar@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250209101716.112774-16-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index 979fc56205e1fe7b473ad0849cf84f189d09fd4f..8f9ec48ecc06d375e9ecf64cc132616b9f0c6e00 100644 (file)
@@ -386,7 +386,6 @@ enum {
        MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
        MLX5E_SQ_STATE_PENDING_XSK_TX,
        MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
-       MLX5E_SQ_STATE_XDP_MULTIBUF,
        MLX5E_NUM_SQ_STATES, /* Must be kept last */
 };
 
index 31eb99f09c63c1f413948dadce646a063b624d58..8c4d710e856751ba6ef7e601e6eff4bee7387618 100644 (file)
@@ -1242,7 +1242,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
-       param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
 
index 3f8986f9d86291e6744315ddc8d15d0568e008a8..bd5877acc5b1eb144e0372631547053474e19d72 100644 (file)
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
        struct mlx5_wq_param       wq;
        bool                       is_mpw;
        bool                       is_tls;
-       bool                       is_xdp_mb;
        u16                        stop_room;
 };
 
index c8adf309ecad045a59abb826602a7a61116f26b3..dbd9482359e1ecef54a93bc2bc4c0343b674ab1a 100644 (file)
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
        [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
        [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
        [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
-       [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
 };
 
 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
index 94b2916620873c1312d69dc4285e1b8c89f1cd65..7a6cc0f4002eaaf183eb4611a0d4316023cc486e 100644 (file)
@@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
        bool inline_ok;
        bool linear;
        u16 pi;
+       int i;
 
        struct mlx5e_xdpsq_stats *stats = sq->stats;
 
@@ -612,41 +613,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-       if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-               int i;
-
-               memset(&cseg->trailer, 0, sizeof(cseg->trailer));
-               memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
-               eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+       memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+       memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
 
-               for (i = 0; i < num_frags; i++) {
-                       skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
-                       dma_addr_t addr;
+       eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
-                       addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
-                               page_pool_get_dma_addr(skb_frag_page(frag)) +
-                               skb_frag_off(frag);
+       for (i = 0; i < num_frags; i++) {
+               skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+               dma_addr_t addr;
 
-                       dseg->addr = cpu_to_be64(addr);
-                       dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
-                       dseg->lkey = sq->mkey_be;
-                       dseg++;
-               }
+               addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+                       page_pool_get_dma_addr(skb_frag_page(frag)) +
+                       skb_frag_off(frag);
 
-               cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+               dseg->addr = cpu_to_be64(addr);
+               dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+               dseg->lkey = sq->mkey_be;
+               dseg++;
+       }
 
-               sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
-                       .num_wqebbs = num_wqebbs,
-                       .num_pkts = 1,
-               };
+       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
-               sq->pc += num_wqebbs;
-       } else {
-               cseg->fm_ce_se = 0;
+       sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+               .num_wqebbs = num_wqebbs,
+               .num_pkts = 1,
+       };
 
-               sq->pc++;
-       }
+       sq->pc += num_wqebbs;
 
        xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
 
index 5c5168bdacb9002254429b9cba397660b552dadb..b0748e46b1ac41c657ebb33f951005fcee55a393 100644 (file)
@@ -2023,41 +2023,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-       if (param->is_xdp_mb)
-               set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
        if (err)
                goto err_free_xdpsq;
 
        mlx5e_set_xmit_fp(sq, param->is_mpw);
 
-       if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-               unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
-               unsigned int inline_hdr_sz = 0;
-               int i;
-
-               if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-                       inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
-                       ds_cnt++;
-               }
-
-               /* Pre initialize fixed WQE fields */
-               for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
-                       struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
-                       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-                       struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-
-                       sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
-                               .num_wqebbs = 1,
-                               .num_pkts   = 1,
-                       };
-
-                       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-                       eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
-               }
-       }
-
        return 0;
 
 err_free_xdpsq: