git.ipfire.org Git - thirdparty/linux.git/commitdiff
net/mlx5: XDP, Enable TX side XDP multi-buffer support
author: Alexei Lazar <alazar@nvidia.com>
Sun, 9 Feb 2025 10:17:16 +0000 (12:17 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Wed, 12 Feb 2025 18:46:18 +0000 (10:46 -0800)
In XDP scenarios, fragmented packets can occur if the MTU is larger
than the page size, even when the packet size fits within the linear
part.
If XDP multi-buffer support is disabled, the fragmented part won't be
handled in the TX flow, leading to packet drops.

Since XDP multi-buffer support is always available, this commit removes
the conditional check for enabling it.
This ensures that XDP multi-buffer support is always enabled,
regardless of the `is_xdp_mb` parameter, and guarantees the handling of
fragmented packets in such scenarios.

Signed-off-by: Alexei Lazar <alazar@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250209101716.112774-16-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index 534fdd27c8de061cea65ecb24d4013b0e2c556d9..769e683f2488368ab95835486c3daf41929a70af 100644 (file)
@@ -384,7 +384,6 @@ enum {
        MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
        MLX5E_SQ_STATE_PENDING_XSK_TX,
        MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
-       MLX5E_SQ_STATE_XDP_MULTIBUF,
        MLX5E_NUM_SQ_STATES, /* Must be kept last */
 };
 
index e37d4c202bba433e918fccbe6bd8f6464022b1f6..aa36670d9a369614cfc8ea43ad583823e9ed7bfe 100644 (file)
@@ -1247,7 +1247,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
-       param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
 
index 3f8986f9d86291e6744315ddc8d15d0568e008a8..bd5877acc5b1eb144e0372631547053474e19d72 100644 (file)
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
        struct mlx5_wq_param       wq;
        bool                       is_mpw;
        bool                       is_tls;
-       bool                       is_xdp_mb;
        u16                        stop_room;
 };
 
index 09433b91be176fb639a2b54cf3fb9595621c168b..532c7fa94d172a568fc1459928a1bca62c2bd27f 100644 (file)
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
        [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
        [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
        [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
-       [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
 };
 
 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
index 3cc4d55613bf28e98892c103710475d92ae2adc6..6f3094a479e1ec61854bb48a6a0c812167487173 100644 (file)
@@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
        bool inline_ok;
        bool linear;
        u16 pi;
+       int i;
 
        struct mlx5e_xdpsq_stats *stats = sq->stats;
 
@@ -612,41 +613,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-       if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-               int i;
-
-               memset(&cseg->trailer, 0, sizeof(cseg->trailer));
-               memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
-               eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+       memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+       memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
 
-               for (i = 0; i < num_frags; i++) {
-                       skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
-                       dma_addr_t addr;
+       eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
-                       addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
-                               page_pool_get_dma_addr(skb_frag_page(frag)) +
-                               skb_frag_off(frag);
+       for (i = 0; i < num_frags; i++) {
+               skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+               dma_addr_t addr;
 
-                       dseg->addr = cpu_to_be64(addr);
-                       dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
-                       dseg->lkey = sq->mkey_be;
-                       dseg++;
-               }
+               addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+                       page_pool_get_dma_addr(skb_frag_page(frag)) +
+                       skb_frag_off(frag);
 
-               cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+               dseg->addr = cpu_to_be64(addr);
+               dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+               dseg->lkey = sq->mkey_be;
+               dseg++;
+       }
 
-               sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
-                       .num_wqebbs = num_wqebbs,
-                       .num_pkts = 1,
-               };
+       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
-               sq->pc += num_wqebbs;
-       } else {
-               cseg->fm_ce_se = 0;
+       sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+               .num_wqebbs = num_wqebbs,
+               .num_pkts = 1,
+       };
 
-               sq->pc++;
-       }
+       sq->pc += num_wqebbs;
 
        xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
 
index 2fdc86432ac0f7d135b434667a5421bcba88660c..5d5e7b19c396b8e4e368250f4a03e8ea0da3938c 100644 (file)
@@ -2023,41 +2023,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-       if (param->is_xdp_mb)
-               set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
        if (err)
                goto err_free_xdpsq;
 
        mlx5e_set_xmit_fp(sq, param->is_mpw);
 
-       if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-               unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
-               unsigned int inline_hdr_sz = 0;
-               int i;
-
-               if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-                       inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
-                       ds_cnt++;
-               }
-
-               /* Pre initialize fixed WQE fields */
-               for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
-                       struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
-                       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-                       struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-
-                       sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
-                               .num_wqebbs = 1,
-                               .num_pkts   = 1,
-                       };
-
-                       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-                       eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
-               }
-       }
-
        return 0;
 
 err_free_xdpsq: