drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
#include <linux/bitfield.h>

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
	int hr = mlx5e_get_linear_rq_headroom(params, xsk);

	/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
	 * The condition checked in mlx5e_rx_is_linear_skb is:
	 *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE   (1)
	 * (Note that hw_mtu == sw_mtu + hard_mtu.)
	 * What is returned from this function is:
	 *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                   (2)
	 * After assigning sw_mtu := max_mtu, the left side of (1) turns to
	 * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
	 * because both PAGE_SIZE and S are already aligned. Any number greater
	 * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
	 * so max_mtu is the maximum MTU allowed.
	 */

	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}
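
/* Worked example (illustrative numbers, not derived from runtime values):
 * assume PAGE_SIZE = 4096, an XDP headroom hr = 256, S = 320 (a typical
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) on x86_64) and
 * hard_mtu = 22 (Ethernet header + VLAN + FCS). Then (2) gives
 * max_mtu = 4096 - 320 - 256 - 22 = 3498, and plugging that back into (1):
 * SKB_DATA_ALIGN(3498 + 22 + 256) + 320 = 3776 + 320 = 4096, exactly one
 * page, so the bound is tight.
 */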

static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		    struct xdp_buff *xdp)
{
	struct page *page = virt_to_page(xdp->data);
	struct mlx5e_xmit_data_frags xdptxdf = {};
	struct mlx5e_xmit_data *xdptxd;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	int i;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return false;

	xdptxd = &xdptxdf.xd;
	xdptxd->data = xdpf->data;
	xdptxd->len = xdpf->len;
	xdptxd->has_frags = xdp_frame_has_frags(xdpf);

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		/* The xdp_buff was in the UMEM and was copied into a newly
		 * allocated page. The UMEM page was returned via the ZCA, and
		 * this new page has to be mapped at this point and has to be
		 * unmapped and returned via xdp_return_frame on completion.
		 */

		/* Prevent double recycling of the UMEM page. Even if this
		 * function returns false, the xdp_buff shouldn't be recycled,
		 * as that was already done in xdp_convert_zc_to_xdp_frame.
		 */
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

		if (unlikely(xdptxd->has_frags))
			return false;

		dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(sq->pdev, dma_addr)) {
			xdp_return_frame(xdpf);
			return false;
		}

		xdptxd->dma_addr = dma_addr;

		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
					      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
			return false;

		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr });
		return true;
	}

	/* The driver assumes that xdp_convert_buff_to_frame returns an
	 * xdp_frame that points to the same memory region as the original
	 * xdp_buff. This allows mapping the memory only once and using
	 * DMA_BIDIRECTIONAL mode.
	 */

	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);

	if (xdptxd->has_frags) {
		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
		xdptxdf.dma_arr = NULL;

		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
			dma_addr_t addr;
			u32 len;

			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);
			len = skb_frag_size(frag);
			dma_sync_single_for_device(sq->pdev, addr, len,
						   DMA_BIDIRECTIONAL);
		}
	}

	xdptxd->dma_addr = dma_addr;

	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
		return false;

	/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
			     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE });

	if (xdptxd->has_frags) {
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info)
				     { .page.num = 1 + xdptxdf.sinfo->nr_frags });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.page = page });
		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];

			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
					     (union mlx5e_xdp_info)
					     { .page.page = skb_frag_page(frag) });
		}
	} else {
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.num = 1 });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.page = page });
	}

	return true;
}
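
/* Note how the two branches above pair with mlx5e_free_xdpsq_desc() below:
 * MLX5E_XDP_XMIT_MODE_FRAME entries are unmapped with dma_unmap_single()
 * and returned via xdp_return_frame_bulk() on completion, while
 * MLX5E_XDP_XMIT_MODE_PAGE entries hand their pages straight back to the
 * page_pool.
 */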

static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;

	if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
		return -ENODATA;

	*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
					_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
	return 0;
}

/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4 bits */
#define RSS_TYPE_MAX_TABLE	16 /* 4 bits, max 16 entries */
#define RSS_L4			GENMASK(1, 0)
#define RSS_L3			GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */

/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4, sorted numerically */
enum mlx5_rss_hash_type {
	RSS_TYPE_NO_HASH	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L3_IPV4	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L4_IPV4_TCP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
	RSS_TYPE_L4_IPV4_UDP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
	RSS_TYPE_L4_IPV4_IPSEC	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
	RSS_TYPE_L3_IPV6	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L4_IPV6_TCP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
	RSS_TYPE_L4_IPV6_UDP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
	RSS_TYPE_L4_IPV6_IPSEC	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
};

/* Invalid combinations simply return zero, so no bounds checks are needed */
static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
	[RSS_TYPE_NO_HASH]	 = XDP_RSS_TYPE_NONE,
	[1]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[2]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[3]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[RSS_TYPE_L3_IPV4]	 = XDP_RSS_TYPE_L3_IPV4,
	[RSS_TYPE_L4_IPV4_TCP]	 = XDP_RSS_TYPE_L4_IPV4_TCP,
	[RSS_TYPE_L4_IPV4_UDP]	 = XDP_RSS_TYPE_L4_IPV4_UDP,
	[RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
	[RSS_TYPE_L3_IPV6]	 = XDP_RSS_TYPE_L3_IPV6,
	[RSS_TYPE_L4_IPV6_TCP]	 = XDP_RSS_TYPE_L4_IPV6_TCP,
	[RSS_TYPE_L4_IPV6_UDP]	 = XDP_RSS_TYPE_L4_IPV6_UDP,
	[RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
	[12]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[13]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[14]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[15]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
};
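
/* Lookup sketch (illustrative; the CQE_RSS_* encodings come from the mlx5
 * headers): for an IPv4/TCP packet the CQE reports CQE_RSS_IPV4 in the
 * CQE_RSS_HTYPE_IP bits and CQE_RSS_L4_TCP in the CQE_RSS_HTYPE_L4 bits,
 * so ip_type | l4_type in mlx5e_xdp_rx_hash() below forms the index
 * RSS_TYPE_L4_IPV4_TCP and the table yields XDP_RSS_TYPE_L4_IPV4_TCP.
 */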

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			     enum xdp_rss_hash_type *rss_type)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
	const struct mlx5_cqe64 *cqe = _ctx->cqe;
	u32 hash_type, l4_type, ip_type, lookup;

	if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
		return -ENODATA;

	*hash = be32_to_cpu(cqe->rss_hash_result);

	hash_type = cqe->rss_hash_type;
	BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
	ip_type = hash_type & CQE_RSS_HTYPE_IP;
	l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
	lookup = ip_type | l4_type;
	*rss_type = mlx5_xdp_rss_type[lookup];

	return 0;
}

const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
	.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
	.xmo_rx_hash = mlx5e_xdp_rx_hash,
};
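
/* Consumer-side sketch (an assumption for illustration, not part of this
 * driver): an XDP program would read this metadata through the rx-hash
 * kfunc, roughly:
 *
 *	extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
 *					    __u32 *hash,
 *					    enum xdp_rss_hash_type *rss_type) __ksym;
 *
 *	SEC("xdp")
 *	int rx_prog(struct xdp_md *ctx)
 *	{
 *		enum xdp_rss_hash_type rss_type;
 *		__u32 hash;
 *
 *		if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
 *			bpf_printk("hash 0x%x type %d", hash, rss_type);
 *		return XDP_PASS;
 *	}
 */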

/* Returns true if the packet was consumed by XDP */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
	struct xdp_buff *xdp = &mxbuf->xdp;
	u32 act;
	int err;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP is enabled, the page refcount is 1 at this point */
		err = xdp_do_redirect(rq->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}
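
/* Caller-side note: a true return means "consumed, don't build an skb".
 * The flush work implied by XDP_REDIRECT is deferred: the
 * MLX5E_RQ_FLAG_XDP_REDIRECT bit set above is consumed by
 * mlx5e_xdp_rx_poll_complete() at the end of this file, which calls
 * xdp_do_flush_map() once per NAPI poll instead of once per packet.
 */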

static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_xdp_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts = 0,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nops += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
	};

	stats->mpwqe++;
}

void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
	u16 ds_count = session->ds_count;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
	wi->num_pkts = session->pkt_count;

	sq->pc += wi->num_wqebbs;

	sq->doorbell_cseg = cseg;

	session->wqe = NULL; /* Close session */
}

enum {
	MLX5E_XDP_CHECK_OK = 1,
	MLX5E_XDP_CHECK_START_MPWQE = 2,
};

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
	if (unlikely(!sq->mpwqe.wqe)) {
		if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
						     sq->stop_room))) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->stats->full++;
			return -EBUSY;
		}

		return MLX5E_XDP_CHECK_START_MPWQE;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     int check_result);

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
			   int check_result)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;

	if (xdptxd->has_frags) {
		/* MPWQE is enabled, but a multi-buffer packet is queued for
		 * transmission. MPWQE can't send fragmented packets, so close
		 * the current session and fall back to a regular WQE.
		 */
		if (unlikely(sq->mpwqe.wqe))
			mlx5e_xdp_mpwqe_complete(sq);
		return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
	}

	if (unlikely(xdptxd->len > sq->hw_mtu)) {
		stats->err++;
		return false;
	}

	if (!check_result)
		check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
	if (unlikely(check_result < 0))
		return false;

	if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
		/* Start the session when nothing can fail, so it's guaranteed
		 * that if there is an active session, it has at least one dseg,
		 * and it's safe to complete it at any time.
		 */
		mlx5e_xdp_mpwqe_session_start(sq);
	}

	mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

	if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
		mlx5e_xdp_mpwqe_complete(sq);

	stats->xmit++;
	return true;
}
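
/* Design note: an Enhanced Multi-Packet WQE (MPSW) batches several packets
 * into one work-queue element, amortizing control-segment and doorbell
 * overhead. The session opened by mlx5e_xdp_mpwqe_session_start() stays
 * open across calls until it fills up (checked above) or a flush path
 * calls mlx5e_xdp_mpwqe_complete().
 */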

static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
		/* SQ is full, ring doorbell */
		mlx5e_xmit_xdp_doorbell(sq);
		sq->stats->full++;
		return -EBUSY;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
	return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     int check_result)
{
	struct mlx5e_xmit_data_frags *xdptxdf =
		container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5e_tx_wqe *wqe;

	dma_addr_t dma_addr = xdptxd->dma_addr;
	u32 dma_len = xdptxd->len;
	u16 ds_cnt, inline_hdr_sz;
	u8 num_wqebbs = 1;
	int num_frags = 0;
	bool inline_ok;
	bool linear;
	u16 pi;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
		dma_len >= MLX5E_XDP_MIN_INLINE;

	if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	inline_hdr_sz = 0;
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;

	linear = !!(dma_len - inline_hdr_sz);
	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;

	/* check_result must be 0 if sinfo is passed. */
	if (!check_result) {
		int stop_room = 1;

		if (xdptxd->has_frags) {
			ds_cnt += xdptxdf->sinfo->nr_frags;
			num_frags = xdptxdf->sinfo->nr_frags;
			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
			/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
			 * enough to hold all fragments.
			 */
			stop_room = MLX5E_STOP_ROOM(num_wqebbs);
		}

		check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
	}
	if (unlikely(check_result < 0))
		return false;

	pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	net_prefetchw(wqe);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	/* copy the inline part if required */
	if (inline_hdr_sz) {
		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
		       inline_hdr_sz - sizeof(eseg->inline_hdr.start));
		dma_len -= inline_hdr_sz;
		dma_addr += inline_hdr_sz;
		dseg++;
	}

	/* write the dma part */
	if (linear) {
		dseg->addr = cpu_to_be64(dma_addr);
		dseg->byte_count = cpu_to_be32(dma_len);
		dseg->lkey = sq->mkey_be;
		dseg++;
	}

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
		int i;

		memset(&cseg->trailer, 0, sizeof(cseg->trailer));
		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));

		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		for (i = 0; i < num_frags; i++) {
			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
			dma_addr_t addr;

			addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
				page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);

			dseg->addr = cpu_to_be64(addr);
			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
			dseg->lkey = sq->mkey_be;
			dseg++;
		}

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
			.num_wqebbs = num_wqebbs,
			.num_pkts = 1,
		};

		sq->pc += num_wqebbs;
	} else {
		cseg->fm_ce_se = 0;

		sq->pc++;
	}

	sq->doorbell_cseg = cseg;

	stats->xmit++;
	return true;
}

static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_wqe_info *wi,
				  u32 *xsk_frames,
				  struct xdp_frame_bulk *bq)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	u16 i;

	for (i = 0; i < wi->num_pkts; i++) {
		union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

		switch (xdpi.mode) {
		case MLX5E_XDP_XMIT_MODE_FRAME: {
			/* XDP_TX from the XSK RQ and XDP_REDIRECT */
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;

			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			xdpf = xdpi.frame.xdpf;
			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			dma_addr = xdpi.frame.dma_addr;

			dma_unmap_single(sq->pdev, dma_addr,
					 xdpf->len, DMA_TO_DEVICE);
			if (xdp_frame_has_frags(xdpf)) {
				struct skb_shared_info *sinfo;
				int j;

				sinfo = xdp_get_shared_info_from_frame(xdpf);
				for (j = 0; j < sinfo->nr_frags; j++) {
					skb_frag_t *frag = &sinfo->frags[j];

					xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
					dma_addr = xdpi.frame.dma_addr;

					dma_unmap_single(sq->pdev, dma_addr,
							 skb_frag_size(frag), DMA_TO_DEVICE);
				}
			}
			xdp_return_frame_bulk(xdpf, bq);
			break;
		}
		case MLX5E_XDP_XMIT_MODE_PAGE: {
			/* XDP_TX from the regular RQ */
			u8 num, n = 0;

			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			num = xdpi.page.num;

			do {
				struct page *page;

				xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
				page = xdpi.page.page;

				/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
				 * as we know this is a page_pool page.
				 */
				page_pool_put_defragged_page(page->pp,
							     page, -1, true);
			} while (++n < num);

			break;
		}
		case MLX5E_XDP_XMIT_MODE_XSK:
			/* AF_XDP send */
			(*xsk_frames)++;
			break;
		default:
			WARN_ON_ONCE(true);
		}
	}
}

bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct xdp_frame_bulk bq;
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 xsk_frames = 0;
	u16 sqcc;
	int i;

	xdp_frame_bulk_init(&bq);

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		struct mlx5e_xdp_wqe_info *wi;
		u16 wqe_counter, ci;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad OP in XDPSQ CQE: 0x%x\n",
					 get_cqe_opcode(cqe));
			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
					     (struct mlx5_err_cqe *)cqe);
			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct xdp_frame_bulk bq;
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (sq->cc != sq->pc) {
		struct mlx5e_xdp_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];

		sq->cc += wi->num_wqebbs;

		mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);
}

int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int nxmit = 0;
	int sq_num;
	int i;

	/* this flag is sufficient, no need to test internal sq state */
	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	for (i = 0; i < n; i++) {
		struct mlx5e_xmit_data_frags xdptxdf = {};
		struct xdp_frame *xdpf = frames[i];
		dma_addr_t dma_arr[MAX_SKB_FRAGS];
		struct mlx5e_xmit_data *xdptxd;
		bool ret;

		xdptxd = &xdptxdf.xd;
		xdptxd->data = xdpf->data;
		xdptxd->len = xdpf->len;
		xdptxd->has_frags = xdp_frame_has_frags(xdpf);
		xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
						  xdptxd->len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
			break;

		if (xdptxd->has_frags) {
			int j;

			xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
			xdptxdf.dma_arr = dma_arr;
			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
				skb_frag_t *frag = &xdptxdf.sinfo->frags[j];

				dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
							    skb_frag_size(frag), DMA_TO_DEVICE);

				if (!dma_mapping_error(sq->pdev, dma_arr[j]))
					continue;
				/* mapping error */
				while (--j >= 0)
					dma_unmap_single(sq->pdev, dma_arr[j],
							 skb_frag_size(&xdptxdf.sinfo->frags[j]),
							 DMA_TO_DEVICE);
				goto out;
			}
		}

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
		if (unlikely(!ret)) {
			int j;

			dma_unmap_single(sq->pdev, xdptxd->dma_addr,
					 xdptxd->len, DMA_TO_DEVICE);
			if (!xdptxd->has_frags)
				break;
			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
				dma_unmap_single(sq->pdev, dma_arr[j],
						 skb_frag_size(&xdptxdf.sinfo->frags[j]),
						 DMA_TO_DEVICE);
			break;
		}

		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
		if (xdptxd->has_frags) {
			int j;

			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
				mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
						     (union mlx5e_xdp_info)
						     { .frame.dma_addr = dma_arr[j] });
		}
		nxmit++;
	}

out:
	if (flags & XDP_XMIT_FLUSH) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);
	}

	return nxmit;
}
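
/* This is the driver's ndo_xdp_xmit handler: the core may pass up to n
 * frames at once (e.g. when flushing an XDP_REDIRECT map), and the return
 * value is the number of frames actually queued; the caller is expected
 * to free the frames that were not accepted.
 */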

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

	if (xdpsq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(xdpsq);

	mlx5e_xmit_xdp_doorbell(xdpsq);

	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
		xdp_do_flush_map();
		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
	}
}

void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
	sq->xmit_xdp_frame_check = is_mpw ?
		mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
	sq->xmit_xdp_frame = is_mpw ?
		mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}