drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "tx.h"
#include "umem.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock.h>

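/* mlx5e_xsk_wakeup() backs the driver's .ndo_xsk_wakeup callback: it runs
 * when an AF_XDP application calls sendto()/poll() on the socket and needs
 * the driver to kick NAPI so that the XSK queues make progress.
 */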
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_channel *c;
	u16 ix;

	if (unlikely(!mlx5e_xdp_is_active(priv)))
		return -ENETDOWN;

	if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
		return -EINVAL;

	c = priv->channels.c[ix];

	if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
		return -ENXIO;

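	/* If NAPI is already scheduled, marking it missed makes it poll once
	 * more; otherwise force an IRQ on the XSK ICOSQ to schedule it.
	 */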
	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		/* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
		 * active and not polled by NAPI. Return 0, because the upcoming
		 * activate will trigger the IRQ for us.
		 */
		if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
			return 0;

		spin_lock(&c->xskicosq_lock);
		mlx5e_trigger_irq(&c->xskicosq);
		spin_unlock(&c->xskicosq_lock);
	}

	return 0;
}

/* When TX fails (because of the size of the packet), we need to get completions
 * in order, so post a NOP to get a CQE. Since AF_XDP doesn't distinguish
 * between successful TX and errors, handling in mlx5e_poll_xdpsq_cq is the
 * same.
 */
static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_info *xdpi)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
	struct mlx5e_tx_wqe *nopwqe;

	wi->num_wqebbs = 1;
	wi->num_pkts = 1;

	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
	sq->doorbell_cseg = &nopwqe->ctrl;
}

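/* mlx5e_xsk_tx() is the AF_XDP transmit path, called from the channel's NAPI
 * poll: it drains up to @budget descriptors from the UMEM TX ring and posts
 * them to the XDP SQ. Returns true if the caller should poll again (budget
 * exhausted or the SQ full), false if the TX ring ran dry.
 */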
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
	struct xdp_umem *umem = sq->umem;
	struct mlx5e_xdp_info xdpi;
	struct mlx5e_xdp_xmit_data xdptxd;
	bool work_done = true;
	bool flush = false;

	xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;

	for (; budget; budget--) {
		int check_result = sq->xmit_xdp_frame_check(sq);
		struct xdp_desc desc;

		if (unlikely(check_result < 0)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(umem, &desc)) {
			/* TX will get stuck until something wakes it up by
			 * triggering NAPI. Currently it's expected that the
			 * application calls sendto() when there are consumed
			 * but not yet completed frames.
			 */
			break;
		}

		xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
		xdptxd.data = xdp_umem_get_data(umem, desc.addr);
		xdptxd.len = desc.len;
		dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
					   xdptxd.len, DMA_BIDIRECTIONAL);

		if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
			if (sq->mpwqe.wqe)
				mlx5e_xdp_mpwqe_complete(sq);

			mlx5e_xsk_tx_post_err(sq, &xdpi);
		}

		flush = true;
	}

	if (flush) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);

		xsk_umem_consume_tx_done(umem);
	}

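	/* Keep polling if the whole budget was spent or the SQ was full;
	 * false means the TX ring ran out of descriptors.
	 */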
	return !(budget && work_done);
}