/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "accel/ipsec.h"
#include "vxlan.h"

struct mlx5e_rq_param {
	u32 rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_sq_param {
	u32 sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_cq_param {
	u32 cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param wq;
	u16 eq_ix;
	u8 cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_sq_param xdp_sq;
	struct mlx5e_sq_param icosq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
	struct mlx5e_cq_param icosq_cq;
};

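/* Striding RQ requires HW support for posting UMR WQEs on a regular send
 * queue, and the inline UMR WQE must fit within the maximum SQ WQE size
 * the device reports.
 */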
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
{
	if (!params->xdp_prog) {
		u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
		u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;

		return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
	}

	return PAGE_SIZE;
}

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
{
	u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

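/* A linear SKB can be built from an MPWQE stride only when LRO is off and
 * a whole fragment fits in one page. Devices without the
 * ext_stride_num_range capability additionally bound the number of strides
 * per WQE from below (MLX5_MPWQE_LOG_NUM_STRIDES_BASE).
 */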
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (params->lro_en || frag_sz > PAGE_SIZE)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
{
	if (params->log_rq_mtu_frames <
	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
}

static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
		return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));

	return MLX5E_MPWQE_STRIDE_SZ(mdev,
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params);
}

static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params)
{
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;

	linear_rq_headroom += NET_IP_ALIGN;

	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
		return linear_rq_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
		return linear_rq_headroom;

	return 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		/* Extra room needed for build_skb */
		params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		!MLX5_IPSEC_DEV(mdev) &&
		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}

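/* Pre-build the constant part of the UMR WQE used to (re)map the pages of
 * an MPWQE: a control segment carrying the ICOSQ number and DS count, and
 * a UMR control segment set up for inline translation entries.
 */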
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

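/* Allocate the SW state of an RQ: the linked-list WQ, per-WQE bookkeeping
 * (MPWQE info or per-fragment info, depending on the WQ type), the XDP
 * rxq_info and a page_pool sized for the ring, then pre-fill each RX WQE.
 */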
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count, pool_size;
	int npages;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = c->netdev;
	rq->tstamp = c->tstamp;
	rq->clock = &mdev->clock;
	rq->channel = c;
	rq->ix = c->ix;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq =
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_mpwrq_linear :
			mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));

		byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->wqe.frag_info =
			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
				     GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frag_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			kfree(rq->wqe.frag_info);
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		byte_count = params->lro_en ?
				params->lro_wqe_sz :
				MLX5E_SW2HW_MTU(params, params->sw_mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev))
			byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;

		/* calc the required page order */
		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	/* Create a page_pool and register it with rxq */
	pp_params.order = rq->buff.page_order;
	pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
	pp_params.pool_size = pool_size;
	pp_params.nid = cpu_to_node(c->cpu);
	pp_params.dev = c->pdev;
	pp_params.dma_dir = rq->buff.map_dir;

	/* page_pool can be used even when there is no rq->xdp_prog,
	 * given page_pool does not handle DMA mapping there is no
	 * required state to clear. And page_pool gracefully handle
	 * elevated refcnt.
	 */
	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool)) {
		if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
			kfree(rq->wqe.frag_info);
		err = PTR_ERR(rq->page_pool);
		rq->page_pool = NULL;
		goto err_rq_wq_destroy;
	}
	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rq->page_pool);
	if (err)
		goto err_rq_wq_destroy;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
		}

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->wqe.frag_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

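/* RQ state transitions (RST -> RDY etc.) go through the MODIFY_RQ command;
 * the helpers below use the same command to flip the scatter-FCS and VSD
 * bits of an already-ready RQ.
 */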
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

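/* Poll for up to 20 seconds until NAPI has refilled the RQ with at least
 * the minimal number of RX WQEs required for its WQ type.
 */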
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;

	struct mlx5_wq_ll *wq = &rq->wq;
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= min_wqes)
			return 0;

		msleep(20);
	}

	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->rqn, wq->cur_sz, min_wqes);
	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    rq->mpwqe.umr_in_progress)
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix = be16_to_cpu(wqe_ix_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
		 * but yet to be re-posted.
		 */
		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
			rq->dealloc_wqe(rq, wqe_ix);
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_dim_enabled)
		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

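/* Clearing the ENABLED bit and then synchronizing with NAPI guarantees
 * that no concurrent poll cycle still posts RX WQEs for this RQ.
 */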
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev = c->pdev;
	sq->tstamp = c->tstamp;
	sq->clock = &mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl *wq_ctrl;
	u32 cqn;
	u32 tisn;
	u8 tis_lst_sz;
	u8 min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc, sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = sq->cq.mcq.cqn;
	csp.wq_ctrl = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
{
	WARN_ONCE(sq->cc != sq->pc,
		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
		  sq->sqn, sq->cc, sq->pc);
	sq->cc = 0;
	sq->dma_fifo_cc = 0;
	sq->pc = 0;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(2000);

	while (time_before(jiffies, exp_time)) {
		if (sq->cc == sq->pc)
			return 0;

		msleep(20);
	}

	netdev_err(sq->channel->netdev,
		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
		   sq->sqn, sq->cc, sq->pc);

	return -ETIMEDOUT;
}

static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
{
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	msp.curr_state = curr_state;
	msp.next_state = MLX5_SQC_STATE_RST;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
		return err;
	}

	memset(&msp, 0, sizeof(msp));
	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
		return err;
	}

	return 0;
}

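/* Error-CQE recovery worker: verify the SQ is in ERROR state, stop its TX
 * queue, wait for in-flight WQEs to drain, move the SQ back to RDY via
 * RST, then reset the SW counters and reactivate the queue.
 */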
static void mlx5e_sq_recover(struct work_struct *work)
{
	struct mlx5e_txqsq_recover *recover =
		container_of(work, struct mlx5e_txqsq_recover,
			     recover_work);
	struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
					      recover);
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	u8 state;
	int err;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		return;
	}

	if (state != MLX5_RQC_STATE_ERR) {
		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
		return;
	}

	netif_tx_disable_queue(sq->txq);

	if (mlx5e_wait_for_sq_flush(sq))
		return;

	/* If the interval between two consecutive recovers per SQ is too
	 * short, don't recover to avoid infinite loop of ERR_CQE -> recover.
	 * If we reached this state, there is probably a bug that needs to be
	 * fixed. let's keep the queue close and let tx timeout cleanup.
	 */
	if (jiffies_to_msecs(jiffies - recover->last_recover) <
	    MLX5E_SQ_RECOVER_MIN_INTERVAL) {
		netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
			   sq->sqn);
		return;
	}

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. SQ can safely reset the SQ.
	 */
	if (mlx5e_sq_to_ready(sq, state))
		return;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats.recover++;
	recover->last_recover = jiffies;
	mlx5e_activate_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn = sq->cq.mcq.cqn;
	csp.wq_ctrl = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz = 1;
	csp.tisn = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn = sq->cq.mcq.cqn;
	csp.wq_ctrl = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

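/* Common CQ setup: create the CQ work queue, wire up the doorbell records
 * and event handlers, and initialize all CQEs as invalid (HW ownership)
 * before the CQ is first armed.
 */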
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct net_dim_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

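/* Per-SQ TX rate limiting: rate values live in a shared device table, so
 * the old entry is released before a new one is added, and the SQ is
 * pointed at the new index via MODIFY_SQ while staying in RDY state.
 */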
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index = rl_index;
	msp.rl_update = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

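/* Bring up one channel on the NUMA node of its IRQ: open the ICO/TX/RX
 * (and optional XDP) CQs first, enable NAPI, then open the SQs and finally
 * the RQ. The error path tears everything down in reverse order.
 */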
f62b8bb8 1768static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
6a9764ef 1769 struct mlx5e_params *params,
f62b8bb8
AV
1770 struct mlx5e_channel_param *cparam,
1771 struct mlx5e_channel **cp)
1772{
9a317425 1773 struct net_dim_cq_moder icocq_moder = {0, 0};
f62b8bb8 1774 struct net_device *netdev = priv->netdev;
231243c8 1775 int cpu = mlx5e_get_cpu(priv, ix);
f62b8bb8 1776 struct mlx5e_channel *c;
a8c2eb15 1777 unsigned int irq;
f62b8bb8 1778 int err;
a8c2eb15 1779 int eqn;
f62b8bb8 1780
231243c8 1781 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
f62b8bb8
AV
1782 if (!c)
1783 return -ENOMEM;
1784
1785 c->priv = priv;
a43b25da
SM
1786 c->mdev = priv->mdev;
1787 c->tstamp = &priv->tstamp;
f62b8bb8 1788 c->ix = ix;
231243c8 1789 c->cpu = cpu;
f62b8bb8
AV
1790 c->pdev = &priv->mdev->pdev->dev;
1791 c->netdev = priv->netdev;
b50d292b 1792 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
6a9764ef
SM
1793 c->num_tc = params->num_tc;
1794 c->xdp = !!params->xdp_prog;
cb3c7fd4 1795
a8c2eb15
TT
1796 mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1797 c->irq_desc = irq_to_desc(irq);
1798
f62b8bb8
AV
1799 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1800
6a9764ef 1801 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
f62b8bb8
AV
1802 if (err)
1803 goto err_napi_del;
1804
6a9764ef 1805 err = mlx5e_open_tx_cqs(c, params, cparam);
d3c9bc27
TT
1806 if (err)
1807 goto err_close_icosq_cq;
1808
6a9764ef 1809 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
f62b8bb8
AV
1810 if (err)
1811 goto err_close_tx_cqs;
f62b8bb8 1812
d7a0ecab 1813 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
6a9764ef
SM
1814 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1815 &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
d7a0ecab
SM
1816 if (err)
1817 goto err_close_rx_cq;
1818
f62b8bb8
AV
1819 napi_enable(&c->napi);
1820
6a9764ef 1821 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
f62b8bb8
AV
1822 if (err)
1823 goto err_disable_napi;
1824
6a9764ef 1825 err = mlx5e_open_sqs(c, params, cparam);
d3c9bc27
TT
1826 if (err)
1827 goto err_close_icosq;
1828
6a9764ef 1829 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
d7a0ecab
SM
1830 if (err)
1831 goto err_close_sqs;
b5503b99 1832
6a9764ef 1833 err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
f62b8bb8 1834 if (err)
b5503b99 1835 goto err_close_xdp_sq;
f62b8bb8 1836
f62b8bb8
AV
1837 *cp = c;
1838
1839 return 0;
b5503b99 1840err_close_xdp_sq:
d7a0ecab 1841 if (c->xdp)
31391048 1842 mlx5e_close_xdpsq(&c->rq.xdpsq);
f62b8bb8
AV
1843
1844err_close_sqs:
1845 mlx5e_close_sqs(c);
1846
d3c9bc27 1847err_close_icosq:
31391048 1848 mlx5e_close_icosq(&c->icosq);
d3c9bc27 1849
f62b8bb8
AV
1850err_disable_napi:
1851 napi_disable(&c->napi);
d7a0ecab 1852 if (c->xdp)
31871f87 1853 mlx5e_close_cq(&c->rq.xdpsq.cq);
d7a0ecab
SM
1854
1855err_close_rx_cq:
f62b8bb8
AV
1856 mlx5e_close_cq(&c->rq.cq);
1857
1858err_close_tx_cqs:
1859 mlx5e_close_tx_cqs(c);
1860
d3c9bc27
TT
1861err_close_icosq_cq:
1862 mlx5e_close_cq(&c->icosq.cq);
1863
f62b8bb8
AV
1864err_napi_del:
1865 netif_napi_del(&c->napi);
1866 kfree(c);
1867
1868 return err;
1869}
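
/* Note the ordering above: every completion queue (ICOSQ CQ, TX CQs, RX CQ,
 * and the optional XDP SQ CQ) is opened before any work queue that posts to
 * it, and NAPI is enabled before the queues so no completion is missed. Each
 * error label unwinds exactly the resources created before it, in reverse
 * order of creation.
 */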

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}
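
/* The channel lifecycle is deliberately split in two: open/close allocate
 * and free the HW objects, while activate/deactivate only start and stop
 * traffic. Deactivation stops the RQ before the SQs, and a channel is always
 * deactivated before it is closed, so no queue is freed while still armed.
 */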

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.linear = 1;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
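
/* All queue and CQ sizes above are programmed as log2 values, so a ring is
 * always a power of two: e.g. log_wq_sz of 10 describes a 1024-entry ring.
 * The striding-RQ completion queue is sized to log_rq_size + log_num_strides
 * so it can hold one CQE per stride of every posted multi-packet WQE.
 */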

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}
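
/* Opening channels is only the allocation half of bringing traffic up: the
 * caller still has to activate them (see mlx5e_activate_channels below) and
 * point the RQ tables at the new RQs. This two-phase scheme is what lets
 * reconfiguration open a complete new set of channels while the old set is
 * still carrying traffic.
 */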

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
		if (err)
			break;
	}

	return err;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}

static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
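
/* mlx5e_bits_invert() mirrors the low 'size' bits of 'a': for size = 3,
 * 0b001 -> 0b100 and 0b110 -> 0b011. When the XOR hash function is in use,
 * the indirection table below is walked in this bit-reversed order, which
 * spreads consecutive hash values across distant table entries.
 */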

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
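
/* Redirecting every RQ table to the drop RQ is what makes teardown safe:
 * while channels are being deactivated and closed, any packet the HW still
 * steers through a TIR lands in the always-present drop RQ instead of an RQ
 * that is about to be freed.
 */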

static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}

void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
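
/* Note that rx_hash_symmetric is set whenever Toeplitz is selected: the
 * device then computes a symmetric variant of the hash, so both directions
 * of a flow should produce the same value and land on the same receive ring,
 * keeping per-flow processing on one CPU.
 */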

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}

static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fall back to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different from netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}
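
/* MLX5E_SW2HW_MTU/MLX5E_HW2SW_MTU translate between the stack's L3 MTU and
 * the wire MTU programmed into the HW by adding or subtracting the hard
 * header overhead (params->hard_mtu, typically Ethernet header + VLAN tag +
 * FCS). Under that assumption a netdev MTU of 1500 is programmed as a
 * 1522-byte port MTU; see the macro definitions in en.h.
 */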

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
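
/* The txq index formula (txq = channel + tc * num_channels) groups queues by
 * traffic class: with 4 channels and 2 TCs, txqs 0-3 are TC0 on channels 0-3
 * and txqs 4-7 are TC1 on the same channels. txq2sq is the reverse map the
 * hot xmit path uses to find the SQ for a given txq index.
 */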

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a workaround only for the tx timeout watchdog
	 * false alarm when polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}

void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}
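
/* Callers follow a make-before-break pattern: they mlx5e_open_channels() a
 * complete new set first and only then call this function, so a failed
 * allocation leaves the running configuration untouched. The optional
 * hw_modify callback (e.g. mlx5e_modify_tirs_lro or mlx5e_set_dev_port_mtu)
 * runs after the swap, once the new channels are in place but before traffic
 * is restarted.
 */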

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5e_vxlan_allowed(priv->mdev))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
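
/* The drop RQ is a minimal receive queue that is never replenished with
 * buffers; it exists only as a safe steering target (see
 * mlx5e_redirect_rqts_to_drop above) and, with its own counter set, accounts
 * packets that arrive while the interface is down (rx_if_down_packets).
 */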

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
3269
75b81ce7 3270#define MLX5E_SET_FEATURE(features, feature, enable) \
0e405443
GP
3271 do { \
3272 if (enable) \
75b81ce7 3273 *features |= feature; \
0e405443 3274 else \
75b81ce7 3275 *features &= ~feature; \
0e405443
GP
3276 } while (0)
3277
3278typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3279
3280static int set_feature_lro(struct net_device *netdev, bool enable)
f62b8bb8
AV
3281{
3282 struct mlx5e_priv *priv = netdev_priv(netdev);
619a8f2a 3283 struct mlx5_core_dev *mdev = priv->mdev;
2e20a151 3284 struct mlx5e_channels new_channels = {};
619a8f2a 3285 struct mlx5e_params *old_params;
2e20a151
SM
3286 int err = 0;
3287 bool reset;
f62b8bb8
AV
3288
3289 mutex_lock(&priv->state_lock);
f62b8bb8 3290
619a8f2a
TT
3291 old_params = &priv->channels.params;
3292 reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3293
619a8f2a 3294 new_channels.params = *old_params;
2e20a151
SM
3295 new_channels.params.lro_en = enable;
3296
619a8f2a
TT
3297 if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
3298 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
3299 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
3300 reset = false;
3301 }
3302
2e20a151 3303 if (!reset) {
619a8f2a 3304 *old_params = new_channels.params;
2e20a151
SM
3305 err = mlx5e_modify_tirs_lro(priv);
3306 goto out;
98e81b0a 3307 }
f62b8bb8 3308
2e20a151
SM
3309 err = mlx5e_open_channels(priv, &new_channels);
3310 if (err)
3311 goto out;
0e405443 3312
2e20a151
SM
3313 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3314out:
9b37b07f 3315 mutex_unlock(&priv->state_lock);
0e405443
GP
3316 return err;
3317}
3318
2b52a283 3319static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
0e405443
GP
3320{
3321 struct mlx5e_priv *priv = netdev_priv(netdev);
3322
3323 if (enable)
2b52a283 3324 mlx5e_enable_cvlan_filter(priv);
0e405443 3325 else
2b52a283 3326 mlx5e_disable_cvlan_filter(priv);
0e405443
GP
3327
3328 return 0;
3329}
3330
3331static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3332{
3333 struct mlx5e_priv *priv = netdev_priv(netdev);
f62b8bb8 3334
0e405443 3335 if (!enable && mlx5e_tc_num_filters(priv)) {
e8f887ac
AV
3336 netdev_err(netdev,
3337 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3338 return -EINVAL;
3339 }
3340
0e405443
GP
3341 return 0;
3342}
3343
94cb1ebb
EBE
3344static int set_feature_rx_all(struct net_device *netdev, bool enable)
3345{
3346 struct mlx5e_priv *priv = netdev_priv(netdev);
3347 struct mlx5_core_dev *mdev = priv->mdev;
3348
3349 return mlx5_set_port_fcs(mdev, !enable);
3350}
3351
102722fc
GE
3352static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3353{
3354 struct mlx5e_priv *priv = netdev_priv(netdev);
3355 int err;
3356
3357 mutex_lock(&priv->state_lock);
3358
3359 priv->channels.params.scatter_fcs_en = enable;
3360 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3361 if (err)
3362 priv->channels.params.scatter_fcs_en = !enable;
3363
3364 mutex_unlock(&priv->state_lock);
3365
3366 return err;
3367}
3368
36350114
GP
3369static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3370{
3371 struct mlx5e_priv *priv = netdev_priv(netdev);
ff9c852f 3372 int err = 0;
36350114
GP
3373
3374 mutex_lock(&priv->state_lock);
3375
6a9764ef 3376 priv->channels.params.vlan_strip_disable = !enable;
ff9c852f
SM
3377 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3378 goto unlock;
3379
3380 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
36350114 3381 if (err)
6a9764ef 3382 priv->channels.params.vlan_strip_disable = enable;
36350114 3383
ff9c852f 3384unlock:
36350114
GP
3385 mutex_unlock(&priv->state_lock);
3386
3387 return err;
3388}
3389
45bf454a
MG
3390#ifdef CONFIG_RFS_ACCEL
3391static int set_feature_arfs(struct net_device *netdev, bool enable)
3392{
3393 struct mlx5e_priv *priv = netdev_priv(netdev);
3394 int err;
3395
3396 if (enable)
3397 err = mlx5e_arfs_enable(priv);
3398 else
3399 err = mlx5e_arfs_disable(priv);
3400
3401 return err;
3402}
3403#endif
3404
0e405443 3405static int mlx5e_handle_feature(struct net_device *netdev,
75b81ce7 3406 netdev_features_t *features,
0e405443
GP
3407 netdev_features_t wanted_features,
3408 netdev_features_t feature,
3409 mlx5e_feature_handler feature_handler)
3410{
3411 netdev_features_t changes = wanted_features ^ netdev->features;
3412 bool enable = !!(wanted_features & feature);
3413 int err;
3414
3415 if (!(changes & feature))
3416 return 0;
3417
3418 err = feature_handler(netdev, enable);
3419 if (err) {
b20eab15
GP
3420 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3421 enable ? "Enable" : "Disable", &feature, err);
0e405443
GP
3422 return err;
3423 }
3424
75b81ce7 3425 MLX5E_SET_FEATURE(features, feature, enable);
0e405443
GP
3426 return 0;
3427}
3428
3429static int mlx5e_set_features(struct net_device *netdev,
3430 netdev_features_t features)
3431{
75b81ce7 3432 netdev_features_t oper_features = netdev->features;
be0f780b
GP
3433 int err = 0;
3434
3435#define MLX5E_HANDLE_FEATURE(feature, handler) \
3436 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
0e405443 3437
be0f780b
GP
3438 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3439 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
2b52a283 3440 set_feature_cvlan_filter);
be0f780b
GP
3441 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3442 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3443 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3444 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
45bf454a 3445#ifdef CONFIG_RFS_ACCEL
be0f780b 3446 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
45bf454a 3447#endif
0e405443 3448
75b81ce7
GP
3449 if (err) {
3450 netdev->features = oper_features;
3451 return -EINVAL;
3452 }
3453
3454 return 0;
f62b8bb8
AV
3455}
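
/* mlx5e_set_features applies each toggled bit independently: handlers that
 * succeed record their new state in oper_features via MLX5E_SET_FEATURE, so
 * if a later handler fails, netdev->features is set to reflect exactly what
 * was actually applied and -EINVAL is returned for the request as a whole.
 */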

static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mutex_lock(&priv->state_lock);
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	mutex_unlock(&priv->state_lock);

	return features;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	reset = !params->lro_en;
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);

		reset = reset && (ppw_old != ppw_new);
	}

	if (!reset) {
		params->sw_mtu = new_mtu;
		mlx5e_set_dev_port_mtu(priv);
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		netdev_warn(priv->netdev, "Disabling cqe compression\n");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
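
/* Minimal userspace sketch of driving the SIOCSHWTSTAMP path above; the
 * device name and filter choice are illustrative assumptions:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds the filter actually applied; this driver
 * upgrades any specific PTP filter to HWTSTAMP_FILTER_ALL.
 */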

int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
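
/* The NDOs above back the standard iproute2 VF controls, e.g. (device
 * name illustrative):
 *
 *	ip link set dev eth0 vf 0 mac 02:00:00:00:00:01
 *	ip link set dev eth0 vf 0 vlan 10 qos 2
 *	ip link set dev eth0 vf 0 state enable
 *
 * Note the "vf + 1" in every e-switch call: vport 0 is the PF itself, so
 * VF N maps to e-switch vport N + 1.
 */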

static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5e_vxlan_lookup_port(priv, port))
			return features;
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}

static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
					struct mlx5e_txqsq *sq)
{
	struct mlx5_eq *eq = sq->cq.mcq.eq;
	u32 eqe_count;

	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
		   eq->eqn, eq->cons_index, eq->irqn);

	eqe_count = mlx5_eq_poll_irq_disabled(eq);
	if (!eqe_count)
		return false;

	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
	sq->channel->stats.eq_rearm++;
	return true;
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	struct net_device *dev = priv->netdev;
	bool reopen_channels = false;
	int i, err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		netdev_err(dev,
			   "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
			   jiffies_to_usecs(jiffies - dev_queue->trans_start));

		/* If we recover a lost interrupt, most likely TX timeout will
		 * be resolved, skip reopening channels
		 */
		if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
			clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
			reopen_channels = true;
		}
	}

	if (!reopen_channels)
		goto unlock;

	mlx5e_close_locked(dev);
	err = mlx5e_open_locked(dev);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* Exchange programs; we keep the extra prog reference we got from
	 * the caller as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
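
/* The .ndo_bpf hook dispatched above is what runs when an XDP program is
 * attached from userspace, e.g. (object/section/device names illustrative):
 *
 *	ip link set dev eth0 xdp obj prog.o sec xdp
 *	ip link set dev eth0 xdp off
 *
 * Going from no program to a program (or back) changes the RQ type and so
 * takes the full close/open path in mlx5e_xdp_set(); swapping one program
 * for another only exchanges per-RQ references.
 */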

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_channels *chs = &priv->channels;
	int i;

	for (i = 0; i < chs->num; i++)
		napi_schedule(&chs->c[i]->napi);
}
#endif

static const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
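
/* For example (values illustrative), len = 8 and num_channels = 3 yields
 * the indirection table { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading RSS hash
 * buckets round-robin across the channels.
 */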

static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_get_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
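
/* Worked example (numbers illustrative, both values in Mb/s): a 100GbE
 * port (link_speed = 100000) behind a PCIe link with ~63000 available is
 * not considered slow (100000 <= 2 * 63000), while the same port behind
 * ~32000 is (100000 > 64000), which flips the driver toward
 * bandwidth-saving defaults such as CQE compression.
 */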

static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
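
/* Worked example (periods illustrative): if the device reports supported
 * LRO timer periods { 8, 64, 512, 1024 } and wanted_timeout is 256, the
 * loop stops at index 2 and 512 is returned: the smallest supported
 * period >= the request, falling back to the largest entry when the
 * request exceeds them all.
 */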

void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu)
{
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = max_channels;
	params->num_tc = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	if (mlx5e_striding_rq_possible(mdev, params))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
				!slow_pci_heuristic(mdev));
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
			params->lro_en = !slow_pci_heuristic(mdev);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}

static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->profile = profile;
	priv->ppriv = ppriv;
	priv->msglevel = MLX5E_MSG_LEVEL;

	mlx5e_build_nic_params(mdev, &priv->channels.params,
			       profile->max_nch(mdev), netdev->mtu);

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	mlx5e_timestamp_init(priv);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};
#endif

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features |= NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features |= NETIF_F_GSO_GRE |
				       NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
	if (MLX5_VPORT_MANAGER(mdev))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

	mlx5e_ipsec_build_netdev(priv);
}

static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}

	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
		priv->drop_rq_q_counter = 0;
	}
}

static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	if (priv->q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

	if (priv->drop_rq_q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_register_vport_reps(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_unregister_vport_reps(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_ndo_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
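
/* The profile acts as a vtable for a netdev flavor; the plain NIC profile
 * above and e.g. the representor profile in en_rep.c plug different
 * init/teardown and RX handlers into the shared netdev management code
 * below. Roughly, mlx5e_attach_netdev() drives it as:
 *
 *	profile->init_tx(priv);
 *	mlx5e_create_q_counters(priv);
 *	mlx5e_open_drop_rq(priv, &priv->drop_rq);
 *	profile->init_rx(priv);
 *	if (profile->enable)
 *		profile->enable(priv);
 *
 * with the mirror-image order in mlx5e_detach_netdev().
 */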

/* mlx5e generic netdev management API (move to en_common.c) */

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}

int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	const struct mlx5e_profile *profile;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);

err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	profile->cleanup_tx(priv);

out:
	return err;
}

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}
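
/* Lifecycle pairing of the API above (sketch): mlx5e_create_netdev() is
 * undone by mlx5e_destroy_netdev(), and mlx5e_attach_netdev() by
 * mlx5e_detach_netdev(). Attach/detach create and destroy only hardware
 * contexts, so the netdev itself (and userspace configuration on it) can
 * survive PCI or firmware events that bounce the attach state.
 */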

/* The scope of mlx5e_attach and mlx5e_detach is limited to creating and
 * destroying hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *rpriv = NULL;
	void *priv;
	int err;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_VPORT_MANAGER(mdev)) {
		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
		if (!rpriv) {
			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
			return NULL;
		}
	}
#endif

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_free_rpriv;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_free_rpriv:
	kfree(rpriv);
	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	void *ppriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}