/* drivers/net/ethernet/mellanox/mlx5/core/en_main.c */

/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "vxlan.h"
#include "en/port.h"
#include "en/xdp.h"

struct mlx5e_rq_param {
	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param       wq;
	struct mlx5e_rq_frags_info frags_info;
};

struct mlx5e_sq_param {
	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param       wq;
};

struct mlx5e_cq_param {
	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param       wq;
	u16                        eq_ix;
	u8                         cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param      rq;
	struct mlx5e_sq_param      sq;
	struct mlx5e_sq_param      xdp_sq;
	struct mlx5e_sq_param      icosq;
	struct mlx5e_cq_param      rx_cq;
	struct mlx5e_cq_param      tx_cq;
	struct mlx5e_cq_param      icosq_cq;
};

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	u32 frag_sz;

	linear_rq_headroom += NET_IP_ALIGN;

	frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);

	if (params->xdp_prog && frag_sz < PAGE_SIZE)
		frag_sz = PAGE_SIZE;

	return frag_sz;
}

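/* Worked example (illustrative; exact numbers depend on arch and config):
 * with a 1500B software MTU and no XDP program, the headroom is
 * MLX5_RX_HEADROOM + NET_IP_ALIGN, and MLX5_SKB_FRAG_SZ() adds SKB
 * alignment plus the aligned struct skb_shared_info on top of
 * headroom + hw_mtu, which still fits comfortably in a 4K page, so the
 * whole packet can live in one linear fragment. With XDP the fragment
 * is padded to PAGE_SIZE so every packet owns a full page.
 */
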
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return !params->lro_en && frag_sz <= PAGE_SIZE;
}

static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(mdev, params))
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

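/* Without the ext_stride_num_range capability, the firmware interface
 * encodes the number of strides per WQE as an offset from
 * MLX5_MPWQE_LOG_NUM_STRIDES_BASE; a negative offset cannot be
 * expressed, so a linear layout that would need fewer strides than the
 * base is rejected here and the non-linear MPWQE path is used instead.
 */
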
static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
{
	if (params->log_rq_mtu_frames <
	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
}

static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params));

	return MLX5E_MPWQE_STRIDE_SZ(mdev,
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params);
}

static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params)
{
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	bool is_linear_skb;

	linear_rq_headroom += NET_IP_ALIGN;

	is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(mdev, params) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params);

	return is_linear_skb ? linear_rq_headroom : 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		!MLX5_IPSEC_DEV(mdev) &&
		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

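/* Summary of RQ type selection: the striding RQ (one multi-packet WQE
 * covering many strides) is chosen only when the device caps allow it,
 * IPSec offload is not active, any attached XDP program can still be
 * given linear buffers, and the MLX5E_PFLAG_RX_STRIDING_RQ private flag
 * is set; otherwise the legacy cyclic RQ (one WQE per packet) is used.
 */
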
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

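/* This UMR (user-mode memory registration) WQE, posted on the channel's
 * ICO SQ, rewrites the MTT translations behind rq->umr_mkey so a single
 * multi-packet WQE can scatter into MLX5_MPWRQ_PAGES_PER_WQE freshly
 * mapped pages; ds_cnt expresses the WQE size in 16-byte data segments,
 * which is the unit the qpn_ds control field expects.
 */
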
static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag, *prev;
	int i;

	next_frag.di = &rq->wqe.di[0];
	next_frag.offset = 0;
	prev = NULL;

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

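/* Illustrative layout (assuming 4K pages and a hypothetical two-frag
 * WQE with 2K strides): fragments are packed greedily into page-sized
 * DMA buffers, so WQE0 frag0/frag1 share page 0, WQE1 frag0/frag1 share
 * page 1, and so on; whichever fragment ends up last in its page gets
 * last_in_page set, so each page is released exactly once.
 */
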
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      struct mlx5e_params *params,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->stats   = &c->priv->channel_stats[c->ix].rq;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq =
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_mpwrq_linear :
			mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
		if (err)
			goto err_free;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_linear :
			mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	/* Create a page_pool and register it with rxq */
	pp_params.order     = 0;
	pp_params.flags     = 0; /* No internal DMA mapping in page_pool */
	pp_params.pool_size = pool_size;
	pp_params.nid       = cpu_to_node(c->cpu);
	pp_params.dev       = c->pdev;
	pp_params.dma_dir   = rq->buff.map_dir;

	/* page_pool can be used even when there is no rq->xdp_prog;
	 * given that page_pool does not handle DMA mapping, there is no
	 * required state to clear. And page_pool gracefully handles an
	 * elevated refcnt.
	 */
	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool)) {
		err = PTR_ERR(rq->page_pool);
		rq->page_pool = NULL;
		goto err_free;
	}
	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rq->page_pool);
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a power of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,          rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,        MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,     rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

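/* RQ objects follow the firmware state machine RST -> RDY -> ERR/RST.
 * The open path below only requests RST -> RDY, while the scatter-FCS
 * and VSD helpers that follow modify individual RQC fields of an RQ
 * that is already in RDY state, using the modify bitmask to say which
 * field changed.
 */
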
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		/* UMR WQE (if in progress) is always at wq->head */
		if (rq->mpwqe.umr_in_progress)
			rq->dealloc_wqe(rq, wq->head);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

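/* Open/close pairs in this file follow one pattern: mlx5e_alloc_*()
 * prepares host memory, mlx5e_create_*() creates the firmware object,
 * and a modify call moves it to RDY; teardown runs the same steps in
 * reverse. mlx5e_activate_rq() below additionally kicks the queue by
 * posting a NOP on the channel's ICO SQ.
 */
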
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;

	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
				    GFP_KERNEL, numa);
	if (!sq->db.xdpi) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = is_redirect ?
		&c->priv->channel_stats[c->ix].xdpsq :
		&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

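/* Sizing note: wq_sz is a power of two, and so is
 * MLX5_SEND_WQEBB_NUM_DS (the number of 16-byte data segments in a
 * 64-byte WQE basic block), so df_sz is a power of two and "df_sz - 1"
 * is a valid wrap-around mask for the DMA unmap fifo.
 */
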
static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (mlx5_accel_is_tls_device(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

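/* Like RQs, SQs are created in the RST state and must be moved to RDY
 * before they accept doorbells; mlx5e_create_sq_rdy() bundles the
 * create and the RST -> RDY modify so the three SQ flavors (txqsq,
 * icosq, xdpsq) can share one ready-to-use constructor.
 */
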
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
{
	WARN_ONCE(sq->cc != sq->pc,
		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
		  sq->sqn, sq->cc, sq->pc);
	sq->cc = 0;
	sq->dma_fifo_cc = 0;
	sq->pc = 0;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi].skb = NULL;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(2000);

	while (time_before(jiffies, exp_time)) {
		if (sq->cc == sq->pc)
			return 0;

		msleep(20);
	}

	netdev_err(sq->channel->netdev,
		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
		   sq->sqn, sq->cc, sq->pc);

	return -ETIMEDOUT;
}

static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
{
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	msp.curr_state = curr_state;
	msp.next_state = MLX5_SQC_STATE_RST;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
		return err;
	}

	memset(&msp, 0, sizeof(msp));
	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
		return err;
	}

	return 0;
}

static void mlx5e_sq_recover(struct work_struct *work)
{
	struct mlx5e_txqsq_recover *recover =
		container_of(work, struct mlx5e_txqsq_recover,
			     recover_work);
	struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
					      recover);
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	u8 state;
	int err;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		return;
	}

	if (state != MLX5_RQC_STATE_ERR) {
		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
		return;
	}

	netif_tx_disable_queue(sq->txq);

	if (mlx5e_wait_for_sq_flush(sq))
		return;

	/* If the interval between two consecutive recovers per SQ is too
	 * short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
	 * If we reached this state, there is probably a bug that needs to be
	 * fixed. Let's keep the queue closed and let tx timeout cleanup.
	 */
	if (jiffies_to_msecs(jiffies - recover->last_recover) <
	    MLX5E_SQ_RECOVER_MIN_INTERVAL) {
		netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
			   sq->sqn);
		return;
	}

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. We can safely reset the SQ.
	 */
	if (mlx5e_sq_to_ready(sq, state))
		return;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	recover->last_recover = jiffies;
	mlx5e_activate_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq,
			    bool is_redirect)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	if (is_redirect)
		set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre-initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

EBE
1640static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1641 struct mlx5e_cq_param *param,
1642 struct mlx5e_cq *cq)
1643{
1644 struct mlx5_core_dev *mdev = c->priv->mdev;
1645 int err;
1646
231243c8
SM
1647 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1648 param->wq.db_numa_node = cpu_to_node(c->cpu);
95b6c6a5
EBE
1649 param->eq_ix = c->ix;
1650
1651 err = mlx5e_alloc_cq_common(mdev, param, cq);
1652
1653 cq->napi = &c->napi;
1654 cq->channel = c;
1655
1656 return err;
1657}
1658
3b77235b 1659static void mlx5e_free_cq(struct mlx5e_cq *cq)
f62b8bb8 1660{
3a2f7033 1661 mlx5_wq_destroy(&cq->wq_ctrl);
f62b8bb8
AV
1662}
1663
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,          eqn);
	MLX5_SET(cqc,   cqc, uar_page,       mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size,  cq->wq_ctrl.buf.page_shift -
					     MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,       cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct net_dim_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

1783static int mlx5e_open_sqs(struct mlx5e_channel *c,
6a9764ef 1784 struct mlx5e_params *params,
f62b8bb8
AV
1785 struct mlx5e_channel_param *cparam)
1786{
05909bab
EBE
1787 struct mlx5e_priv *priv = c->priv;
1788 int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
f62b8bb8 1789
6a9764ef 1790 for (tc = 0; tc < params->num_tc; tc++) {
05909bab 1791 int txq_ix = c->ix + tc * max_nch;
acc6c595 1792
a43b25da 1793 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
05909bab 1794 params, &cparam->sq, &c->sq[tc], tc);
f62b8bb8
AV
1795 if (err)
1796 goto err_close_sqs;
1797 }
1798
1799 return 0;
1800
1801err_close_sqs:
1802 for (tc--; tc >= 0; tc--)
31391048 1803 mlx5e_close_txqsq(&c->sq[tc]);
f62b8bb8
AV
1804
1805 return err;
1806}
1807
1808static void mlx5e_close_sqs(struct mlx5e_channel *c)
1809{
1810 int tc;
1811
1812 for (tc = 0; tc < c->num_tc; tc++)
31391048 1813 mlx5e_close_txqsq(&c->sq[tc]);
f62b8bb8
AV
1814}
1815
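/* SQ rate limiting: rates live in a shared per-device table
 * (mlx5_rl_add_rate()/mlx5_rl_remove_rate()), so the SQ's old entry is
 * released first, and the SQ is then modified in place (RDY -> RDY) to
 * point at the new rate index.
 */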
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove the current rl index to free space for the next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

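/* ndo_set_tx_maxrate entry point. The stack passes the rate in Mb/s
 * while the device is programmed in Kb/s, hence the "<< 10" below:
 * e.g. a requested 100 Mb/s is programmed as 102400 Kb/s.
 */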
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether the rate is in the valid range; 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

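/* Open a single channel: allocate the per-channel context on the NUMA
 * node of the channel's IRQ, then bring up the CQs, ICOSQ, per-TC SQs,
 * optional XDP SQs and the RQ in dependency order. On failure each
 * step unwinds through the goto ladder below in reverse creation order.
 */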
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_dim_cq_moder icocq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix].ch;

	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	c->irq_desc = irq_to_desc(irq);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
	if (err)
		goto err_close_rq;

	*cp = c;

	return 0;

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kvfree(c);

	return err;
}

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kvfree(c);
}

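/* Compute the RX fragment layout for the legacy (cyclic) RQ. Linear
 * SKBs use a single fragment; otherwise the MTU-derived byte count is
 * split into at most MLX5E_MAX_RX_FRAGS fragments of up to
 * frag_size_max bytes each. For example (assuming 4K pages and
 * MLX5E_MAX_RX_FRAGS == 4), a 9000 byte count splits into
 * 2048/2048/2048/2856 byte fragments, each stride rounded up to a
 * power of two.
 */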
#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(mdev, params)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}

static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

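/* The mlx5e_build_*_param() helpers below only fill parameter structs
 * (RQC/SQC/CQC mailbox layouts plus WQ allocation hints); no device
 * command is issued until the corresponding queue is created.
 */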
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}

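/* Open all channels of a configuration. The channel parameters are
 * derived once into a scratch mlx5e_channel_param which is freed again
 * before returning.
 */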
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kvfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++)
		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
						  err ? 0 : 20000);

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}

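/* RQ tables (RQTs) provide the RX steering indirection: one big RSS
 * table plus a single-entry table per direct TIR. A freshly created
 * RQT points all entries at the drop RQ until it is redirected to real
 * channel RQs.
 */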
static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

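/* Resolve each RQT entry to an RQN. For XOR RSS the table index is
 * bit-reversed first via mlx5e_bits_invert() (e.g. 0b001 -> 0b100 for
 * a size-8 table) before going through the indirection table.
 */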
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc    = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}

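/* TIR context helpers. Note that the LRO max IP payload field is in
 * units of 256 bytes: e.g. a 64KB lro_wqe_sz minus the rough 256 byte
 * L2/L3 header allowance is programmed as (65536 - 256) >> 8 = 255.
 */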
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}

void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}

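/* MTU handling: the HW MTU is programmed on both the port and the
 * vport context and then read back, since firmware may clamp the
 * requested value.
 */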
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
	int max_nch = priv->profile->max_nch(priv->mdev);
	int i, tc;

	for (i = 0; i < max_nch; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * max_nch;
}

static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}

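/* Attach freshly opened channels to the netdev: publish the TC/queue
 * counts, rebuild the txq -> SQ mapping, start the TX queues and
 * finally point the RQTs at the new channel RQs.
 */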
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_tx2sq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}

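/* Swap in a new set of channels that the caller has already opened
 * (make-before-break): the old channels are deactivated and closed
 * only once the new ones exist, and the optional hw_modify callback
 * runs before the new channels are activated.
 */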
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5_vxlan_allowed(priv->vxlan))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

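/* The drop RQ is a minimal RQ that no buffers are ever posted to; RQTs
 * are pointed at it whenever there is no active channel to receive
 * traffic, so the HW drops packets instead of scattering them to stale
 * queues.
 */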
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

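/* mqprio offload: changing the TC count changes the number of TXQs, so
 * the update goes through the usual channel switch (open a new set of
 * channels, then swap) rather than being applied in place.
 */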
static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    new_channels.params.num_tc);
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower,
				     int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower, flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	/* update HW stats in background for next time */
	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		mlx5e_grp_sw_update_stats(priv);
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *old_params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	old_params = &priv->channels.params;
	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
		err = -EINVAL;
		goto out;
	}

	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *old_params;
	new_channels.params.lro_en = enable;

	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
			reset = false;
	}

	if (!reset) {
		*old_params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
3647{
3648 netdev_features_t changes = wanted_features ^ netdev->features;
3649 bool enable = !!(wanted_features & feature);
3650 int err;
3651
3652 if (!(changes & feature))
3653 return 0;
3654
3655 err = feature_handler(netdev, enable);
3656 if (err) {
b20eab15
GP
3657 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3658 enable ? "Enable" : "Disable", &feature, err);
0e405443
GP
3659 return err;
3660 }
3661
75b81ce7 3662 MLX5E_SET_FEATURE(features, feature, enable);
0e405443
GP
3663 return 0;
3664}
3665
3666static int mlx5e_set_features(struct net_device *netdev,
3667 netdev_features_t features)
3668{
75b81ce7 3669 netdev_features_t oper_features = netdev->features;
be0f780b
GP
3670 int err = 0;
3671
3672#define MLX5E_HANDLE_FEATURE(feature, handler) \
3673 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
0e405443 3674
be0f780b
GP
3675 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3676 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
2b52a283 3677 set_feature_cvlan_filter);
be0f780b
GP
3678 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3679 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3680 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3681 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
45bf454a 3682#ifdef CONFIG_RFS_ACCEL
be0f780b 3683 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
45bf454a 3684#endif
0e405443 3685
75b81ce7
GP
3686 if (err) {
3687 netdev->features = oper_features;
3688 return -EINVAL;
3689 }
3690
3691 return 0;
f62b8bb8
AV
3692}
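/* The err |= accumulation above deliberately discards the individual
 * handler error codes: any failure makes the whole ndo return -EINVAL,
 * while oper_features still reflects exactly the toggles that did
 * succeed, so netdev->features stays truthful.
 */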
3693
7d92d580
GP
3694static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3695 netdev_features_t features)
3696{
3697 struct mlx5e_priv *priv = netdev_priv(netdev);
6c3a823e 3698 struct mlx5e_params *params;
7d92d580
GP
3699
3700 mutex_lock(&priv->state_lock);
6c3a823e 3701 params = &priv->channels.params;
7d92d580
GP
3702 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
 3703 /* HW strips the outer C-tag header; this is a problem
3704 * for S-tag traffic.
3705 */
3706 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
6c3a823e 3707 if (!params->vlan_strip_disable)
7d92d580
GP
3708 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3709 }
6c3a823e
TT
3710 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3711 features &= ~NETIF_F_LRO;
3712 if (params->lro_en)
3713 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
3714 }
3715
7d92d580
GP
3716 mutex_unlock(&priv->state_lock);
3717
3718 return features;
3719}
3720
250a42b6
AN
3721int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3722 change_hw_mtu_cb set_mtu_cb)
f62b8bb8
AV
3723{
3724 struct mlx5e_priv *priv = netdev_priv(netdev);
2e20a151 3725 struct mlx5e_channels new_channels = {};
472a1e44 3726 struct mlx5e_params *params;
98e81b0a 3727 int err = 0;
506753b0 3728 bool reset;
f62b8bb8 3729
f62b8bb8 3730 mutex_lock(&priv->state_lock);
98e81b0a 3731
472a1e44 3732 params = &priv->channels.params;
506753b0 3733
73281b78 3734 reset = !params->lro_en;
2e20a151 3735 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3736
73281b78
TT
3737 new_channels.params = *params;
3738 new_channels.params.sw_mtu = new_mtu;
3739
a26a5bdf
TT
3740 if (params->xdp_prog &&
3741 !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
3742 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
3743 new_mtu, MLX5E_XDP_MAX_MTU);
3744 err = -EINVAL;
3745 goto out;
3746 }
3747
99cbfa93 3748 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
73281b78
TT
3749 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3750 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3751
3752 reset = reset && (ppw_old != ppw_new);
3753 }
3754
2e20a151 3755 if (!reset) {
472a1e44 3756 params->sw_mtu = new_mtu;
250a42b6 3757 set_mtu_cb(priv);
472a1e44 3758 netdev->mtu = params->sw_mtu;
2e20a151
SM
3759 goto out;
3760 }
98e81b0a 3761
2e20a151 3762 err = mlx5e_open_channels(priv, &new_channels);
472a1e44 3763 if (err)
2e20a151 3764 goto out;
2e20a151 3765
250a42b6 3766 mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
472a1e44 3767 netdev->mtu = new_channels.params.sw_mtu;
f62b8bb8 3768
2e20a151
SM
3769out:
3770 mutex_unlock(&priv->state_lock);
f62b8bb8
AV
3771 return err;
3772}
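/* A full channel reopen is skipped when LRO is enabled, when the
 * interface is closed, or, for striding RQ, when the new MTU does not
 * change log(packets per WQE); in those cases the MTU is applied in
 * place through set_mtu_cb.
 */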
3773
250a42b6
AN
3774static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
3775{
3776 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
3777}
3778
7c39afb3
FD
3779int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3780{
3781 struct hwtstamp_config config;
3782 int err;
3783
3784 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3785 return -EOPNOTSUPP;
3786
3787 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3788 return -EFAULT;
3789
3790 /* TX HW timestamp */
3791 switch (config.tx_type) {
3792 case HWTSTAMP_TX_OFF:
3793 case HWTSTAMP_TX_ON:
3794 break;
3795 default:
3796 return -ERANGE;
3797 }
3798
3799 mutex_lock(&priv->state_lock);
3800 /* RX HW timestamp */
3801 switch (config.rx_filter) {
3802 case HWTSTAMP_FILTER_NONE:
3803 /* Reset CQE compression to Admin default */
3804 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3805 break;
3806 case HWTSTAMP_FILTER_ALL:
3807 case HWTSTAMP_FILTER_SOME:
3808 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3809 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3810 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3811 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3812 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3813 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3814 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3815 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3816 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3817 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3818 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3819 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3820 case HWTSTAMP_FILTER_NTP_ALL:
3821 /* Disable CQE compression */
 3822 netdev_warn(priv->netdev, "Disabling CQE compression\n");
3823 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3824 if (err) {
3825 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3826 mutex_unlock(&priv->state_lock);
3827 return err;
3828 }
3829 config.rx_filter = HWTSTAMP_FILTER_ALL;
3830 break;
3831 default:
3832 mutex_unlock(&priv->state_lock);
3833 return -ERANGE;
3834 }
3835
3836 memcpy(&priv->tstamp, &config, sizeof(config));
3837 mutex_unlock(&priv->state_lock);
3838
3839 return copy_to_user(ifr->ifr_data, &config,
3840 sizeof(config)) ? -EFAULT : 0;
3841}
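/* Userspace reaches this path through the standard SIOCSHWTSTAMP ioctl
 * (see linux/net_tstamp.h). A minimal sketch, with error handling
 * elided and "eth0" as a placeholder interface name:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any supported RX filter other than NONE disables CQE
 * compression and is widened by the driver to HWTSTAMP_FILTER_ALL.
 */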
3842
3843int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3844{
3845 struct hwtstamp_config *cfg = &priv->tstamp;
3846
3847 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3848 return -EOPNOTSUPP;
3849
3850 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3851}
3852
ef9814de
EBE
3853static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3854{
1170fbd8
FD
3855 struct mlx5e_priv *priv = netdev_priv(dev);
3856
ef9814de
EBE
3857 switch (cmd) {
3858 case SIOCSHWTSTAMP:
1170fbd8 3859 return mlx5e_hwstamp_set(priv, ifr);
ef9814de 3860 case SIOCGHWTSTAMP:
1170fbd8 3861 return mlx5e_hwstamp_get(priv, ifr);
ef9814de
EBE
3862 default:
3863 return -EOPNOTSUPP;
3864 }
3865}
3866
e80541ec 3867#ifdef CONFIG_MLX5_ESWITCH
66e49ded
SM
3868static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3869{
3870 struct mlx5e_priv *priv = netdev_priv(dev);
3871 struct mlx5_core_dev *mdev = priv->mdev;
3872
3873 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3874}
3875
79aab093
MS
3876static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3877 __be16 vlan_proto)
66e49ded
SM
3878{
3879 struct mlx5e_priv *priv = netdev_priv(dev);
3880 struct mlx5_core_dev *mdev = priv->mdev;
3881
79aab093
MS
3882 if (vlan_proto != htons(ETH_P_8021Q))
3883 return -EPROTONOSUPPORT;
3884
66e49ded
SM
3885 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3886 vlan, qos);
3887}
3888
f942380c
MHY
3889static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3890{
3891 struct mlx5e_priv *priv = netdev_priv(dev);
3892 struct mlx5_core_dev *mdev = priv->mdev;
3893
3894 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3895}
3896
1edc57e2
MHY
3897static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3898{
3899 struct mlx5e_priv *priv = netdev_priv(dev);
3900 struct mlx5_core_dev *mdev = priv->mdev;
3901
3902 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3903}
bd77bf1c
MHY
3904
3905static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3906 int max_tx_rate)
3907{
3908 struct mlx5e_priv *priv = netdev_priv(dev);
3909 struct mlx5_core_dev *mdev = priv->mdev;
3910
bd77bf1c 3911 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
c9497c98 3912 max_tx_rate, min_tx_rate);
bd77bf1c
MHY
3913}
3914
66e49ded
SM
3915static int mlx5_vport_link2ifla(u8 esw_link)
3916{
3917 switch (esw_link) {
3918 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3919 return IFLA_VF_LINK_STATE_DISABLE;
3920 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3921 return IFLA_VF_LINK_STATE_ENABLE;
3922 }
3923 return IFLA_VF_LINK_STATE_AUTO;
3924}
3925
3926static int mlx5_ifla_link2vport(u8 ifla_link)
3927{
3928 switch (ifla_link) {
3929 case IFLA_VF_LINK_STATE_DISABLE:
3930 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3931 case IFLA_VF_LINK_STATE_ENABLE:
3932 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3933 }
3934 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3935}
3936
3937static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3938 int link_state)
3939{
3940 struct mlx5e_priv *priv = netdev_priv(dev);
3941 struct mlx5_core_dev *mdev = priv->mdev;
3942
3943 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3944 mlx5_ifla_link2vport(link_state));
3945}
3946
3947static int mlx5e_get_vf_config(struct net_device *dev,
3948 int vf, struct ifla_vf_info *ivi)
3949{
3950 struct mlx5e_priv *priv = netdev_priv(dev);
3951 struct mlx5_core_dev *mdev = priv->mdev;
3952 int err;
3953
3954 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3955 if (err)
3956 return err;
3957 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3958 return 0;
3959}
3960
3961static int mlx5e_get_vf_stats(struct net_device *dev,
3962 int vf, struct ifla_vf_stats *vf_stats)
3963{
3964 struct mlx5e_priv *priv = netdev_priv(dev);
3965 struct mlx5_core_dev *mdev = priv->mdev;
3966
3967 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3968 vf_stats);
3969}
e80541ec 3970#endif
66e49ded 3971
dccea6bf
SM
3972struct mlx5e_vxlan_work {
3973 struct work_struct work;
3974 struct mlx5e_priv *priv;
3975 u16 port;
3976};
3977
3978static void mlx5e_vxlan_add_work(struct work_struct *work)
3979{
3980 struct mlx5e_vxlan_work *vxlan_work =
3981 container_of(work, struct mlx5e_vxlan_work, work);
3982 struct mlx5e_priv *priv = vxlan_work->priv;
3983 u16 port = vxlan_work->port;
3984
3985 mutex_lock(&priv->state_lock);
a3c785d7 3986 mlx5_vxlan_add_port(priv->vxlan, port);
dccea6bf
SM
3987 mutex_unlock(&priv->state_lock);
3988
3989 kfree(vxlan_work);
3990}
3991
3992static void mlx5e_vxlan_del_work(struct work_struct *work)
3993{
3994 struct mlx5e_vxlan_work *vxlan_work =
3995 container_of(work, struct mlx5e_vxlan_work, work);
3996 struct mlx5e_priv *priv = vxlan_work->priv;
3997 u16 port = vxlan_work->port;
3998
3999 mutex_lock(&priv->state_lock);
a3c785d7 4000 mlx5_vxlan_del_port(priv->vxlan, port);
dccea6bf
SM
4001 mutex_unlock(&priv->state_lock);
4002 kfree(vxlan_work);
4003}
4004
4005static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
4006{
4007 struct mlx5e_vxlan_work *vxlan_work;
4008
4009 vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
4010 if (!vxlan_work)
4011 return;
4012
4013 if (add)
4014 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
4015 else
4016 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
4017
4018 vxlan_work->priv = priv;
4019 vxlan_work->port = port;
4020 queue_work(priv->wq, &vxlan_work->work);
4021}
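/* The udp_tunnel add/del ndos may be called in atomic context, hence
 * the GFP_ATOMIC allocation here; the actual port table update is
 * deferred to the driver workqueue, where it runs under
 * priv->state_lock.
 */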
4022
1ad9a00a
PB
4023static void mlx5e_add_vxlan_port(struct net_device *netdev,
4024 struct udp_tunnel_info *ti)
b3f63c3d
MF
4025{
4026 struct mlx5e_priv *priv = netdev_priv(netdev);
4027
974c3f30
AD
4028 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4029 return;
4030
a3c785d7 4031 if (!mlx5_vxlan_allowed(priv->vxlan))
b3f63c3d
MF
4032 return;
4033
278d7f3d 4034 mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
b3f63c3d
MF
4035}
4036
1ad9a00a
PB
4037static void mlx5e_del_vxlan_port(struct net_device *netdev,
4038 struct udp_tunnel_info *ti)
b3f63c3d
MF
4039{
4040 struct mlx5e_priv *priv = netdev_priv(netdev);
4041
974c3f30
AD
4042 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4043 return;
4044
a3c785d7 4045 if (!mlx5_vxlan_allowed(priv->vxlan))
b3f63c3d
MF
4046 return;
4047
278d7f3d 4048 mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
b3f63c3d
MF
4049}
4050
27299841
GP
4051static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4052 struct sk_buff *skb,
4053 netdev_features_t features)
b3f63c3d 4054{
2989ad1e 4055 unsigned int offset = 0;
b3f63c3d 4056 struct udphdr *udph;
27299841
GP
4057 u8 proto;
4058 u16 port;
b3f63c3d
MF
4059
4060 switch (vlan_get_protocol(skb)) {
4061 case htons(ETH_P_IP):
4062 proto = ip_hdr(skb)->protocol;
4063 break;
4064 case htons(ETH_P_IPV6):
2989ad1e 4065 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
b3f63c3d
MF
4066 break;
4067 default:
4068 goto out;
4069 }
4070
27299841
GP
4071 switch (proto) {
4072 case IPPROTO_GRE:
4073 return features;
4074 case IPPROTO_UDP:
b3f63c3d
MF
4075 udph = udp_hdr(skb);
4076 port = be16_to_cpu(udph->dest);
b3f63c3d 4077
 4078 /* Check whether the UDP dport is offloaded by the HW */
a3c785d7 4079 if (mlx5_vxlan_lookup_port(priv->vxlan, port))
27299841
GP
4080 return features;
4081 }
b3f63c3d
MF
4082
4083out:
 4084 /* Disable CSUM and GSO if the UDP dport is not offloaded by the HW */
4085 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4086}
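/* Net effect of the checks above: GRE keeps the offloads
 * unconditionally, UDP keeps them only when the destination port is a
 * VXLAN port known to the device, and everything else falls back to
 * software checksum and segmentation.
 */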
4087
4088static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4089 struct net_device *netdev,
4090 netdev_features_t features)
4091{
4092 struct mlx5e_priv *priv = netdev_priv(netdev);
4093
4094 features = vlan_features_check(skb, features);
4095 features = vxlan_features_check(skb, features);
4096
2ac9cfe7
IT
4097#ifdef CONFIG_MLX5_EN_IPSEC
4098 if (mlx5e_ipsec_feature_check(skb, netdev, features))
4099 return features;
4100#endif
4101
b3f63c3d
MF
 4102 /* Check whether the tunneled packet's offload is supported by the HW */
4103 if (skb->encapsulation &&
4104 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
27299841 4105 return mlx5e_tunnel_features_check(priv, skb, features);
b3f63c3d
MF
4106
4107 return features;
4108}
4109
7ca560b5
EBE
4110static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
4111 struct mlx5e_txqsq *sq)
4112{
7b2117bb 4113 struct mlx5_eq *eq = sq->cq.mcq.eq;
7ca560b5
EBE
4114 u32 eqe_count;
4115
7ca560b5 4116 netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
7b2117bb 4117 eq->eqn, eq->cons_index, eq->irqn);
7ca560b5
EBE
4118
4119 eqe_count = mlx5_eq_poll_irq_disabled(eq);
4120 if (!eqe_count)
4121 return false;
4122
4123 netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
05909bab 4124 sq->channel->stats->eq_rearm++;
7ca560b5
EBE
4125 return true;
4126}
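/* Recovery heuristic: polling the EQ with its IRQ disabled and finding
 * pending EQEs means an interrupt was lost rather than the queue being
 * genuinely stuck; such events are counted in the eq_rearm statistic.
 */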
4127
bfc647d5 4128static void mlx5e_tx_timeout_work(struct work_struct *work)
3947ca18 4129{
bfc647d5
EBE
4130 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4131 tx_timeout_work);
4132 struct net_device *dev = priv->netdev;
7ca560b5 4133 bool reopen_channels = false;
bfc647d5 4134 int i, err;
3947ca18 4135
bfc647d5
EBE
4136 rtnl_lock();
4137 mutex_lock(&priv->state_lock);
4138
4139 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4140 goto unlock;
3947ca18 4141
6a9764ef 4142 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
84990945 4143 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
acc6c595 4144 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3947ca18 4145
84990945 4146 if (!netif_xmit_stopped(dev_queue))
3947ca18 4147 continue;
bfc647d5
EBE
4148
4149 netdev_err(dev,
4150 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
84990945
EBE
4151 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
4152 jiffies_to_usecs(jiffies - dev_queue->trans_start));
3a32b26a 4153
7ca560b5
EBE
 4154 /* If we recovered a lost interrupt, the TX timeout is most likely
 4155 * resolved; skip reopening the channels.
 4156 */
4157 if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
4158 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
4159 reopen_channels = true;
4160 }
3947ca18
DJ
4161 }
4162
bfc647d5
EBE
4163 if (!reopen_channels)
4164 goto unlock;
4165
4166 mlx5e_close_locked(dev);
4167 err = mlx5e_open_locked(dev);
4168 if (err)
4169 netdev_err(priv->netdev,
4170 "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
4171 err);
4172
4173unlock:
4174 mutex_unlock(&priv->state_lock);
4175 rtnl_unlock();
4176}
4177
4178static void mlx5e_tx_timeout(struct net_device *dev)
4179{
4180 struct mlx5e_priv *priv = netdev_priv(dev);
4181
4182 netdev_err(dev, "TX timeout detected\n");
4183 queue_work(priv->wq, &priv->tx_timeout_work);
3947ca18
DJ
4184}
4185
a26a5bdf 4186static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
0ec13877
TT
4187{
4188 struct net_device *netdev = priv->netdev;
a26a5bdf 4189 struct mlx5e_channels new_channels = {};
0ec13877
TT
4190
4191 if (priv->channels.params.lro_en) {
4192 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4193 return -EINVAL;
4194 }
4195
4196 if (MLX5_IPSEC_DEV(priv->mdev)) {
4197 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
4198 return -EINVAL;
4199 }
4200
a26a5bdf
TT
4201 new_channels.params = priv->channels.params;
4202 new_channels.params.xdp_prog = prog;
4203
4204 if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
4205 netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
4206 new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
4207 return -EINVAL;
4208 }
4209
0ec13877
TT
4210 return 0;
4211}
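/* XDP preconditions checked above: LRO and IPSec offload must be off,
 * and the RX buffer layout must stay linear, which bounds the MTU by
 * MLX5E_XDP_MAX_MTU.
 */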
4212
86994156
RS
4213static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4214{
4215 struct mlx5e_priv *priv = netdev_priv(netdev);
4216 struct bpf_prog *old_prog;
86994156 4217 bool reset, was_opened;
0ec13877 4218 int err;
86994156
RS
4219 int i;
4220
4221 mutex_lock(&priv->state_lock);
4222
0ec13877 4223 if (prog) {
a26a5bdf 4224 err = mlx5e_xdp_allowed(priv, prog);
0ec13877
TT
4225 if (err)
4226 goto unlock;
547eede0
IT
4227 }
4228
86994156
RS
4229 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4230 /* no need for full reset when exchanging programs */
6a9764ef 4231 reset = (!priv->channels.params.xdp_prog || !prog);
86994156
RS
4232
4233 if (was_opened && reset)
4234 mlx5e_close_locked(netdev);
c54c0629
DB
4235 if (was_opened && !reset) {
4236 /* num_channels is invariant here, so we can take the
4237 * batched reference right upfront.
4238 */
6a9764ef 4239 prog = bpf_prog_add(prog, priv->channels.num);
c54c0629
DB
4240 if (IS_ERR(prog)) {
4241 err = PTR_ERR(prog);
4242 goto unlock;
4243 }
4244 }
86994156 4245
c54c0629
DB
 4246 /* Exchange programs; we keep the extra prog reference taken from the
 4247 * caller as long as we don't fail from this point onwards.
 4248 */
6a9764ef 4249 old_prog = xchg(&priv->channels.params.xdp_prog, prog);
86994156
RS
4250 if (old_prog)
4251 bpf_prog_put(old_prog);
4252
4253 if (reset) /* change RQ type according to priv->xdp_prog */
2a0f561b 4254 mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
86994156
RS
4255
4256 if (was_opened && reset)
4257 mlx5e_open_locked(netdev);
4258
4259 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
4260 goto unlock;
4261
 4262 /* When exchanging programs without a reset, we update the ref counts
 4263 * on behalf of the channels' RQs here.
 4264 */
ff9c852f
SM
4265 for (i = 0; i < priv->channels.num; i++) {
4266 struct mlx5e_channel *c = priv->channels.c[i];
86994156 4267
c0f1147d 4268 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
4269 napi_synchronize(&c->napi);
4270 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
4271
4272 old_prog = xchg(&c->rq.xdp_prog, prog);
4273
c0f1147d 4274 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156 4275 /* napi_schedule in case we have missed anything */
86994156
RS
4276 napi_schedule(&c->napi);
4277
4278 if (old_prog)
4279 bpf_prog_put(old_prog);
4280 }
4281
4282unlock:
4283 mutex_unlock(&priv->state_lock);
4284 return err;
4285}
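/* When one program is swapped for another the channels stay up: each
 * RQ is disabled, NAPI is synchronized so no polling can observe the
 * old pointer, the program is exchanged with xchg(), and the RQ is
 * re-enabled. The batched bpf_prog_add() above takes one reference per
 * channel up front so this loop cannot fail midway.
 */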
4286
821b2e29 4287static u32 mlx5e_xdp_query(struct net_device *dev)
86994156
RS
4288{
4289 struct mlx5e_priv *priv = netdev_priv(dev);
821b2e29
MKL
4290 const struct bpf_prog *xdp_prog;
4291 u32 prog_id = 0;
86994156 4292
821b2e29
MKL
4293 mutex_lock(&priv->state_lock);
4294 xdp_prog = priv->channels.params.xdp_prog;
4295 if (xdp_prog)
4296 prog_id = xdp_prog->aux->id;
4297 mutex_unlock(&priv->state_lock);
4298
4299 return prog_id;
86994156
RS
4300}
4301
f4e63525 4302static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
86994156
RS
4303{
4304 switch (xdp->command) {
4305 case XDP_SETUP_PROG:
4306 return mlx5e_xdp_set(dev, xdp->prog);
4307 case XDP_QUERY_PROG:
821b2e29 4308 xdp->prog_id = mlx5e_xdp_query(dev);
86994156
RS
4309 return 0;
4310 default:
4311 return -EINVAL;
4312 }
4313}
4314
80378384
CO
4315#ifdef CONFIG_NET_POLL_CONTROLLER
4316/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs
4317 * without re-enabling interrupts.
4318 */
4319static void mlx5e_netpoll(struct net_device *dev)
4320{
4321 struct mlx5e_priv *priv = netdev_priv(dev);
ff9c852f
SM
4322 struct mlx5e_channels *chs = &priv->channels;
4323
80378384
CO
4324 int i;
4325
ff9c852f
SM
4326 for (i = 0; i < chs->num; i++)
4327 napi_schedule(&chs->c[i]->napi);
80378384
CO
4328}
4329#endif
4330
e80541ec 4331static const struct net_device_ops mlx5e_netdev_ops = {
f62b8bb8
AV
4332 .ndo_open = mlx5e_open,
4333 .ndo_stop = mlx5e_close,
4334 .ndo_start_xmit = mlx5e_xmit,
0cf0f6d3 4335 .ndo_setup_tc = mlx5e_setup_tc,
08fb1dac 4336 .ndo_select_queue = mlx5e_select_queue,
f62b8bb8
AV
4337 .ndo_get_stats64 = mlx5e_get_stats,
4338 .ndo_set_rx_mode = mlx5e_set_rx_mode,
4339 .ndo_set_mac_address = mlx5e_set_mac,
b0eed40e
SM
4340 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
4341 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
f62b8bb8 4342 .ndo_set_features = mlx5e_set_features,
7d92d580 4343 .ndo_fix_features = mlx5e_fix_features,
250a42b6 4344 .ndo_change_mtu = mlx5e_change_nic_mtu,
b0eed40e 4345 .ndo_do_ioctl = mlx5e_ioctl,
507f0c81 4346 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
706b3583
SM
4347 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
4348 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
4349 .ndo_features_check = mlx5e_features_check,
45bf454a
MG
4350#ifdef CONFIG_RFS_ACCEL
4351 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4352#endif
3947ca18 4353 .ndo_tx_timeout = mlx5e_tx_timeout,
f4e63525 4354 .ndo_bpf = mlx5e_xdp,
58b99ee3 4355 .ndo_xdp_xmit = mlx5e_xdp_xmit,
80378384
CO
4356#ifdef CONFIG_NET_POLL_CONTROLLER
4357 .ndo_poll_controller = mlx5e_netpoll,
4358#endif
e80541ec 4359#ifdef CONFIG_MLX5_ESWITCH
706b3583 4360 /* SRIOV E-Switch NDOs */
b0eed40e
SM
4361 .ndo_set_vf_mac = mlx5e_set_vf_mac,
4362 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
f942380c 4363 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
1edc57e2 4364 .ndo_set_vf_trust = mlx5e_set_vf_trust,
bd77bf1c 4365 .ndo_set_vf_rate = mlx5e_set_vf_rate,
b0eed40e
SM
4366 .ndo_get_vf_config = mlx5e_get_vf_config,
4367 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
4368 .ndo_get_vf_stats = mlx5e_get_vf_stats,
370bad0f
OG
4369 .ndo_has_offload_stats = mlx5e_has_offload_stats,
4370 .ndo_get_offload_stats = mlx5e_get_offload_stats,
e80541ec 4371#endif
f62b8bb8
AV
4372};
4373
4374static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
4375{
4376 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
9eb78923 4377 return -EOPNOTSUPP;
f62b8bb8
AV
4378 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
4379 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
4380 !MLX5_CAP_ETH(mdev, csum_cap) ||
4381 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
4382 !MLX5_CAP_ETH(mdev, vlan_cap) ||
796a27ec
GP
4383 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
4384 MLX5_CAP_FLOWTABLE(mdev,
4385 flow_table_properties_nic_receive.max_ft_level)
4386 < 3) {
f62b8bb8
AV
4387 mlx5_core_warn(mdev,
4388 "Not creating net device, some required device capabilities are missing\n");
9eb78923 4389 return -EOPNOTSUPP;
f62b8bb8 4390 }
66189961
TT
4391 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
4392 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
7524a5d8 4393 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3e432ab6 4394 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
66189961 4395
f62b8bb8
AV
4396 return 0;
4397}
4398
d4b6c488 4399void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
85082dba
TT
4400 int num_channels)
4401{
4402 int i;
4403
4404 for (i = 0; i < len; i++)
4405 indirection_rqt[i] = i % num_channels;
4406}
4407
0608d4db 4408static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
b797a684 4409{
0608d4db
TT
4410 u32 link_speed = 0;
4411 u32 pci_bw = 0;
b797a684 4412
2c81bfd5 4413 mlx5e_port_max_linkspeed(mdev, &link_speed);
3c0d551e 4414 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
0608d4db
TT
4415 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4416 link_speed, pci_bw);
4417
4418#define MLX5E_SLOW_PCI_RATIO (2)
4419
4420 return link_speed && pci_bw &&
4421 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
0f6e4cf6
EBE
4422}
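/* Illustrative numbers (not from the source): a 100000 Mb/s port with
 * ~63000 Mb/s of available PCI bandwidth gives 100000 < 2 * 63000, so
 * the link is not considered PCI-bound; the same port behind
 * ~31500 Mb/s would be, which biases defaults such as CQE compression
 * toward saving bus bandwidth.
 */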
4423
cbce4f44 4424static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
0088cbbc 4425{
cbce4f44
TG
4426 struct net_dim_cq_moder moder;
4427
4428 moder.cq_period_mode = cq_period_mode;
4429 moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4430 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4431 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4432 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4433
4434 return moder;
4435}
0088cbbc 4436
cbce4f44
TG
4437static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4438{
4439 struct net_dim_cq_moder moder;
0088cbbc 4440
cbce4f44
TG
4441 moder.cq_period_mode = cq_period_mode;
4442 moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4443 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
0088cbbc 4444 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
cbce4f44
TG
4445 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4446
4447 return moder;
4448}
4449
4450static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4451{
4452 return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
4453 NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4454 NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4455}
4456
4457void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4458{
4459 if (params->tx_dim_enabled) {
4460 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4461
4462 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4463 } else {
4464 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4465 }
0088cbbc
TG
4466
4467 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4468 params->tx_cq_moderation.cq_period_mode ==
4469 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4470}
4471
9908aa29
TT
4472void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4473{
9a317425 4474 if (params->rx_dim_enabled) {
cbce4f44
TG
4475 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4476
4477 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4478 } else {
4479 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
9a317425 4480 }
457fcd8a 4481
6a9764ef 4482 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
0088cbbc
TG
4483 params->rx_cq_moderation.cq_period_mode ==
4484 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
9908aa29
TT
4485}
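/* For both RX and TX, moderation defaults come from net_dim when
 * dynamic interrupt moderation is enabled and from the fixed driver
 * defaults otherwise; the pflag records whether the resulting CQ
 * period mode is CQE-based.
 */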
4486
707129dc 4487static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
2b029556
SM
4488{
4489 int i;
4490
4491 /* The supported periods are organized in ascending order */
4492 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4493 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4494 break;
4495
4496 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4497}
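/* Picks the smallest supported LRO timer period that is >= the wanted
 * timeout, falling back to the largest supported period when the wish
 * exceeds them all (the array is sorted in ascending order).
 */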
4498
8f493ffd
SM
4499void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4500 struct mlx5e_params *params,
472a1e44 4501 u16 max_channels, u16 mtu)
f62b8bb8 4502{
48bfc397 4503 u8 rx_cq_period_mode;
2fc4bfb7 4504
472a1e44
TT
4505 params->sw_mtu = mtu;
4506 params->hard_mtu = MLX5E_ETH_HARD_MTU;
6a9764ef
SM
4507 params->num_channels = max_channels;
4508 params->num_tc = 1;
2b029556 4509
6a9764ef
SM
4510 /* SQ */
4511 params->log_sq_size = is_kdump_kernel() ?
b4e029da
KH
4512 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4513 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
461017cb 4514
b797a684 4515 /* set CQE compression */
6a9764ef 4516 params->rx_cqe_compress_def = false;
b797a684 4517 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
e53eef63 4518 MLX5_CAP_GEN(mdev, vport_group_manager))
0608d4db 4519 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
0f6e4cf6 4520
6a9764ef
SM
4521 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4522
4523 /* RQ */
5ffd8194
TT
4524 /* Prefer Striding RQ, unless any of the following holds:
4525 * - Striding RQ configuration is not possible/supported.
4526 * - Slow PCI heuristic.
4527 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
4528 */
4529 if (!slow_pci_heuristic(mdev) &&
4530 mlx5e_striding_rq_possible(mdev, params) &&
4531 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
4532 !mlx5e_rx_is_linear_skb(mdev, params)))
4533 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
2a0f561b
TT
4534 mlx5e_set_rq_type(mdev, params);
4535 mlx5e_init_rq_type_params(mdev, params);
b797a684 4536
6a9764ef 4537 /* HW LRO */
c139dbfd 4538
5426a0b2 4539 /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
6a9764ef 4540 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
619a8f2a
TT
4541 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
4542 params->lro_en = !slow_pci_heuristic(mdev);
6a9764ef 4543 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
b0d4660b 4544
6a9764ef 4545 /* CQ moderation params */
48bfc397 4546 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
6a9764ef
SM
4547 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4548 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
9a317425 4549 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
cbce4f44 4550 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
48bfc397
TG
4551 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4552 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
9908aa29 4553
6a9764ef 4554 /* TX inline */
fbcb127e 4555 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
a6f402e4 4556
6a9764ef
SM
4557 /* RSS */
4558 params->rss_hfunc = ETH_RSS_HASH_XOR;
4559 netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
d4b6c488 4560 mlx5e_build_default_indir_rqt(params->indirection_rqt,
6a9764ef
SM
4561 MLX5E_INDIR_RQT_SIZE, max_channels);
4562}
f62b8bb8 4563
6a9764ef
SM
4564static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4565 struct net_device *netdev,
4566 const struct mlx5e_profile *profile,
4567 void *ppriv)
4568{
4569 struct mlx5e_priv *priv = netdev_priv(netdev);
57afead5 4570
6a9764ef
SM
4571 priv->mdev = mdev;
4572 priv->netdev = netdev;
4573 priv->profile = profile;
4574 priv->ppriv = ppriv;
79c48764 4575 priv->msglevel = MLX5E_MSG_LEVEL;
05909bab 4576 priv->max_opened_tc = 1;
2d75b2bc 4577
472a1e44
TT
4578 mlx5e_build_nic_params(mdev, &priv->channels.params,
4579 profile->max_nch(mdev), netdev->mtu);
9908aa29 4580
f62b8bb8
AV
4581 mutex_init(&priv->state_lock);
4582
4583 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4584 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3947ca18 4585 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
f62b8bb8 4586 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
237f258c
FD
4587
4588 mlx5e_timestamp_init(priv);
f62b8bb8
AV
4589}
4590
4591static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4592{
4593 struct mlx5e_priv *priv = netdev_priv(netdev);
4594
e1d7d349 4595 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
108805fc
SM
4596 if (is_zero_ether_addr(netdev->dev_addr) &&
4597 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4598 eth_hw_addr_random(netdev);
4599 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4600 }
f62b8bb8
AV
4601}
4602
f125376b 4603#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
cb67b832
HHZ
4604static const struct switchdev_ops mlx5e_switchdev_ops = {
4605 .switchdev_port_attr_get = mlx5e_attr_get,
4606};
e80541ec 4607#endif
cb67b832 4608
6bfd390b 4609static void mlx5e_build_nic_netdev(struct net_device *netdev)
f62b8bb8
AV
4610{
4611 struct mlx5e_priv *priv = netdev_priv(netdev);
4612 struct mlx5_core_dev *mdev = priv->mdev;
94cb1ebb
EBE
4613 bool fcs_supported;
4614 bool fcs_enabled;
f62b8bb8
AV
4615
4616 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4617
e80541ec
SM
4618 netdev->netdev_ops = &mlx5e_netdev_ops;
4619
08fb1dac 4620#ifdef CONFIG_MLX5_CORE_EN_DCB
e80541ec
SM
4621 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4622 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
08fb1dac 4623#endif
66e49ded 4624
f62b8bb8
AV
4625 netdev->watchdog_timeo = 15 * HZ;
4626
4627 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4628
12be4b21 4629 netdev->vlan_features |= NETIF_F_SG;
f62b8bb8
AV
4630 netdev->vlan_features |= NETIF_F_IP_CSUM;
4631 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
4632 netdev->vlan_features |= NETIF_F_GRO;
4633 netdev->vlan_features |= NETIF_F_TSO;
4634 netdev->vlan_features |= NETIF_F_TSO6;
4635 netdev->vlan_features |= NETIF_F_RXCSUM;
4636 netdev->vlan_features |= NETIF_F_RXHASH;
4637
71186172
AH
4638 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
4639 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
4640
6c3a823e
TT
4641 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4642 mlx5e_check_fragmented_striding_rq_cap(mdev))
f62b8bb8
AV
4643 netdev->vlan_features |= NETIF_F_LRO;
4644
4645 netdev->hw_features = netdev->vlan_features;
e4cf27bd 4646 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
f62b8bb8
AV
4647 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4648 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4382c7b9 4649 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
f62b8bb8 4650
a3c785d7 4651 if (mlx5_vxlan_allowed(priv->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
b3f63c3d 4652 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
f3ed653c 4653 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
b3f63c3d
MF
4654 netdev->hw_enc_features |= NETIF_F_TSO;
4655 netdev->hw_enc_features |= NETIF_F_TSO6;
27299841
GP
4656 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4657 }
4658
a3c785d7 4659 if (mlx5_vxlan_allowed(priv->vxlan)) {
27299841
GP
4660 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4661 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4662 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4663 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b49663c8 4664 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
b3f63c3d
MF
4665 }
4666
27299841
GP
4667 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4668 netdev->hw_features |= NETIF_F_GSO_GRE |
4669 NETIF_F_GSO_GRE_CSUM;
4670 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4671 NETIF_F_GSO_GRE_CSUM;
4672 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4673 NETIF_F_GSO_GRE_CSUM;
4674 }
4675
3f44899e
BP
4676 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
4677 netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
4678 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
4679 netdev->features |= NETIF_F_GSO_UDP_L4;
4680
94cb1ebb
EBE
4681 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4682
4683 if (fcs_supported)
4684 netdev->hw_features |= NETIF_F_RXALL;
4685
102722fc
GE
4686 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4687 netdev->hw_features |= NETIF_F_RXFCS;
4688
f62b8bb8 4689 netdev->features = netdev->hw_features;
6a9764ef 4690 if (!priv->channels.params.lro_en)
f62b8bb8
AV
4691 netdev->features &= ~NETIF_F_LRO;
4692
94cb1ebb
EBE
4693 if (fcs_enabled)
4694 netdev->features &= ~NETIF_F_RXALL;
4695
102722fc
GE
4696 if (!priv->channels.params.scatter_fcs_en)
4697 netdev->features &= ~NETIF_F_RXFCS;
4698
e8f887ac
AV
4699#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4700 if (FT_CAP(flow_modify_en) &&
4701 FT_CAP(modify_root) &&
4702 FT_CAP(identified_miss_table_mode) &&
1cabe6b0
MG
4703 FT_CAP(flow_table_modify)) {
4704 netdev->hw_features |= NETIF_F_HW_TC;
4705#ifdef CONFIG_RFS_ACCEL
4706 netdev->hw_features |= NETIF_F_NTUPLE;
4707#endif
4708 }
e8f887ac 4709
f62b8bb8 4710 netdev->features |= NETIF_F_HIGHDMA;
7d92d580 4711 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
f62b8bb8
AV
4712
4713 netdev->priv_flags |= IFF_UNICAST_FLT;
4714
4715 mlx5e_set_netdev_dev_addr(netdev);
cb67b832 4716
f125376b 4717#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
733d3e54 4718 if (MLX5_ESWITCH_MANAGER(mdev))
cb67b832
HHZ
4719 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4720#endif
547eede0
IT
4721
4722 mlx5e_ipsec_build_netdev(priv);
c83294b9 4723 mlx5e_tls_build_netdev(priv);
f62b8bb8
AV
4724}
4725
7cbaf9a3 4726static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
593cf338
RS
4727{
4728 struct mlx5_core_dev *mdev = priv->mdev;
4729 int err;
4730
4731 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4732 if (err) {
4733 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4734 priv->q_counter = 0;
4735 }
7cbaf9a3
MS
4736
4737 err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
4738 if (err) {
4739 mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
4740 priv->drop_rq_q_counter = 0;
4741 }
593cf338
RS
4742}
4743
7cbaf9a3 4744static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
593cf338 4745{
7cbaf9a3
MS
4746 if (priv->q_counter)
4747 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
593cf338 4748
7cbaf9a3
MS
4749 if (priv->drop_rq_q_counter)
4750 mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
593cf338
RS
4751}
4752
6bfd390b
HHZ
4753static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4754 struct net_device *netdev,
127ea380
HHZ
4755 const struct mlx5e_profile *profile,
4756 void *ppriv)
6bfd390b
HHZ
4757{
4758 struct mlx5e_priv *priv = netdev_priv(netdev);
547eede0 4759 int err;
6bfd390b 4760
a3c785d7
SM
4761 priv->vxlan = mlx5_vxlan_create(mdev);
4762
127ea380 4763 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
547eede0
IT
4764 err = mlx5e_ipsec_init(priv);
4765 if (err)
4766 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
43585a41
IL
4767 err = mlx5e_tls_init(priv);
4768 if (err)
4769 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
6bfd390b 4770 mlx5e_build_nic_netdev(netdev);
8bfaf07f 4771 mlx5e_build_tc2txq_maps(priv);
6bfd390b
HHZ
4772}
4773
4774static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4775{
a3c785d7 4776 mlx5_vxlan_destroy(priv->vxlan);
43585a41 4777 mlx5e_tls_cleanup(priv);
547eede0 4778 mlx5e_ipsec_cleanup(priv);
6bfd390b
HHZ
4779}
4780
4781static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4782{
4783 struct mlx5_core_dev *mdev = priv->mdev;
4784 int err;
6bfd390b 4785
8f493ffd
SM
4786 err = mlx5e_create_indirect_rqt(priv);
4787 if (err)
6bfd390b 4788 return err;
6bfd390b
HHZ
4789
4790 err = mlx5e_create_direct_rqts(priv);
8f493ffd 4791 if (err)
6bfd390b 4792 goto err_destroy_indirect_rqts;
6bfd390b
HHZ
4793
4794 err = mlx5e_create_indirect_tirs(priv);
8f493ffd 4795 if (err)
6bfd390b 4796 goto err_destroy_direct_rqts;
6bfd390b
HHZ
4797
4798 err = mlx5e_create_direct_tirs(priv);
8f493ffd 4799 if (err)
6bfd390b 4800 goto err_destroy_indirect_tirs;
6bfd390b
HHZ
4801
4802 err = mlx5e_create_flow_steering(priv);
4803 if (err) {
4804 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4805 goto err_destroy_direct_tirs;
4806 }
4807
655dc3d2 4808 err = mlx5e_tc_nic_init(priv);
6bfd390b
HHZ
4809 if (err)
4810 goto err_destroy_flow_steering;
4811
4812 return 0;
4813
4814err_destroy_flow_steering:
4815 mlx5e_destroy_flow_steering(priv);
4816err_destroy_direct_tirs:
4817 mlx5e_destroy_direct_tirs(priv);
4818err_destroy_indirect_tirs:
4819 mlx5e_destroy_indirect_tirs(priv);
4820err_destroy_direct_rqts:
8f493ffd 4821 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4822err_destroy_indirect_rqts:
4823 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4824 return err;
4825}
4826
4827static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4828{
655dc3d2 4829 mlx5e_tc_nic_cleanup(priv);
6bfd390b
HHZ
4830 mlx5e_destroy_flow_steering(priv);
4831 mlx5e_destroy_direct_tirs(priv);
4832 mlx5e_destroy_indirect_tirs(priv);
8f493ffd 4833 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4834 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4835}
4836
4837static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4838{
4839 int err;
4840
4841 err = mlx5e_create_tises(priv);
4842 if (err) {
4843 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4844 return err;
4845 }
4846
4847#ifdef CONFIG_MLX5_CORE_EN_DCB
e207b7e9 4848 mlx5e_dcbnl_initialize(priv);
6bfd390b
HHZ
4849#endif
4850 return 0;
4851}
4852
4853static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4854{
4855 struct net_device *netdev = priv->netdev;
4856 struct mlx5_core_dev *mdev = priv->mdev;
2c3b5bee
SM
4857 u16 max_mtu;
4858
4859 mlx5e_init_l2_addr(priv);
4860
63bfd399
EBE
 4861 /* Mark the link as currently not needed by the driver */
4862 if (!netif_running(netdev))
4863 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4864
2c3b5bee
SM
4865 /* MTU range: 68 - hw-specific max */
4866 netdev->min_mtu = ETH_MIN_MTU;
4867 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
472a1e44 4868 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
2c3b5bee 4869 mlx5e_set_dev_port_mtu(priv);
6bfd390b 4870
7907f23a
AH
4871 mlx5_lag_add(mdev, netdev);
4872
6bfd390b 4873 mlx5e_enable_async_events(priv);
127ea380 4874
733d3e54 4875 if (MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39 4876 mlx5e_register_vport_reps(priv);
2c3b5bee 4877
610e89e0
SM
4878 if (netdev->reg_state != NETREG_REGISTERED)
4879 return;
2a5e7a13
HN
4880#ifdef CONFIG_MLX5_CORE_EN_DCB
4881 mlx5e_dcbnl_init_app(priv);
4882#endif
610e89e0
SM
4883
4884 queue_work(priv->wq, &priv->set_rx_mode_work);
2c3b5bee
SM
4885
4886 rtnl_lock();
4887 if (netif_running(netdev))
4888 mlx5e_open(netdev);
4889 netif_device_attach(netdev);
4890 rtnl_unlock();
6bfd390b
HHZ
4891}
4892
4893static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4894{
3deef8ce 4895 struct mlx5_core_dev *mdev = priv->mdev;
3deef8ce 4896
2a5e7a13
HN
4897#ifdef CONFIG_MLX5_CORE_EN_DCB
4898 if (priv->netdev->reg_state == NETREG_REGISTERED)
4899 mlx5e_dcbnl_delete_app(priv);
4900#endif
4901
2c3b5bee
SM
4902 rtnl_lock();
4903 if (netif_running(priv->netdev))
4904 mlx5e_close(priv->netdev);
4905 netif_device_detach(priv->netdev);
4906 rtnl_unlock();
4907
6bfd390b 4908 queue_work(priv->wq, &priv->set_rx_mode_work);
1d447a39 4909
733d3e54 4910 if (MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39
SM
4911 mlx5e_unregister_vport_reps(priv);
4912
6bfd390b 4913 mlx5e_disable_async_events(priv);
3deef8ce 4914 mlx5_lag_remove(mdev);
6bfd390b
HHZ
4915}
4916
4917static const struct mlx5e_profile mlx5e_nic_profile = {
4918 .init = mlx5e_nic_init,
4919 .cleanup = mlx5e_nic_cleanup,
4920 .init_rx = mlx5e_init_nic_rx,
4921 .cleanup_rx = mlx5e_cleanup_nic_rx,
4922 .init_tx = mlx5e_init_nic_tx,
4923 .cleanup_tx = mlx5e_cleanup_nic_tx,
4924 .enable = mlx5e_nic_enable,
4925 .disable = mlx5e_nic_disable,
3834a5e6 4926 .update_stats = mlx5e_update_ndo_stats,
6bfd390b 4927 .max_nch = mlx5e_get_max_num_channels,
7ca42c80 4928 .update_carrier = mlx5e_update_carrier,
20fd0c19
SM
4929 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
4930 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
6bfd390b
HHZ
4931 .max_tc = MLX5E_MAX_NUM_TC,
4932};
4933
2c3b5bee
SM
4934/* mlx5e generic netdev management API (move to en_common.c) */
4935
26e59d80
MHY
4936struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4937 const struct mlx5e_profile *profile,
4938 void *ppriv)
f62b8bb8 4939{
26e59d80 4940 int nch = profile->max_nch(mdev);
f62b8bb8
AV
4941 struct net_device *netdev;
4942 struct mlx5e_priv *priv;
f62b8bb8 4943
08fb1dac 4944 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 4945 nch * profile->max_tc,
08fb1dac 4946 nch);
f62b8bb8
AV
4947 if (!netdev) {
4948 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4949 return NULL;
4950 }
4951
be4891af
SM
4952#ifdef CONFIG_RFS_ACCEL
4953 netdev->rx_cpu_rmap = mdev->rmap;
4954#endif
4955
127ea380 4956 profile->init(mdev, netdev, profile, ppriv);
f62b8bb8
AV
4957
4958 netif_carrier_off(netdev);
4959
4960 priv = netdev_priv(netdev);
4961
7bb29755
MF
4962 priv->wq = create_singlethread_workqueue("mlx5e");
4963 if (!priv->wq)
26e59d80
MHY
4964 goto err_cleanup_nic;
4965
4966 return netdev;
4967
4968err_cleanup_nic:
31ac9338
OG
4969 if (profile->cleanup)
4970 profile->cleanup(priv);
26e59d80
MHY
4971 free_netdev(netdev);
4972
4973 return NULL;
4974}
4975
2c3b5bee 4976int mlx5e_attach_netdev(struct mlx5e_priv *priv)
26e59d80 4977{
2c3b5bee 4978 struct mlx5_core_dev *mdev = priv->mdev;
26e59d80 4979 const struct mlx5e_profile *profile;
26e59d80
MHY
4980 int err;
4981
26e59d80
MHY
4982 profile = priv->profile;
4983 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 4984
6bfd390b
HHZ
4985 err = profile->init_tx(priv);
4986 if (err)
ec8b9981 4987 goto out;
5c50368f 4988
7cbaf9a3
MS
4989 mlx5e_create_q_counters(priv);
4990
4991 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
5c50368f
AS
4992 if (err) {
4993 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
7cbaf9a3 4994 goto err_destroy_q_counters;
5c50368f
AS
4995 }
4996
6bfd390b
HHZ
4997 err = profile->init_rx(priv);
4998 if (err)
5c50368f 4999 goto err_close_drop_rq;
5c50368f 5000
6bfd390b
HHZ
5001 if (profile->enable)
5002 profile->enable(priv);
f62b8bb8 5003
26e59d80 5004 return 0;
5c50368f
AS
5005
5006err_close_drop_rq:
a43b25da 5007 mlx5e_close_drop_rq(&priv->drop_rq);
5c50368f 5008
7cbaf9a3
MS
5009err_destroy_q_counters:
5010 mlx5e_destroy_q_counters(priv);
6bfd390b 5011 profile->cleanup_tx(priv);
5c50368f 5012
26e59d80
MHY
5013out:
5014 return err;
f62b8bb8
AV
5015}
5016
2c3b5bee 5017void mlx5e_detach_netdev(struct mlx5e_priv *priv)
26e59d80 5018{
26e59d80
MHY
5019 const struct mlx5e_profile *profile = priv->profile;
5020
5021 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80 5022
37f304d1
SM
5023 if (profile->disable)
5024 profile->disable(priv);
5025 flush_workqueue(priv->wq);
5026
26e59d80 5027 profile->cleanup_rx(priv);
a43b25da 5028 mlx5e_close_drop_rq(&priv->drop_rq);
7cbaf9a3 5029 mlx5e_destroy_q_counters(priv);
26e59d80 5030 profile->cleanup_tx(priv);
26e59d80
MHY
5031 cancel_delayed_work_sync(&priv->update_stats_work);
5032}
5033
2c3b5bee
SM
5034void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
5035{
5036 const struct mlx5e_profile *profile = priv->profile;
5037 struct net_device *netdev = priv->netdev;
5038
5039 destroy_workqueue(priv->wq);
5040 if (profile->cleanup)
5041 profile->cleanup(priv);
5042 free_netdev(netdev);
5043}
5044
26e59d80
MHY
 5045 /* The scope of mlx5e_attach/mlx5e_detach is limited to creating and
 5046 * destroying the hardware contexts and connecting them to the current netdev.
 5047 */
5048static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
5049{
5050 struct mlx5e_priv *priv = vpriv;
5051 struct net_device *netdev = priv->netdev;
5052 int err;
5053
5054 if (netif_device_present(netdev))
5055 return 0;
5056
5057 err = mlx5e_create_mdev_resources(mdev);
5058 if (err)
5059 return err;
5060
2c3b5bee 5061 err = mlx5e_attach_netdev(priv);
26e59d80
MHY
5062 if (err) {
5063 mlx5e_destroy_mdev_resources(mdev);
5064 return err;
5065 }
5066
5067 return 0;
5068}
5069
5070static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
5071{
5072 struct mlx5e_priv *priv = vpriv;
5073 struct net_device *netdev = priv->netdev;
5074
5075 if (!netif_device_present(netdev))
5076 return;
5077
2c3b5bee 5078 mlx5e_detach_netdev(priv);
26e59d80
MHY
5079 mlx5e_destroy_mdev_resources(mdev);
5080}
5081
b50d292b
HHZ
5082static void *mlx5e_add(struct mlx5_core_dev *mdev)
5083{
07c9f1e5
SM
5084 struct net_device *netdev;
5085 void *rpriv = NULL;
26e59d80 5086 void *priv;
26e59d80 5087 int err;
b50d292b 5088
26e59d80
MHY
5089 err = mlx5e_check_required_hca_cap(mdev);
5090 if (err)
b50d292b
HHZ
5091 return NULL;
5092
e80541ec 5093#ifdef CONFIG_MLX5_ESWITCH
733d3e54 5094 if (MLX5_ESWITCH_MANAGER(mdev)) {
07c9f1e5 5095 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
1d447a39 5096 if (!rpriv) {
07c9f1e5 5097 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
1d447a39
SM
5098 return NULL;
5099 }
1d447a39 5100 }
e80541ec 5101#endif
127ea380 5102
1d447a39 5103 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
26e59d80
MHY
5104 if (!netdev) {
5105 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
07c9f1e5 5106 goto err_free_rpriv;
26e59d80
MHY
5107 }
5108
5109 priv = netdev_priv(netdev);
5110
5111 err = mlx5e_attach(mdev, priv);
5112 if (err) {
5113 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
5114 goto err_destroy_netdev;
5115 }
5116
5117 err = register_netdev(netdev);
5118 if (err) {
5119 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
5120 goto err_detach;
b50d292b 5121 }
26e59d80 5122
2a5e7a13
HN
5123#ifdef CONFIG_MLX5_CORE_EN_DCB
5124 mlx5e_dcbnl_init_app(priv);
5125#endif
26e59d80
MHY
5126 return priv;
5127
5128err_detach:
5129 mlx5e_detach(mdev, priv);
26e59d80 5130err_destroy_netdev:
2c3b5bee 5131 mlx5e_destroy_netdev(priv);
07c9f1e5 5132err_free_rpriv:
1d447a39 5133 kfree(rpriv);
26e59d80 5134 return NULL;
b50d292b
HHZ
5135}
5136
b50d292b
HHZ
5137static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
5138{
5139 struct mlx5e_priv *priv = vpriv;
1d447a39 5140 void *ppriv = priv->ppriv;
127ea380 5141
2a5e7a13
HN
5142#ifdef CONFIG_MLX5_CORE_EN_DCB
5143 mlx5e_dcbnl_delete_app(priv);
5144#endif
5e1e93c7 5145 unregister_netdev(priv->netdev);
26e59d80 5146 mlx5e_detach(mdev, vpriv);
2c3b5bee 5147 mlx5e_destroy_netdev(priv);
1d447a39 5148 kfree(ppriv);
b50d292b
HHZ
5149}
5150
f62b8bb8
AV
5151static void *mlx5e_get_netdev(void *vpriv)
5152{
5153 struct mlx5e_priv *priv = vpriv;
5154
5155 return priv->netdev;
5156}
5157
5158static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
5159 .add = mlx5e_add,
5160 .remove = mlx5e_remove,
26e59d80
MHY
5161 .attach = mlx5e_attach,
5162 .detach = mlx5e_detach,
f62b8bb8
AV
5163 .event = mlx5e_async_event,
5164 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
5165 .get_dev = mlx5e_get_netdev,
5166};
5167
5168void mlx5e_init(void)
5169{
2ac9cfe7 5170 mlx5e_ipsec_build_inverse_table();
665bc539 5171 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
5172 mlx5_register_interface(&mlx5e_interface);
5173}
5174
5175void mlx5e_cleanup(void)
5176{
5177 mlx5_unregister_interface(&mlx5e_interface);
5178}