/* drivers/net/ethernet/mellanox/mlx5/core/en_main.c */
f62b8bb8 1/*
b3f63c3d 2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
f62b8bb8
AV
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
e8f887ac
AV
33#include <net/tc_act/tc_gact.h>
34#include <net/pkt_cls.h>
86d722ad 35#include <linux/mlx5/fs.h>
b3f63c3d 36#include <net/vxlan.h>
86994156 37#include <linux/bpf.h>
1d447a39 38#include "eswitch.h"
f62b8bb8 39#include "en.h"
e8f887ac 40#include "en_tc.h"
1d447a39 41#include "en_rep.h"
547eede0 42#include "en_accel/ipsec.h"
899a59d3
IT
43#include "en_accel/ipsec_rxtx.h"
44#include "accel/ipsec.h"
b3f63c3d 45#include "vxlan.h"
f62b8bb8
AV
46
47struct mlx5e_rq_param {
cb3c7fd4
GR
48 u32 rqc[MLX5_ST_SZ_DW(rqc)];
49 struct mlx5_wq_param wq;
f62b8bb8
AV
50};
51
52struct mlx5e_sq_param {
53 u32 sqc[MLX5_ST_SZ_DW(sqc)];
54 struct mlx5_wq_param wq;
55};
56
57struct mlx5e_cq_param {
58 u32 cqc[MLX5_ST_SZ_DW(cqc)];
59 struct mlx5_wq_param wq;
60 u16 eq_ix;
9908aa29 61 u8 cq_period_mode;
f62b8bb8
AV
62};
63
64struct mlx5e_channel_param {
65 struct mlx5e_rq_param rq;
66 struct mlx5e_sq_param sq;
b5503b99 67 struct mlx5e_sq_param xdp_sq;
d3c9bc27 68 struct mlx5e_sq_param icosq;
f62b8bb8
AV
69 struct mlx5e_cq_param rx_cq;
70 struct mlx5e_cq_param tx_cq;
d3c9bc27 71 struct mlx5e_cq_param icosq_cq;
f62b8bb8
AV
72};
73
2fc4bfb7
SM
74static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
75{
76 return MLX5_CAP_GEN(mdev, striding_rq) &&
77 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
78 MLX5_CAP_ETH(mdev, reg_umr_sq);
79}
80
6a9764ef
SM
81void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
82 struct mlx5e_params *params, u8 rq_type)
2fc4bfb7 83{
6a9764ef
SM
84 params->rq_wq_type = rq_type;
85 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
86 switch (params->rq_wq_type) {
2fc4bfb7 87 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
6a9764ef 88 params->log_rq_size = is_kdump_kernel() ?
b4e029da
KH
89 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
90 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
6a9764ef
SM
91 params->mpwqe_log_stride_sz =
92 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
93 MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
94 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
95 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
96 params->mpwqe_log_stride_sz;
2fc4bfb7
SM
97 break;
98 default: /* MLX5_WQ_TYPE_LINKED_LIST */
6a9764ef 99 params->log_rq_size = is_kdump_kernel() ?
b4e029da
KH
100 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
101 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
bce2b2bf
TT
102 params->rq_headroom = params->xdp_prog ?
103 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
104 params->rq_headroom += NET_IP_ALIGN;
4078e637
TT
105
106 /* Extra room needed for build_skb */
bce2b2bf 107 params->lro_wqe_sz -= params->rq_headroom +
4078e637 108 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2fc4bfb7 109 }
2fc4bfb7 110
6a9764ef
SM
111 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
112 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
113 BIT(params->log_rq_size),
114 BIT(params->mpwqe_log_stride_sz),
115 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
2fc4bfb7
SM
116}
117
6a9764ef 118static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
2fc4bfb7 119{
6a9764ef 120 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
899a59d3 121 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
2fc4bfb7
SM
122 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
123 MLX5_WQ_TYPE_LINKED_LIST;
6a9764ef 124 mlx5e_set_rq_type_params(mdev, params, rq_type);
2fc4bfb7
SM
125}
126
f62b8bb8
AV
127static void mlx5e_update_carrier(struct mlx5e_priv *priv)
128{
129 struct mlx5_core_dev *mdev = priv->mdev;
130 u8 port_state;
131
132 port_state = mlx5_query_vport_state(mdev,
e53eef63
OG
133 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
134 0);
f62b8bb8 135
87424ad5
SD
136 if (port_state == VPORT_STATE_UP) {
137 netdev_info(priv->netdev, "Link up\n");
f62b8bb8 138 netif_carrier_on(priv->netdev);
87424ad5
SD
139 } else {
140 netdev_info(priv->netdev, "Link down\n");
f62b8bb8 141 netif_carrier_off(priv->netdev);
87424ad5 142 }
f62b8bb8
AV
143}
144
145static void mlx5e_update_carrier_work(struct work_struct *work)
146{
147 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
148 update_carrier_work);
149
150 mutex_lock(&priv->state_lock);
151 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
7ca42c80
ES
152 if (priv->profile->update_carrier)
153 priv->profile->update_carrier(priv);
f62b8bb8
AV
154 mutex_unlock(&priv->state_lock);
155}
156
3947ca18
DJ
157static void mlx5e_tx_timeout_work(struct work_struct *work)
158{
159 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
160 tx_timeout_work);
161 int err;
162
163 rtnl_lock();
164 mutex_lock(&priv->state_lock);
165 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
166 goto unlock;
167 mlx5e_close_locked(priv->netdev);
168 err = mlx5e_open_locked(priv->netdev);
169 if (err)
170 netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
171 err);
172unlock:
173 mutex_unlock(&priv->state_lock);
174 rtnl_unlock();
175}
176
9218b44d 177static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
f62b8bb8 178{
1510d728 179 struct mlx5e_sw_stats temp, *s = &temp;
f62b8bb8
AV
180 struct mlx5e_rq_stats *rq_stats;
181 struct mlx5e_sq_stats *sq_stats;
9218b44d 182 u64 tx_offload_none = 0;
f62b8bb8
AV
183 int i, j;
184
9218b44d 185 memset(s, 0, sizeof(*s));
ff9c852f
SM
186 for (i = 0; i < priv->channels.num; i++) {
187 struct mlx5e_channel *c = priv->channels.c[i];
188
189 rq_stats = &c->rq.stats;
f62b8bb8 190
faf4478b
GP
191 s->rx_packets += rq_stats->packets;
192 s->rx_bytes += rq_stats->bytes;
bfe6d8d1
GP
193 s->rx_lro_packets += rq_stats->lro_packets;
194 s->rx_lro_bytes += rq_stats->lro_bytes;
f62b8bb8 195 s->rx_csum_none += rq_stats->csum_none;
bfe6d8d1
GP
196 s->rx_csum_complete += rq_stats->csum_complete;
197 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
86994156 198 s->rx_xdp_drop += rq_stats->xdp_drop;
b5503b99
SM
199 s->rx_xdp_tx += rq_stats->xdp_tx;
200 s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
f62b8bb8 201 s->rx_wqe_err += rq_stats->wqe_err;
461017cb 202 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
54984407 203 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
7219ab34
TT
204 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
205 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
accd5883 206 s->rx_page_reuse += rq_stats->page_reuse;
4415a031
TT
207 s->rx_cache_reuse += rq_stats->cache_reuse;
208 s->rx_cache_full += rq_stats->cache_full;
209 s->rx_cache_empty += rq_stats->cache_empty;
210 s->rx_cache_busy += rq_stats->cache_busy;
70871f1e 211 s->rx_cache_waive += rq_stats->cache_waive;
f62b8bb8 212
6a9764ef 213 for (j = 0; j < priv->channels.params.num_tc; j++) {
ff9c852f 214 sq_stats = &c->sq[j].stats;
f62b8bb8 215
faf4478b
GP
216 s->tx_packets += sq_stats->packets;
217 s->tx_bytes += sq_stats->bytes;
bfe6d8d1
GP
218 s->tx_tso_packets += sq_stats->tso_packets;
219 s->tx_tso_bytes += sq_stats->tso_bytes;
220 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
221 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
f62b8bb8
AV
222 s->tx_queue_stopped += sq_stats->stopped;
223 s->tx_queue_wake += sq_stats->wake;
224 s->tx_queue_dropped += sq_stats->dropped;
c8cf78fe 225 s->tx_xmit_more += sq_stats->xmit_more;
bfe6d8d1
GP
226 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
227 tx_offload_none += sq_stats->csum_none;
f62b8bb8
AV
228 }
229 }
230
9218b44d 231 /* Update calculated offload counters */
bfe6d8d1
GP
232 s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
233 s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
121fcdc8 234
bfe6d8d1 235 s->link_down_events_phy = MLX5_GET(ppcnt_reg,
121fcdc8
GP
236 priv->stats.pport.phy_counters,
237 counter_set.phys_layer_cntrs.link_down_events);
1510d728 238 memcpy(&priv->stats.sw, s, sizeof(*s));
9218b44d
GP
239}
240
241static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
242{
243 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
244 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
c4f287c4 245 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
9218b44d
GP
246 struct mlx5_core_dev *mdev = priv->mdev;
247
f62b8bb8
AV
248 MLX5_SET(query_vport_counter_in, in, opcode,
249 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
250 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
251 MLX5_SET(query_vport_counter_in, in, other_vport, 0);
252
9218b44d
GP
253 mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
254}
255
3834a5e6 256static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
9218b44d
GP
257{
258 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
259 struct mlx5_core_dev *mdev = priv->mdev;
0883b4f4 260 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
9218b44d 261 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
cf678570 262 int prio;
9218b44d 263 void *out;
f62b8bb8 264
9218b44d 265 MLX5_SET(ppcnt_reg, in, local_port, 1);
f62b8bb8 266
9218b44d
GP
267 out = pstats->IEEE_802_3_counters;
268 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
269 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
f62b8bb8 270
3834a5e6
GP
271 if (!full)
272 return;
273
9218b44d
GP
274 out = pstats->RFC_2863_counters;
275 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
276 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
277
278 out = pstats->RFC_2819_counters;
279 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
280 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
593cf338 281
121fcdc8
GP
282 out = pstats->phy_counters;
283 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
284 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
285
5db0a4f6
GP
286 if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
287 out = pstats->phy_statistical_counters;
288 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
068aef33
GP
289 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
290 }
291
292 if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
293 out = pstats->eth_ext_counters;
294 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
5db0a4f6
GP
295 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
296 }
297
cf678570
GP
298 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
299 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
300 out = pstats->per_prio_counters[prio];
301 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
302 mlx5_core_access_reg(mdev, in, sz, out, sz,
303 MLX5_REG_PPCNT, 0, 0);
304 }
9218b44d
GP
305}
306
307static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
308{
309 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
432609a4
GP
310 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
311 int err;
9218b44d
GP
312
313 if (!priv->q_counter)
314 return;
315
432609a4
GP
316 err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
317 if (err)
318 return;
319
320 qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
9218b44d
GP
321}
322
0f7f3481
GP
323static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
324{
325 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
326 struct mlx5_core_dev *mdev = priv->mdev;
0883b4f4 327 u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
0f7f3481
GP
328 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
329 void *out;
0f7f3481
GP
330
331 if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
332 return;
333
0f7f3481
GP
334 out = pcie_stats->pcie_perf_counters;
335 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
336 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
0f7f3481
GP
337}
338
3834a5e6 339void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
9218b44d 340{
164f16f7 341 if (full) {
3834a5e6 342 mlx5e_update_pcie_counters(priv);
164f16f7
IT
343 mlx5e_ipsec_update_stats(priv);
344 }
3834a5e6 345 mlx5e_update_pport_counters(priv, full);
3dd69e3d
SM
346 mlx5e_update_vport_counters(priv);
347 mlx5e_update_q_counter(priv);
121fcdc8 348 mlx5e_update_sw_counters(priv);
f62b8bb8
AV
349}
350
3834a5e6
GP
351static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
352{
353 mlx5e_update_stats(priv, false);
354}
355
cb67b832 356void mlx5e_update_stats_work(struct work_struct *work)
f62b8bb8
AV
357{
358 struct delayed_work *dwork = to_delayed_work(work);
359 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
360 update_stats_work);
361 mutex_lock(&priv->state_lock);
362 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
6bfd390b 363 priv->profile->update_stats(priv);
7bb29755
MF
364 queue_delayed_work(priv->wq, dwork,
365 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
f62b8bb8
AV
366 }
367 mutex_unlock(&priv->state_lock);
368}
369
daa21560
TT
370static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
371 enum mlx5_dev_event event, unsigned long param)
f62b8bb8 372{
daa21560 373 struct mlx5e_priv *priv = vpriv;
ee7f1220
EE
374 struct ptp_clock_event ptp_event;
375 struct mlx5_eqe *eqe = NULL;
daa21560 376
e0f46eb9 377 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
daa21560
TT
378 return;
379
f62b8bb8
AV
380 switch (event) {
381 case MLX5_DEV_EVENT_PORT_UP:
382 case MLX5_DEV_EVENT_PORT_DOWN:
7bb29755 383 queue_work(priv->wq, &priv->update_carrier_work);
f62b8bb8 384 break;
ee7f1220
EE
385 case MLX5_DEV_EVENT_PPS:
386 eqe = (struct mlx5_eqe *)param;
ee7f1220
EE
387 ptp_event.index = eqe->data.pps.pin;
388 ptp_event.timestamp =
389 timecounter_cyc2time(&priv->tstamp.clock,
390 be64_to_cpu(eqe->data.pps.time_stamp));
391 mlx5e_pps_event_handler(vpriv, &ptp_event);
392 break;
f62b8bb8
AV
393 default:
394 break;
395 }
396}
397
f62b8bb8
AV
398static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
399{
e0f46eb9 400 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
f62b8bb8
AV
401}
402
403static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
404{
e0f46eb9 405 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
daa21560 406 synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
f62b8bb8
AV
407}
408
7e426671
TT
409static inline int mlx5e_get_wqe_mtt_sz(void)
410{
411 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
412 * To avoid copying garbage after the mtt array, we allocate
413 * a little more.
414 */
415 return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
416 MLX5_UMR_MTT_ALIGNMENT);
417}
418
31391048
SM
419static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
420 struct mlx5e_icosq *sq,
421 struct mlx5e_umr_wqe *wqe,
422 u16 ix)
7e426671
TT
423{
424 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
425 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
426 struct mlx5_wqe_data_seg *dseg = &wqe->data;
21c59685 427 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
7e426671
TT
428 u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
429 u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
430
431 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
432 ds_cnt);
433 cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
434 cseg->imm = rq->mkey_be;
435
436 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
31616255 437 ucseg->xlt_octowords =
7e426671
TT
438 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
439 ucseg->bsf_octowords =
440 cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
441 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
442
443 dseg->lkey = sq->mkey_be;
444 dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
445}
446
447static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
448 struct mlx5e_channel *c)
449{
450 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
451 int mtt_sz = mlx5e_get_wqe_mtt_sz();
452 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
453 int i;
454
21c59685
SM
455 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
456 GFP_KERNEL, cpu_to_node(c->cpu));
457 if (!rq->mpwqe.info)
7e426671
TT
458 goto err_out;
459
460 /* We allocate more than mtt_sz as we will align the pointer */
21c59685 461 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
7e426671 462 cpu_to_node(c->cpu));
21c59685 463 if (unlikely(!rq->mpwqe.mtt_no_align))
7e426671
TT
464 goto err_free_wqe_info;
465
466 for (i = 0; i < wq_sz; i++) {
21c59685 467 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
7e426671 468
21c59685 469 wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
7e426671
TT
470 MLX5_UMR_ALIGN);
471 wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
472 PCI_DMA_TODEVICE);
473 if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
474 goto err_unmap_mtts;
475
476 mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
477 }
478
479 return 0;
480
481err_unmap_mtts:
482 while (--i >= 0) {
21c59685 483 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
7e426671
TT
484
485 dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
486 PCI_DMA_TODEVICE);
487 }
21c59685 488 kfree(rq->mpwqe.mtt_no_align);
7e426671 489err_free_wqe_info:
21c59685 490 kfree(rq->mpwqe.info);
7e426671
TT
491
492err_out:
493 return -ENOMEM;
494}
495
496static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
497{
498 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
499 int mtt_sz = mlx5e_get_wqe_mtt_sz();
500 int i;
501
502 for (i = 0; i < wq_sz; i++) {
21c59685 503 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
7e426671
TT
504
505 dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
506 PCI_DMA_TODEVICE);
507 }
21c59685
SM
508 kfree(rq->mpwqe.mtt_no_align);
509 kfree(rq->mpwqe.info);
7e426671
TT
510}
511
a43b25da 512static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
ec8b9981
TT
513 u64 npages, u8 page_shift,
514 struct mlx5_core_mkey *umr_mkey)
3608ae77 515{
3608ae77
TT
516 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
517 void *mkc;
518 u32 *in;
519 int err;
520
ec8b9981
TT
521 if (!MLX5E_VALID_NUM_MTTS(npages))
522 return -EINVAL;
523
1b9a07ee 524 in = kvzalloc(inlen, GFP_KERNEL);
3608ae77
TT
525 if (!in)
526 return -ENOMEM;
527
528 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
529
3608ae77
TT
530 MLX5_SET(mkc, mkc, free, 1);
531 MLX5_SET(mkc, mkc, umr_en, 1);
532 MLX5_SET(mkc, mkc, lw, 1);
533 MLX5_SET(mkc, mkc, lr, 1);
534 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
535
536 MLX5_SET(mkc, mkc, qpn, 0xffffff);
537 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
ec8b9981 538 MLX5_SET64(mkc, mkc, len, npages << page_shift);
3608ae77
TT
539 MLX5_SET(mkc, mkc, translations_octword_size,
540 MLX5_MTT_OCTW(npages));
ec8b9981 541 MLX5_SET(mkc, mkc, log_page_size, page_shift);
3608ae77 542
ec8b9981 543 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
3608ae77
TT
544
545 kvfree(in);
546 return err;
547}
548
a43b25da 549static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
ec8b9981 550{
6a9764ef 551 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
ec8b9981 552
a43b25da 553 return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
ec8b9981
TT
554}
555
3b77235b 556static int mlx5e_alloc_rq(struct mlx5e_channel *c,
6a9764ef
SM
557 struct mlx5e_params *params,
558 struct mlx5e_rq_param *rqp,
3b77235b 559 struct mlx5e_rq *rq)
f62b8bb8 560{
a43b25da 561 struct mlx5_core_dev *mdev = c->mdev;
6a9764ef 562 void *rqc = rqp->rqc;
f62b8bb8 563 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
461017cb 564 u32 byte_count;
1bfecfca 565 int npages;
f62b8bb8
AV
566 int wq_sz;
567 int err;
568 int i;
569
6a9764ef 570 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
311c7c71 571
6a9764ef 572 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
f62b8bb8
AV
573 &rq->wq_ctrl);
574 if (err)
575 return err;
576
577 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
578
579 wq_sz = mlx5_wq_ll_get_size(&rq->wq);
f62b8bb8 580
6a9764ef 581 rq->wq_type = params->rq_wq_type;
7e426671
TT
582 rq->pdev = c->pdev;
583 rq->netdev = c->netdev;
a43b25da 584 rq->tstamp = c->tstamp;
7e426671
TT
585 rq->channel = c;
586 rq->ix = c->ix;
a43b25da 587 rq->mdev = mdev;
97bc402d 588
6a9764ef 589 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
97bc402d
DB
590 if (IS_ERR(rq->xdp_prog)) {
591 err = PTR_ERR(rq->xdp_prog);
592 rq->xdp_prog = NULL;
593 goto err_rq_wq_destroy;
594 }
7e426671 595
bce2b2bf 596 rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
b45d8b50 597 rq->buff.headroom = params->rq_headroom;
b5503b99 598
6a9764ef 599 switch (rq->wq_type) {
461017cb 600 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
f5f82476 601
7cc6d77b 602 rq->post_wqes = mlx5e_post_rx_mpwqes;
6cd392a0 603 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
461017cb 604
20fd0c19 605 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
899a59d3
IT
606#ifdef CONFIG_MLX5_EN_IPSEC
607 if (MLX5_IPSEC_DEV(mdev)) {
608 err = -EINVAL;
609 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
610 goto err_rq_wq_destroy;
611 }
612#endif
20fd0c19
SM
613 if (!rq->handle_rx_cqe) {
614 err = -EINVAL;
615 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
616 goto err_rq_wq_destroy;
617 }
618
89e89f7a 619 rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
b45d8b50 620 rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
1bfecfca 621
b681c481 622 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
ec8b9981 623
a43b25da 624 err = mlx5e_create_rq_umr_mkey(mdev, rq);
7e426671
TT
625 if (err)
626 goto err_rq_wq_destroy;
ec8b9981
TT
627 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
628
629 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
630 if (err)
631 goto err_destroy_umr_mkey;
461017cb
TT
632 break;
633 default: /* MLX5_WQ_TYPE_LINKED_LIST */
accd5883
TT
634 rq->wqe.frag_info =
635 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
636 GFP_KERNEL, cpu_to_node(c->cpu));
637 if (!rq->wqe.frag_info) {
461017cb
TT
638 err = -ENOMEM;
639 goto err_rq_wq_destroy;
640 }
7cc6d77b 641 rq->post_wqes = mlx5e_post_rx_wqes;
6cd392a0 642 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
461017cb 643
899a59d3
IT
644#ifdef CONFIG_MLX5_EN_IPSEC
645 if (c->priv->ipsec)
646 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
647 else
648#endif
649 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
20fd0c19 650 if (!rq->handle_rx_cqe) {
accd5883 651 kfree(rq->wqe.frag_info);
20fd0c19
SM
652 err = -EINVAL;
653 netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
654 goto err_rq_wq_destroy;
655 }
656
b681c481 657 byte_count = params->lro_en ?
6a9764ef 658 params->lro_wqe_sz :
c139dbfd 659 MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
899a59d3
IT
660#ifdef CONFIG_MLX5_EN_IPSEC
661 if (MLX5_IPSEC_DEV(mdev))
b681c481 662 byte_count += MLX5E_METADATA_ETHER_LEN;
899a59d3 663#endif
accd5883 664 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
1bfecfca
SM
665
666 /* calc the required page order */
b45d8b50 667 rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
accd5883 668 npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
1bfecfca
SM
669 rq->buff.page_order = order_base_2(npages);
670
461017cb 671 byte_count |= MLX5_HW_START_PADDING;
7e426671 672 rq->mkey_be = c->mkey_be;
461017cb 673 }
f62b8bb8
AV
674
675 for (i = 0; i < wq_sz; i++) {
676 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
677
4c2af5cc
TT
678 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
679 u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
680
681 wqe->data.addr = cpu_to_be64(dma_offset);
682 }
683
461017cb 684 wqe->data.byte_count = cpu_to_be32(byte_count);
7e426671 685 wqe->data.lkey = rq->mkey_be;
f62b8bb8
AV
686 }
687
cb3c7fd4 688 INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
6a9764ef 689 rq->am.mode = params->rx_cq_period_mode;
4415a031
TT
690 rq->page_cache.head = 0;
691 rq->page_cache.tail = 0;
692
f62b8bb8
AV
693 return 0;
694
ec8b9981
TT
695err_destroy_umr_mkey:
696 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
697
f62b8bb8 698err_rq_wq_destroy:
97bc402d
DB
699 if (rq->xdp_prog)
700 bpf_prog_put(rq->xdp_prog);
f62b8bb8
AV
701 mlx5_wq_destroy(&rq->wq_ctrl);
702
703 return err;
704}
705
3b77235b 706static void mlx5e_free_rq(struct mlx5e_rq *rq)
f62b8bb8 707{
4415a031
TT
708 int i;
709
86994156
RS
710 if (rq->xdp_prog)
711 bpf_prog_put(rq->xdp_prog);
712
461017cb
TT
713 switch (rq->wq_type) {
714 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
7e426671 715 mlx5e_rq_free_mpwqe_info(rq);
a43b25da 716 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
461017cb
TT
717 break;
718 default: /* MLX5_WQ_TYPE_LINKED_LIST */
accd5883 719 kfree(rq->wqe.frag_info);
461017cb
TT
720 }
721
4415a031
TT
722 for (i = rq->page_cache.head; i != rq->page_cache.tail;
723 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
724 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
725
726 mlx5e_page_release(rq, dma_info, false);
727 }
f62b8bb8
AV
728 mlx5_wq_destroy(&rq->wq_ctrl);
729}
730
6a9764ef
SM
731static int mlx5e_create_rq(struct mlx5e_rq *rq,
732 struct mlx5e_rq_param *param)
f62b8bb8 733{
a43b25da 734 struct mlx5_core_dev *mdev = rq->mdev;
f62b8bb8
AV
735
736 void *in;
737 void *rqc;
738 void *wq;
739 int inlen;
740 int err;
741
742 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
743 sizeof(u64) * rq->wq_ctrl.buf.npages;
1b9a07ee 744 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
745 if (!in)
746 return -ENOMEM;
747
748 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
749 wq = MLX5_ADDR_OF(rqc, rqc, wq);
750
751 memcpy(rqc, param->rqc, sizeof(param->rqc));
752
97de9f31 753 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
f62b8bb8 754 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
f62b8bb8 755 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
68cdf5d6 756 MLX5_ADAPTER_PAGE_SHIFT);
f62b8bb8
AV
757 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
758
759 mlx5_fill_page_array(&rq->wq_ctrl.buf,
760 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
761
7db22ffb 762 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
f62b8bb8
AV
763
764 kvfree(in);
765
766 return err;
767}
768
36350114
GP
769static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
770 int next_state)
f62b8bb8
AV
771{
772 struct mlx5e_channel *c = rq->channel;
a43b25da 773 struct mlx5_core_dev *mdev = c->mdev;
f62b8bb8
AV
774
775 void *in;
776 void *rqc;
777 int inlen;
778 int err;
779
780 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 781 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
782 if (!in)
783 return -ENOMEM;
784
785 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
786
787 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
788 MLX5_SET(rqc, rqc, state, next_state);
789
7db22ffb 790 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
f62b8bb8
AV
791
792 kvfree(in);
793
794 return err;
795}
796
102722fc
GE
797static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
798{
799 struct mlx5e_channel *c = rq->channel;
800 struct mlx5e_priv *priv = c->priv;
801 struct mlx5_core_dev *mdev = priv->mdev;
802
803 void *in;
804 void *rqc;
805 int inlen;
806 int err;
807
808 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 809 in = kvzalloc(inlen, GFP_KERNEL);
102722fc
GE
810 if (!in)
811 return -ENOMEM;
812
813 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
814
815 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
816 MLX5_SET64(modify_rq_in, in, modify_bitmask,
817 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
818 MLX5_SET(rqc, rqc, scatter_fcs, enable);
819 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
820
821 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
822
823 kvfree(in);
824
825 return err;
826}
827
36350114
GP
828static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
829{
830 struct mlx5e_channel *c = rq->channel;
a43b25da 831 struct mlx5_core_dev *mdev = c->mdev;
36350114
GP
832 void *in;
833 void *rqc;
834 int inlen;
835 int err;
836
837 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 838 in = kvzalloc(inlen, GFP_KERNEL);
36350114
GP
839 if (!in)
840 return -ENOMEM;
841
842 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
843
844 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
83b502a1
AV
845 MLX5_SET64(modify_rq_in, in, modify_bitmask,
846 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
36350114
GP
847 MLX5_SET(rqc, rqc, vsd, vsd);
848 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
849
850 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
851
852 kvfree(in);
853
854 return err;
855}
856
3b77235b 857static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
f62b8bb8 858{
a43b25da 859 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
f62b8bb8
AV
860}
861
862static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
863{
01c196a2 864 unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
f62b8bb8 865 struct mlx5e_channel *c = rq->channel;
a43b25da 866
f62b8bb8 867 struct mlx5_wq_ll *wq = &rq->wq;
6a9764ef 868 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
f62b8bb8 869
01c196a2 870 while (time_before(jiffies, exp_time)) {
6a9764ef 871 if (wq->cur_sz >= min_wqes)
f62b8bb8
AV
872 return 0;
873
874 msleep(20);
875 }
876
a43b25da 877 netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
6a9764ef 878 rq->rqn, wq->cur_sz, min_wqes);
f62b8bb8
AV
879 return -ETIMEDOUT;
880}
881
f2fde18c
SM
882static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
883{
884 struct mlx5_wq_ll *wq = &rq->wq;
885 struct mlx5e_rx_wqe *wqe;
886 __be16 wqe_ix_be;
887 u16 wqe_ix;
888
8484f9ed 889 /* UMR WQE (if in progress) is always at wq->head */
a071cb9f
TT
890 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
891 rq->mpwqe.umr_in_progress)
21c59685 892 mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
8484f9ed 893
f2fde18c
SM
894 while (!mlx5_wq_ll_is_empty(wq)) {
895 wqe_ix_be = *wq->tail_next;
896 wqe_ix = be16_to_cpu(wqe_ix_be);
897 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
898 rq->dealloc_wqe(rq, wqe_ix);
899 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
900 &wqe->next.next_wqe_index);
901 }
accd5883
TT
902
903 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
904 /* Clean outstanding pages on handled WQEs that decided to do page-reuse,
905 * but yet to be re-posted.
906 */
907 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
908
909 for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
910 rq->dealloc_wqe(rq, wqe_ix);
911 }
f2fde18c
SM
912}
913
f62b8bb8 914static int mlx5e_open_rq(struct mlx5e_channel *c,
6a9764ef 915 struct mlx5e_params *params,
f62b8bb8
AV
916 struct mlx5e_rq_param *param,
917 struct mlx5e_rq *rq)
918{
919 int err;
920
6a9764ef 921 err = mlx5e_alloc_rq(c, params, param, rq);
f62b8bb8
AV
922 if (err)
923 return err;
924
3b77235b 925 err = mlx5e_create_rq(rq, param);
f62b8bb8 926 if (err)
3b77235b 927 goto err_free_rq;
f62b8bb8 928
36350114 929 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
f62b8bb8 930 if (err)
3b77235b 931 goto err_destroy_rq;
f62b8bb8 932
6a9764ef 933 if (params->rx_am_enabled)
a1eaba4c 934 c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
cb3c7fd4 935
f62b8bb8
AV
936 return 0;
937
f62b8bb8
AV
938err_destroy_rq:
939 mlx5e_destroy_rq(rq);
3b77235b
SM
940err_free_rq:
941 mlx5e_free_rq(rq);
f62b8bb8
AV
942
943 return err;
944}
945
acc6c595
SM
/* Mark the RQ enabled and kick its channel's ICOSQ with a NOP WQE.
 * The NOP generates a completion that schedules NAPI, which performs the
 * initial posting of RX descriptors (mlx5e_post_rx_wqes).
 */
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	/* ENABLED must be visible before the doorbell triggers NAPI */
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
957
/* Clear ENABLED and wait for any in-flight NAPI poll to finish, so no
 * further RX descriptors get posted after this returns.
 */
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}
cb3c7fd4 963
acc6c595
SM
/* Full RQ teardown: stop adaptive-moderation work, destroy the HW object,
 * then free outstanding descriptors and SW resources (reverse of open).
 */
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->am.work);
	mlx5e_destroy_rq(rq);
	/* safe only after destroy: no completions can arrive anymore */
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
971
/* Free the XDP SQ descriptor database (per-WQE DMA info array). */
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}
976
31391048 977static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
b5503b99
SM
978{
979 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
980
31391048 981 sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
b5503b99 982 GFP_KERNEL, numa);
31391048
SM
983 if (!sq->db.di) {
984 mlx5e_free_xdpsq_db(sq);
b5503b99
SM
985 return -ENOMEM;
986 }
987
988 return 0;
989}
990
/* Allocate SW resources for an XDP transmit SQ: cyclic WQ + descriptor DB.
 * The HW SQ object is created later by mlx5e_open_xdpsq().
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	/* keep the doorbell record on the channel's NUMA node */
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	/* second doorbell slot is the send doorbell */
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
1023
/* Free the XDP SQ's SW resources (reverse of mlx5e_alloc_xdpsq). */
static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
1029
/* Free the internal-operations SQ descriptor database. */
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}
1034
31391048 1035static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
f10b7cc7
SM
1036{
1037 u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1038
1039 sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
1040 GFP_KERNEL, numa);
1041 if (!sq->db.ico_wqe)
1042 return -ENOMEM;
1043
1044 return 0;
1045}
1046
/* Allocate SW resources for the internal-operations SQ (used for UMR and
 * NOP WQEs, not regular traffic). HW object is created by mlx5e_open_icosq().
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	/* last PC at which a max-size ICOSQ WQE still fits without wrapping */
	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
1078
/* Free the ICOSQ's SW resources (reverse of mlx5e_alloc_icosq). */
static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
1084
/* Free the TXQ SQ descriptor databases (per-WQE info + DMA fifo).
 * kfree(NULL) is a no-op, so this is safe after a partial allocation.
 */
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}
1090
31391048 1091static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
b5503b99 1092{
31391048
SM
1093 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1094 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1095
31391048
SM
1096 sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
1097 GFP_KERNEL, numa);
1098 sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
1099 GFP_KERNEL, numa);
77bdf895 1100 if (!sq->db.dma_fifo || !sq->db.wqe_info) {
31391048
SM
1101 mlx5e_free_txqsq_db(sq);
1102 return -ENOMEM;
b5503b99 1103 }
31391048
SM
1104
1105 sq->dma_fifo_mask = df_sz - 1;
1106
1107 return 0;
b5503b99
SM
1108}
1109
/* Allocate SW resources for a regular traffic SQ (one netdev TX queue).
 * The HW SQ object is created later by mlx5e_open_txqsq().
 *
 * @txq_ix: netdev TX queue index this SQ backs
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev = c->pdev;
	sq->tstamp = c->tstamp;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;
	sq->max_inline = params->tx_max_inline;
	sq->min_inline_mode = params->tx_min_inline_mode;
	/* IPsec-capable devices need SQ-state awareness in the TX path */
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	/* last PC at which a max-size WQE still fits without wrapping */
	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
1150
/* Free the TXQ SQ's SW resources (reverse of mlx5e_alloc_txqsq). */
static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
1156
33ad9711
SM
/* Parameters shared by all SQ flavors when creating the HW SQ object. */
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;	/* backing WQ buffer/doorbell */
	u32                         cqn;	/* completion CQ number */
	u32                         tisn;	/* TIS to attach (0 if none) */
	u8                          tis_lst_sz;	/* 0 or 1 TIS entries */
	u8                          min_inline_mode; /* min L2 inline mode */
};
1164
/* Issue the CREATE_SQ firmware command, returning the new SQ number in
 * @sqn. The SQ is created in the RST state; see mlx5e_create_sq_rdy().
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	/* command length includes one PAS entry per WQ buffer page */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	/* min inline mode is only programmable under vport-context control */
	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}
1209
33ad9711
SM
/* Parameters for a MODIFY_SQ firmware command (state and/or rate limit). */
struct mlx5e_modify_sq_param {
	int curr_state;	/* expected current SQC state */
	int next_state;	/* requested SQC state */
	bool rl_update;	/* also update the packet-pacing rate index */
	int rl_index;	/* rate-limit table index (valid if rl_update) */
};
1216
/* Issue the MODIFY_SQ firmware command described by @p on SQ @sqn.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	/* rate-limit index may only be modified while moving to RDY */
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}
1245
/* Destroy the HW SQ object; SW resources are freed by the per-flavor
 * mlx5e_free_*sq() helpers.
 */
static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
1250
/* Create a HW SQ and immediately transition it RST -> RDY.
 * On a failed transition the just-created SQ is destroyed again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}
1271
7f859ecf
SM
1272static int mlx5e_set_sq_maxrate(struct net_device *dev,
1273 struct mlx5e_txqsq *sq, u32 rate);
1274
/* Open a traffic SQ: allocate SW resources, create the HW object (RDY),
 * and re-apply any previously configured per-queue max rate.
 *
 * @tisn:   TIS number to attach the SQ to
 * @txq_ix: netdev TX queue index
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	/* restore user-configured rate limit across channel re-open */
	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}
1311
acc6c595
SM
/* Bind the SQ to its netdev TX queue, mark it enabled, and let the stack
 * start transmitting. ENABLED must be set before the queue is started so
 * the TX path never runs against a disabled SQ.
 */
static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}
1319
f62b8bb8
AV
/* Stop a single netdev TX queue under its xmit lock, guaranteeing no
 * transmit is in flight on it once this returns.
 */
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
1326
/* Quiesce a traffic SQ: disable it, stop the stack's queue, and flush a
 * final NOP doorbell so HW completes everything already posted.
 * Ordering here is critical — do not reorder the steps.
 */
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		/* NOP carries no skb; mark the slot so completion skips it */
		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}
1346
/* Tear down a (already deactivated) traffic SQ: destroy the HW object,
 * release its rate-limit entry, then free outstanding descriptors and
 * SW resources.
 */
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
1358
/* Open the internal-operations SQ: allocate SW resources and create the
 * HW object in RDY state. No TIS is attached (not a traffic SQ).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	/* enabled before create: ICOSQ completions may fire immediately */
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}
1387
/* Close the ICOSQ: disable it, wait out in-flight NAPI, then destroy the
 * HW object and free SW resources.
 */
static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}
1398
/* Open an XDP transmit SQ and pre-initialize the fixed parts of every WQE
 * (ctrl/eth/data segments), so the XDP hot path only patches addresses.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	/* inlining the L2 header consumes one extra data segment */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}
1451
/* Close an XDP SQ: disable, wait out NAPI, destroy the HW object, then
 * release outstanding descriptors and SW resources.
 */
static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
1463
95b6c6a5
EBE
/* Allocate and initialize the SW side of a CQ: the CQ WQ buffer, doorbell
 * records, EQ/IRQ mapping, and ownership bits of every CQE.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	/* NOTE(review): return value of mlx5_vector2eqn() is ignored here;
	 * only the irqn output is used. Confirm eq_ix is always valid.
	 */
	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	/* set HW-ownership on all CQEs so SW won't process stale entries */
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}
1501
95b6c6a5
EBE
/* Channel-flavored CQ allocation: pin buffers to the channel's NUMA node
 * and its EQ, then delegate to mlx5e_alloc_cq_common().
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}
1520
/* Free the CQ's SW resources (reverse of mlx5e_alloc_cq_common). */
static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}
1525
/* Issue the CREATE_CQ firmware command for an already-allocated CQ and
 * arm it so the first completion raises an event.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	/* command length includes one PAS entry per CQ buffer page */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	/* only the EQ number is needed here; IRQ was resolved at alloc time */
	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
1571
/* Destroy the HW CQ object; SW resources are freed by mlx5e_free_cq(). */
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
1576
/* Open a CQ for a channel: allocate, create the HW object, and program
 * interrupt moderation (if the device supports it).
 *
 * @moder: interrupt moderation (usec/packet thresholds) to apply
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}
1602
/* Close a CQ: destroy the HW object, then free SW resources. */
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
1608
/* Pick the CPU a channel should run on: the first CPU in the affinity
 * mask of the channel's IRQ vector.
 */
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
1613
/* Open one TX CQ per traffic class of the channel, unwinding on failure.
 *
 * Return: 0 on success, negative errno otherwise (all opened CQs closed).
 */
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}
1636
/* Close every per-TC TX CQ of the channel. */
static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
1644
/* Open one traffic SQ per traffic class, unwinding on failure.
 * TXQ indexing groups queues by TC: txq_ix = channel + tc * num_channels.
 *
 * Return: 0 on success, negative errno otherwise (all opened SQs closed).
 */
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}
1669
/* Close every per-TC traffic SQ of the channel. */
static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
1677
/* Apply a per-SQ max TX rate (Kb/sec) via the HW rate-limit table:
 * release the old table entry, reserve a new one (rate != 0), and point
 * the SQ at it with a MODIFY_SQ. rate == 0 removes the limit.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	/* RDY -> RDY transition carrying only the new rate-limit index */
	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
1723
/* ndo_set_tx_maxrate callback: validate and record a per-queue max rate,
 * applying it immediately if the device is open (else it is applied on
 * the next mlx5e_open_txqsq()).
 *
 * @index: netdev TX queue index
 * @rate:  max rate in Mb/sec (0 clears the limit)
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
1754
/* Open one channel (one NAPI context on one CPU): all of its CQs, the
 * ICOSQ, per-TC traffic SQs, the optional XDP SQ, and the RQ — in strict
 * dependency order, with a full goto-unwind ladder on failure.
 *
 * @ix: channel index; *@cp receives the channel on success.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icocq_moder = {0, 0}; /* ICOSQ CQ: no moderation */
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	/* CQs first: every SQ/RQ created below needs its CQN */
	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	*cp = c;

	return 0;
err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
1852
acc6c595
SM
/* Start traffic on an opened channel: enable its SQs and RQ, then bind
 * the channel's CPU for XPS queue selection.
 */
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}
1862
/* Quiesce a channel: RX first so no new work is generated, then the
 * per-TC SQs (reverse of mlx5e_activate_channel).
 */
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}
1871
f62b8bb8
AV
/* Tear down an (already deactivated) channel in reverse open order:
 * RQ, XDP SQ, traffic SQs, ICOSQ, NAPI, then all CQs.
 */
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}
1889
/* Fill the RQ create-context (rqc/wq) from the current params: WQ type
 * (striding vs. linked-list), sizes, PD, counter set, and RX offload flags.
 */
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		/* HW encodes these log values with fixed offsets (9 and 6) */
		MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        params->log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
1918
556dd1b9
TT
/* Minimal RQ params for the drop RQ (receives nothing; sinks traffic
 * while the real channels are closed).
 */
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
1927
d3c9bc27
TT
/* SQ create-context fields common to all SQ flavors: WQEBB stride, PD,
 * and the preferred NUMA node for the WQ buffer.
 */
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}
1939
/* Traffic-SQ create-context: common fields plus size and, on
 * IPsec-capable devices, software-parser (swp) support.
 */
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}
1951
/* CQ create-context field common to all CQ flavors: the UAR page. */
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}
1959
/* Build the RX CQ context. The CQ must be able to hold one CQE per
 * incoming packet: for striding RQ that is one CQE per stride, hence the
 * extra mpwqe_log_num_strides term. CQE compression is turned on when the
 * corresponding private flag is set.
 */
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = params->log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	/* RX moderation mode is user-configurable (ethtool). */
	param->cq_period_mode = params->rx_cq_period_mode;
}
1984
/* Build the TX completion CQ context: sized to match the SQ, with
 * EQE-based moderation (fire on event, not on CQE count).
 */
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
1997
/* Build the CQ context for the internal-operations (ICO) SQ; the caller
 * supplies the log size since the ICO SQ size is fixed, not user-tunable.
 */
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
2010
/* Build the ICO SQ context. reg_umr is enabled when the device supports
 * UMR registration from an SQ (used by the RX path; presumably for MPWQE
 * memory registration — confirm against mlx5e_rx users).
 */
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}
2023
/* Build the XDP transmit SQ context: same shape as the data SQ but
 * without the IPsec SWP bit.
 */
static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
2034
6a9764ef
SM
/* Populate one mlx5e_channel_param with the contexts for every queue a
 * channel owns (RQ, SQ, XDP SQ, ICO SQ and their CQs). The result is
 * shared by all channels opened with the same params.
 */
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
2049
55c2503d
SM
/* Open chs->params.num_channels channels into @chs.
 * On failure, every already-opened channel is closed and chs->num is
 * reset to 0 so the struct is safe to pass to mlx5e_close_channels().
 * Returns 0 or a negative errno.
 */
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;	/* covers the two allocations below */
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	/* One param set is built once and reused for every channel. */
	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	/* Unwind only the channels that opened successfully. */
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}
2084
acc6c595 2085static void mlx5e_activate_channels(struct mlx5e_channels *chs)
f62b8bb8
AV
2086{
2087 int i;
2088
acc6c595
SM
2089 for (i = 0; i < chs->num; i++)
2090 mlx5e_activate_channel(chs->c[i]);
2091}
2092
2093static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2094{
2095 int err = 0;
2096 int i;
2097
2098 for (i = 0; i < chs->num; i++) {
2099 err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
2100 if (err)
2101 break;
2102 }
2103
2104 return err;
2105}
2106
2107static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2108{
2109 int i;
2110
2111 for (i = 0; i < chs->num; i++)
2112 mlx5e_deactivate_channel(chs->c[i]);
2113}
2114
/* Close every channel in @chs and free the channel-pointer array.
 * chs->num is reset so a subsequent close is a harmless no-op.
 */
void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
2125
a5f97fee
SM
/* Create an RQ table (RQT) of @sz entries in firmware. All entries are
 * initially pointed at the drop RQ; real RQs are wired in later via
 * mlx5e_redirect_rqt(). Marks rqt->enabled on success.
 */
static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	/* Command layout: fixed header plus one u32 RQ number per entry. */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}
2156
/* Destroy an RQT in firmware; clear the enabled flag first so concurrent
 * redirect paths (which test rqt->enabled) skip it.
 */
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
2162
/* Create the single indirection (RSS) RQT of MLX5E_INDIR_RQT_SIZE entries. */
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}
2173
/* Create one single-entry ("direct") RQT per possible channel.
 * On failure, destroys the RQTs created so far and returns the error.
 */
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}
2196
8f493ffd
SM
2197void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2198{
2199 int i;
2200
2201 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2202 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2203}
2204
a5f97fee
SM
2205static int mlx5e_rx_hash_fn(int hfunc)
2206{
2207 return (hfunc == ETH_RSS_HASH_TOP) ?
2208 MLX5_RX_HASH_FN_TOEPLITZ :
2209 MLX5_RX_HASH_FN_INVERTED_XOR8;
2210}
2211
/* Reverse the low @size bits of @a (bit 0 <-> bit size-1). Used by
 * mlx5e_fill_rqt_rqns() to spread indirection entries when the XOR hash
 * function is selected.
 */
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int out = 0;
	int bit;

	for (bit = 0; bit < size; bit++)
		out |= (int)((a >> (size - bit - 1)) & 1UL) << bit;

	return out;
}
2222
/* Fill the rq_num[] array of an RQT modify command. For RSS, each slot is
 * mapped through the user indirection table (bit-reversing the index first
 * for the XOR hash); otherwise every slot gets the single RQ in rrp.rqn.
 */
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			/* XOR8 hardware hash expects bit-reversed slots. */
			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
2245
/* Issue a MODIFY_RQT command re-pointing all @sz entries of @rqtn
 * according to @rrp. Returns 0 or a negative errno.
 */
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	/* Only the RQ-number list is being modified. */
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}
2270
a5f97fee
SM
2271static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2272 struct mlx5e_redirect_rqt_param rrp)
2273{
2274 if (!rrp.is_rss)
2275 return rrp.rqn;
2276
2277 if (ix >= rrp.rss.channels->num)
2278 return priv->drop_rq.rqn;
2279
2280 return rrp.rss.channels->c[ix]->rq.rqn;
2281}
2282
/* Re-point the indirection RQT and every enabled direct RQT per @rrp.
 * Disabled tables are skipped (they may not exist yet or are being torn
 * down — mlx5e_destroy_rqt clears 'enabled' first).
 */
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}
2311
a5f97fee
SM
/* Point all RQ tables at the RQs of the given channel set (RSS spread
 * across channels, direct tables 1:1).
 */
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels  = chs,
				.hfunc     = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}
2327
/* Point all RQ tables at the drop RQ — used while channels are being
 * deactivated so no traffic lands on a dying RQ.
 */
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
2339
/* Set the LRO fields of a TIR context from @params; no-op when LRO is off.
 * The max IP payload is the configured LRO WQE size minus a rough L2+L3
 * header allowance, in units of 256 bytes (hence the >> 8).
 */
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
2354
6a9764ef
SM
/* Program the RX hash configuration of a TIR for traffic type @tt:
 * select the hash function (Toeplitz with the user key, or inverted XOR8)
 * and the header fields that feed it. @inner selects the inner-packet
 * field selector (for tunneled traffic) instead of the outer one.
 */
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	/* Per traffic type: TCP/UDP hash on 4-tuple, IPsec on IP pair + SPI,
	 * plain IP on the address pair only.
	 */
	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
2467
/* Push the current LRO configuration to every TIR (indirect and direct)
 * via MODIFY_TIR with only the LRO bitmask set. Stops at the first error.
 */
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* One command buffer, reused for every TIR. */
	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
2508
7b3722fa
GP
/* Build an "inner" indirect TIR context for tunneled traffic: hashes on
 * the inner headers (inner=true) with tunneled offload enabled.
 */
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}
2523
/* Program the SW MTU @mtu into the port (as HW MTU) and mirror it into the
 * vport context. A vport-context update failure is intentionally ignored —
 * the port MTU is the authoritative setting.
 */
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}
40ab6a6e 2538
cd255eff
SM
/* Read back the effective MTU: prefer the vport context value, falling
 * back to the port operational MTU when the query fails or returns 0.
 */
static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}
2551
/* Apply netdev->mtu to the device, then read back what the device actually
 * accepted and adopt it (warning if it differs from what was requested).
 */
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
2570
08fb1dac
SM
/* Sync the netdev traffic-class layout with the configured num_tc.
 * With a single TC the netdev is left TC-less.
 */
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
2591
acc6c595
SM
/* Rebuild the two TX lookup tables after channels change:
 * channel_tc2txq maps (channel, tc) -> txq index (TCs are laid out in
 * blocks of num_channels queues), and txq2sq maps a txq index back to
 * its SQ for the fast path.
 */
static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
2610
/* Bring the already-open channel set live: publish queue counts and TX
 * maps, activate the channels, start TX, and only then re-point the RQTs
 * at the new RQs (after waiting for minimal RX WQEs) so traffic never
 * lands on an unready RQ.
 */
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	/* eswitch managers mirror the SQs into FDB forwarding rules. */
	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}
2630
/* Reverse of mlx5e_activate_priv_channels: first steer RX to the drop RQ,
 * then stop TX, then deactivate the channels.
 */
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_VPORT_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}
2645
/* Atomically (w.r.t. the data path) swap the active channel set for
 * @new_chs: carrier is dropped for the duration, old channels are fully
 * torn down before the new set is adopted, and @hw_modify (optional) runs
 * between adoption and activation for HW state that depends on the new
 * channels. Caller must hold priv->state_lock (presumably — all callers in
 * this file do; confirm for new call sites).
 */
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;
	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	/* Shrink the TXQ count up-front so the stack stops using queues
	 * that won't exist in the new configuration.
	 */
	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}
2677
40ab6a6e
AS
/* ndo_open body, called with priv->state_lock held: open and activate the
 * channel set, refresh TIRs, start carrier/timestamping/stats updates.
 * The OPENED bit is set before opening so concurrent observers see the
 * transition; it is cleared again on failure.
 */
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
2704
/* ndo_open entry point: serialize on state_lock, open, and on success set
 * the port administratively UP.
 */
int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	return err;
}
2718
/* ndo_stop body, called with priv->state_lock held. Idempotent: returns
 * early if the device is not in the OPENED state.
 */
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}
2738
/* ndo_stop entry point: set the port administratively DOWN, then close
 * under state_lock. Bails out if the netdev was already detached.
 */
int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
2754
/* Allocate the WQ backing the drop RQ. Unlike regular RQs there is no
 * channel, so no pages/WQE bookkeeping is set up — only the WQ itself.
 */
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->mdev = mdev;

	return 0;
}
2774
/* Allocate the CQ for the drop RQ; just the common (channel-less) path. */
static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	return mlx5e_alloc_cq_common(mdev, param, cq);
}
2781
a43b25da
SM
/* Create the drop RQ and its CQ in firmware. The drop RQ never carries
 * traffic; it exists so RQTs/TIRs always have a valid RQ to point at.
 * Teardown on error is strictly the reverse of creation.
 */
static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}
2821
/* Destroy and free the drop RQ and its CQ (reverse creation order). */
static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
2829
5426a0b2
SM
/* Create a TIS (transmit interface send) object for traffic class @tc.
 * @underlay_qpn is the underlay QP (0 for plain Ethernet). When this
 * function is the LACP owner in a LAG, strict TX port affinity is set.
 * Returns the new TIS number in *tisn.
 */
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	/* prio field carries the TC in its upper bits (tc << 1). */
	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}
2845
/* Destroy a TIS previously created by mlx5e_create_tis(). */
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}
2850
/* Create one TIS per traffic class; unwind on failure. */
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}
2870
cb67b832 2871void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
40ab6a6e
AS
2872{
2873 int tc;
2874
6bfd390b 2875 for (tc = 0; tc < priv->profile->max_tc; tc++)
5426a0b2 2876 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
40ab6a6e
AS
2877}
2878
6a9764ef
SM
/* Build an (outer) indirect TIR context for traffic type @tt, pointing at
 * the RSS indirection RQT with the configured hash and LRO settings.
 */
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}
2891
/* Build a direct TIR context over the single-entry RQT @rqtn; no RSS
 * spreading, so the trivial inverted-XOR8 hash is used.
 */
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
2902
/* Create the outer indirect TIRs (one per traffic type) and, when the
 * device supports inner flow tables for tunnels, the inner indirect TIRs.
 * On failure both sets are unwound: 'i' inner TIRs and 'tt' outer TIRs
 * have been created at any point, and the error path destroys exactly
 * those (i stays 0 if the inner loop was never entered).
 */
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);	/* command buffer is reused */
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
2961
/* Create one direct TIR per possible channel, each over its own
 * single-entry RQT; unwind the TIRs created so far on failure.
 */
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);	/* command buffer is reused */
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}
3000
8f493ffd 3001void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
f62b8bb8
AV
3002{
3003 int i;
3004
1da36696 3005 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
724b2aa1 3006 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
7b3722fa
GP
3007
3008 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3009 return;
3010
3011 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3012 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
f62b8bb8
AV
3013}
3014
cb67b832 3015void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
6bfd390b
HHZ
3016{
3017 int nch = priv->profile->max_nch(priv->mdev);
3018 int i;
3019
3020 for (i = 0; i < nch; i++)
3021 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3022}
3023
102722fc
GE
3024static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3025{
3026 int err = 0;
3027 int i;
3028
3029 for (i = 0; i < chs->num; i++) {
3030 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3031 if (err)
3032 return err;
3033 }
3034
3035 return 0;
3036}
3037
f6d96a20 3038static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
36350114
GP
3039{
3040 int err = 0;
3041 int i;
3042
ff9c852f
SM
3043 for (i = 0; i < chs->num; i++) {
3044 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
36350114
GP
3045 if (err)
3046 return err;
3047 }
3048
3049 return 0;
3050}
3051
0cf0f6d3
JP
/* ndo_setup_tc(TC_SETUP_MQPRIO) handler: change the number of traffic
 * classes.  tc == 0 means "back to a single TC"; otherwise only
 * MLX5E_MAX_NUM_TC is accepted.  When the device is up, a fresh set of
 * channels is opened and swapped in atomically.
 */
static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	/* only the TC count is offloaded, not the queue mapping */
	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* device is down: just record the new params for next open */
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	/* atomically replace the active channels with the new set */
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
3084
#ifdef CONFIG_MLX5_ESWITCH
/* ndo_setup_tc(TC_SETUP_CLSFLOWER) handler: offload flower classifier
 * replace/destroy/stats requests.  Only ingress on chain 0 is supported.
 */
static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
				     struct tc_cls_flower_offload *cls_flower)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
#endif
a5fcf8a6 3107
0cf0f6d3 3108static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 3109 void *type_data)
0cf0f6d3 3110{
2572ac53 3111 switch (type) {
fde6af47 3112#ifdef CONFIG_MLX5_ESWITCH
e3a2b7ed 3113 case TC_SETUP_CLSFLOWER:
de4784ca 3114 return mlx5e_setup_tc_cls_flower(dev, type_data);
fde6af47 3115#endif
0cf0f6d3 3116 case TC_SETUP_MQPRIO:
de4784ca 3117 return mlx5e_setup_tc_mqprio(dev, type_data);
e8f887ac
AV
3118 default:
3119 return -EOPNOTSUPP;
3120 }
08fb1dac
SM
3121}
3122
/* ndo_get_stats64: fill @stats from the driver's cached SW, vport and
 * physical-port counter blocks (no firmware query is issued here).
 */
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		/* uplink representor: expose the physical-port 802.3 counters */
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		/* regular netdev: expose the driver's software counters */
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
3164
/* ndo_set_rx_mode: may be called in atomic context, so the actual
 * steering/L2-table update is deferred to the driver workqueue.
 */
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}
3171
3172static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3173{
3174 struct mlx5e_priv *priv = netdev_priv(netdev);
3175 struct sockaddr *saddr = addr;
3176
3177 if (!is_valid_ether_addr(saddr->sa_data))
3178 return -EADDRNOTAVAIL;
3179
3180 netif_addr_lock_bh(netdev);
3181 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3182 netif_addr_unlock_bh(netdev);
3183
7bb29755 3184 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
3185
3186 return 0;
3187}
3188
0e405443
GP
/* Set or clear @feature in netdev->features according to @enable. */
#define MLX5E_SET_FEATURE(netdev, feature, enable) \
	do { \
		if (enable) \
			netdev->features |= feature; \
		else \
			netdev->features &= ~feature; \
	} while (0)

/* Per-feature toggle callback invoked by mlx5e_handle_feature(). */
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3198
/* NETIF_F_LRO handler.  With the linked-list RQ type an LRO change
 * requires re-opening the channels; otherwise only the TIR LRO context
 * is modified in place.
 */
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	/* a full restart is needed only for open, linked-list RQ channels */
	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	if (!reset) {
		/* update SW params and the TIR contexts in place */
		priv->channels.params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	/* swap channel sets, updating the TIRs mid-switch */
	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
3229
3230static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
3231{
3232 struct mlx5e_priv *priv = netdev_priv(netdev);
3233
3234 if (enable)
3235 mlx5e_enable_vlan_filter(priv);
3236 else
3237 mlx5e_disable_vlan_filter(priv);
3238
3239 return 0;
3240}
3241
3242static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3243{
3244 struct mlx5e_priv *priv = netdev_priv(netdev);
f62b8bb8 3245
0e405443 3246 if (!enable && mlx5e_tc_num_filters(priv)) {
e8f887ac
AV
3247 netdev_err(netdev,
3248 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3249 return -EINVAL;
3250 }
3251
0e405443
GP
3252 return 0;
3253}
3254
94cb1ebb
EBE
3255static int set_feature_rx_all(struct net_device *netdev, bool enable)
3256{
3257 struct mlx5e_priv *priv = netdev_priv(netdev);
3258 struct mlx5_core_dev *mdev = priv->mdev;
3259
3260 return mlx5_set_port_fcs(mdev, !enable);
3261}
3262
102722fc
GE
3263static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3264{
3265 struct mlx5e_priv *priv = netdev_priv(netdev);
3266 int err;
3267
3268 mutex_lock(&priv->state_lock);
3269
3270 priv->channels.params.scatter_fcs_en = enable;
3271 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3272 if (err)
3273 priv->channels.params.scatter_fcs_en = !enable;
3274
3275 mutex_unlock(&priv->state_lock);
3276
3277 return err;
3278}
3279
36350114
GP
3280static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3281{
3282 struct mlx5e_priv *priv = netdev_priv(netdev);
ff9c852f 3283 int err = 0;
36350114
GP
3284
3285 mutex_lock(&priv->state_lock);
3286
6a9764ef 3287 priv->channels.params.vlan_strip_disable = !enable;
ff9c852f
SM
3288 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3289 goto unlock;
3290
3291 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
36350114 3292 if (err)
6a9764ef 3293 priv->channels.params.vlan_strip_disable = enable;
36350114 3294
ff9c852f 3295unlock:
36350114
GP
3296 mutex_unlock(&priv->state_lock);
3297
3298 return err;
3299}
3300
45bf454a
MG
#ifdef CONFIG_RFS_ACCEL
/* NETIF_F_NTUPLE handler: enable/disable accelerated RFS steering. */
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return enable ? mlx5e_arfs_enable(priv) : mlx5e_arfs_disable(priv);
}
#endif
3315
0e405443
GP
/* Run @feature_handler when @feature differs between @wanted_features
 * and the current netdev->features, mirroring a successful toggle into
 * netdev->features.  On handler failure the feature bit is left as-is
 * and the handler's errno is returned.
 */
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0; /* this bit did not change */

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}
3338
/* ndo_set_features: apply each supported feature bit through its
 * handler.  Errors are OR-ed together, so any single failure makes the
 * whole call return -EINVAL (the individual errno is lost) while the
 * remaining bits are still attempted.
 */
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				   set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
				    set_feature_rx_fcs);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
3364
/* ndo_change_mtu.  With LRO or a striding RQ the open channels can
 * absorb the new MTU in place; otherwise the channels are re-opened
 * with the new size.  On failure the previous MTU is restored.
 */
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int curr_mtu;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	/* channel restart needed only for non-LRO, non-striding open channels */
	reset = !priv->channels.params.lro_en &&
		(priv->channels.params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	curr_mtu = netdev->mtu; /* saved for rollback */
	netdev->mtu = new_mtu;

	if (!reset) {
		mlx5e_set_dev_port_mtu(priv);
		goto out;
	}

	new_channels.params = priv->channels.params;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err) {
		netdev->mtu = curr_mtu;
		goto out;
	}

	/* swap channel sets, updating the port MTU mid-switch */
	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
3402
ef9814de
EBE
3403static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3404{
1170fbd8
FD
3405 struct mlx5e_priv *priv = netdev_priv(dev);
3406
ef9814de
EBE
3407 switch (cmd) {
3408 case SIOCSHWTSTAMP:
1170fbd8 3409 return mlx5e_hwstamp_set(priv, ifr);
ef9814de 3410 case SIOCGHWTSTAMP:
1170fbd8 3411 return mlx5e_hwstamp_get(priv, ifr);
ef9814de
EBE
3412 default:
3413 return -EOPNOTSUPP;
3414 }
3415}
3416
e80541ec 3417#ifdef CONFIG_MLX5_ESWITCH
66e49ded
SM
3418static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3419{
3420 struct mlx5e_priv *priv = netdev_priv(dev);
3421 struct mlx5_core_dev *mdev = priv->mdev;
3422
3423 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3424}
3425
79aab093
MS
3426static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3427 __be16 vlan_proto)
66e49ded
SM
3428{
3429 struct mlx5e_priv *priv = netdev_priv(dev);
3430 struct mlx5_core_dev *mdev = priv->mdev;
3431
79aab093
MS
3432 if (vlan_proto != htons(ETH_P_8021Q))
3433 return -EPROTONOSUPPORT;
3434
66e49ded
SM
3435 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3436 vlan, qos);
3437}
3438
f942380c
MHY
3439static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3440{
3441 struct mlx5e_priv *priv = netdev_priv(dev);
3442 struct mlx5_core_dev *mdev = priv->mdev;
3443
3444 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3445}
3446
1edc57e2
MHY
3447static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3448{
3449 struct mlx5e_priv *priv = netdev_priv(dev);
3450 struct mlx5_core_dev *mdev = priv->mdev;
3451
3452 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3453}
bd77bf1c
MHY
3454
3455static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3456 int max_tx_rate)
3457{
3458 struct mlx5e_priv *priv = netdev_priv(dev);
3459 struct mlx5_core_dev *mdev = priv->mdev;
3460
bd77bf1c 3461 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
c9497c98 3462 max_tx_rate, min_tx_rate);
bd77bf1c
MHY
3463}
3464
66e49ded
SM
3465static int mlx5_vport_link2ifla(u8 esw_link)
3466{
3467 switch (esw_link) {
3468 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3469 return IFLA_VF_LINK_STATE_DISABLE;
3470 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3471 return IFLA_VF_LINK_STATE_ENABLE;
3472 }
3473 return IFLA_VF_LINK_STATE_AUTO;
3474}
3475
3476static int mlx5_ifla_link2vport(u8 ifla_link)
3477{
3478 switch (ifla_link) {
3479 case IFLA_VF_LINK_STATE_DISABLE:
3480 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3481 case IFLA_VF_LINK_STATE_ENABLE:
3482 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3483 }
3484 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3485}
3486
3487static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3488 int link_state)
3489{
3490 struct mlx5e_priv *priv = netdev_priv(dev);
3491 struct mlx5_core_dev *mdev = priv->mdev;
3492
3493 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3494 mlx5_ifla_link2vport(link_state));
3495}
3496
3497static int mlx5e_get_vf_config(struct net_device *dev,
3498 int vf, struct ifla_vf_info *ivi)
3499{
3500 struct mlx5e_priv *priv = netdev_priv(dev);
3501 struct mlx5_core_dev *mdev = priv->mdev;
3502 int err;
3503
3504 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3505 if (err)
3506 return err;
3507 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3508 return 0;
3509}
3510
3511static int mlx5e_get_vf_stats(struct net_device *dev,
3512 int vf, struct ifla_vf_stats *vf_stats)
3513{
3514 struct mlx5e_priv *priv = netdev_priv(dev);
3515 struct mlx5_core_dev *mdev = priv->mdev;
3516
3517 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3518 vf_stats);
3519}
e80541ec 3520#endif
66e49ded 3521
1ad9a00a
PB
3522static void mlx5e_add_vxlan_port(struct net_device *netdev,
3523 struct udp_tunnel_info *ti)
b3f63c3d
MF
3524{
3525 struct mlx5e_priv *priv = netdev_priv(netdev);
3526
974c3f30
AD
3527 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3528 return;
3529
b3f63c3d
MF
3530 if (!mlx5e_vxlan_allowed(priv->mdev))
3531 return;
3532
974c3f30 3533 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
b3f63c3d
MF
3534}
3535
1ad9a00a
PB
3536static void mlx5e_del_vxlan_port(struct net_device *netdev,
3537 struct udp_tunnel_info *ti)
b3f63c3d
MF
3538{
3539 struct mlx5e_priv *priv = netdev_priv(netdev);
3540
974c3f30
AD
3541 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3542 return;
3543
b3f63c3d
MF
3544 if (!mlx5e_vxlan_allowed(priv->mdev))
3545 return;
3546
974c3f30 3547 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
b3f63c3d
MF
3548}
3549
27299841
GP
/* Decide whether a tunneled skb may keep its checksum/GSO offloads:
 * GRE is always offloadable; UDP tunnels only when the destination
 * port is a VXLAN port configured in HW.  Everything else loses the
 * CSUM and GSO feature bits.
 */
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5e_vxlan_lookup_port(priv, port))
			return features;
		/* unknown UDP port: falls out of the switch to "out" */
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
3585
/* ndo_features_check: strip per-skb offload bits the HW cannot honor
 * (generic VLAN/VXLAN limits, IPsec offload, unsupported tunnels).
 */
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	/* IPsec-offloaded skbs keep their features as-is */
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
3607
3947ca18
DJ
/* ndo_tx_timeout: log every stalled SQ, mark it disabled, and kick the
 * tx_timeout recovery worker if any stalled queue was found while the
 * device is up.
 */
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	/* one txq per (channel, tc) pair */
	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		/* stop further xmit on this SQ until recovery runs */
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		/* NOTE(review): cc/pc are read without the SQ lock here —
		 * presumably acceptable for a diagnostic print; confirm.
		 */
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
3630
86994156
RS
/* Install (or remove) an XDP program.  A full channel reset is needed
 * only when toggling XDP on/off (the RQ type changes); swapping one
 * program for another is done in place per-RQ.  XDP is mutually
 * exclusive with LRO and IPsec offload.
 */
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_params(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		/* quiesce the RQ so mlx5e_poll_rx_cq stops touching xdp_prog */
		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
3710
821b2e29 3711static u32 mlx5e_xdp_query(struct net_device *dev)
86994156
RS
3712{
3713 struct mlx5e_priv *priv = netdev_priv(dev);
821b2e29
MKL
3714 const struct bpf_prog *xdp_prog;
3715 u32 prog_id = 0;
86994156 3716
821b2e29
MKL
3717 mutex_lock(&priv->state_lock);
3718 xdp_prog = priv->channels.params.xdp_prog;
3719 if (xdp_prog)
3720 prog_id = xdp_prog->aux->id;
3721 mutex_unlock(&priv->state_lock);
3722
3723 return prog_id;
86994156
RS
3724}
3725
3726static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3727{
3728 switch (xdp->command) {
3729 case XDP_SETUP_PROG:
3730 return mlx5e_xdp_set(dev, xdp->prog);
3731 case XDP_QUERY_PROG:
821b2e29
MKL
3732 xdp->prog_id = mlx5e_xdp_query(dev);
3733 xdp->prog_attached = !!xdp->prog_id;
86994156
RS
3734 return 0;
3735 default:
3736 return -EINVAL;
3737 }
3738}
3739
80378384
CO
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->channels.num; i++)
		napi_schedule(&priv->channels.c[i]->napi);
}
#endif
3755
/* netdev callback table for the mlx5e ethernet device */
static const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};
3796
/* Verify the device exposes every capability this netdev driver relies
 * on.  Returns 0 on success or -EOPNOTSUPP when a hard requirement is
 * missing; softer gaps only produce warnings.
 */
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	/* these are only warnings — the netdev still works without them */
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
3821
58d52291
AS
3822u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3823{
3824 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3825
3826 return bf_buf_size -
3827 sizeof(struct mlx5e_tx_wqe) +
3828 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3829}
3830
d8c9660d
TT
3831void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
3832 u32 *indirection_rqt, int len,
85082dba
TT
3833 int num_channels)
3834{
d8c9660d
TT
3835 int node = mdev->priv.numa_node;
3836 int node_num_of_cores;
85082dba
TT
3837 int i;
3838
d8c9660d
TT
3839 if (node == -1)
3840 node = first_online_node;
3841
3842 node_num_of_cores = cpumask_weight(cpumask_of_node(node));
3843
3844 if (node_num_of_cores)
3845 num_channels = min_t(int, num_channels, node_num_of_cores);
3846
85082dba
TT
3847 for (i = 0; i < len; i++)
3848 indirection_rqt[i] = i % num_channels;
3849}
3850
b797a684
SM
3851static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
3852{
3853 enum pcie_link_width width;
3854 enum pci_bus_speed speed;
3855 int err = 0;
3856
3857 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
3858 if (err)
3859 return err;
3860
3861 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
3862 return -EINVAL;
3863
3864 switch (speed) {
3865 case PCIE_SPEED_2_5GT:
3866 *pci_bw = 2500 * width;
3867 break;
3868 case PCIE_SPEED_5_0GT:
3869 *pci_bw = 5000 * width;
3870 break;
3871 case PCIE_SPEED_8_0GT:
3872 *pci_bw = 8000 * width;
3873 break;
3874 default:
3875 return -EINVAL;
3876 }
3877
3878 return 0;
3879}
3880
3881static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
3882{
3883 return (link_speed && pci_bw &&
3884 (pci_bw < 40000) && (pci_bw < link_speed));
3885}
3886
0f6e4cf6
EBE
3887static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
3888{
3889 return !(link_speed && pci_bw &&
3890 (pci_bw <= 16000) && (pci_bw < link_speed));
3891}
3892
9908aa29
TT
/* Configure the RX CQ moderation mode (EQE- or CQE-based) together with
 * its default usec/packet limits; when adaptive moderation (rx_am) is
 * enabled, its default profile overrides the static values.
 */
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	/* CQE-based mode uses a different default usec value */
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	if (params->rx_am_enabled)
		params->rx_cq_moderation =
			mlx5e_am_get_def_profile(params->rx_cq_period_mode);

	/* keep the ethtool priv-flag in sync with the chosen mode */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
3913
2b029556
SM
/* Pick the device LRO timer period closest to (>=) @wanted_timeout.
 * The supported periods are organized in ascending order, so the loop
 * stops at the first period that qualifies; if none does, the last
 * (largest) one is used.
 */
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
3925
8f493ffd
SM
/* Fill @params with the default NIC netdev configuration: channel/TC
 * counts, SQ size, CQE-compression and HW-LRO heuristics (driven by
 * PCI bandwidth vs. link speed), RQ parameters, CQ moderation, TX
 * inline mode and RSS defaults.
 */
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels)
{
	u8 cq_period_mode = 0;
	u32 link_speed = 0;
	u32 pci_bw = 0;

	params->num_channels = max_channels;
	params->num_tc = 1;

	/* link speed and PCI bandwidth feed the heuristics below;
	 * failures leave them at 0, which the heuristics treat as unknown
	 */
	mlx5e_get_max_linkspeed(mdev, &link_speed);
	mlx5e_get_pci_bw(mdev, &pci_bw);
	mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
		      link_speed, pci_bw);

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	mlx5e_set_rq_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;

	/* TX inline */
	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	/* without wqe_vlan_insert, at least L2 must be inlined so SW can
	 * insert the VLAN tag
	 */
	if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}
f62b8bb8 3988
6a9764ef
SM
3989static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3990 struct net_device *netdev,
3991 const struct mlx5e_profile *profile,
3992 void *ppriv)
3993{
3994 struct mlx5e_priv *priv = netdev_priv(netdev);
57afead5 3995
6a9764ef
SM
3996 priv->mdev = mdev;
3997 priv->netdev = netdev;
3998 priv->profile = profile;
3999 priv->ppriv = ppriv;
c139dbfd 4000 priv->hard_mtu = MLX5E_ETH_HARD_MTU;
2d75b2bc 4001
6a9764ef 4002 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
9908aa29 4003
f62b8bb8
AV
4004 mutex_init(&priv->state_lock);
4005
4006 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4007 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3947ca18 4008 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
f62b8bb8
AV
4009 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4010}
4011
4012static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4013{
4014 struct mlx5e_priv *priv = netdev_priv(netdev);
4015
e1d7d349 4016 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
108805fc
SM
4017 if (is_zero_ether_addr(netdev->dev_addr) &&
4018 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4019 eth_hw_addr_random(netdev);
4020 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4021 }
f62b8bb8
AV
4022}
4023
e80541ec 4024#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
cb67b832
HHZ
4025static const struct switchdev_ops mlx5e_switchdev_ops = {
4026 .switchdev_port_attr_get = mlx5e_attr_get,
4027};
e80541ec 4028#endif
cb67b832 4029
6bfd390b 4030static void mlx5e_build_nic_netdev(struct net_device *netdev)
f62b8bb8
AV
4031{
4032 struct mlx5e_priv *priv = netdev_priv(netdev);
4033 struct mlx5_core_dev *mdev = priv->mdev;
94cb1ebb
EBE
4034 bool fcs_supported;
4035 bool fcs_enabled;
f62b8bb8
AV
4036
4037 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4038
e80541ec
SM
4039 netdev->netdev_ops = &mlx5e_netdev_ops;
4040
08fb1dac 4041#ifdef CONFIG_MLX5_CORE_EN_DCB
e80541ec
SM
4042 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4043 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
08fb1dac 4044#endif
66e49ded 4045
f62b8bb8
AV
4046 netdev->watchdog_timeo = 15 * HZ;
4047
4048 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4049
12be4b21 4050 netdev->vlan_features |= NETIF_F_SG;
f62b8bb8
AV
4051 netdev->vlan_features |= NETIF_F_IP_CSUM;
4052 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
4053 netdev->vlan_features |= NETIF_F_GRO;
4054 netdev->vlan_features |= NETIF_F_TSO;
4055 netdev->vlan_features |= NETIF_F_TSO6;
4056 netdev->vlan_features |= NETIF_F_RXCSUM;
4057 netdev->vlan_features |= NETIF_F_RXHASH;
4058
4059 if (!!MLX5_CAP_ETH(mdev, lro_cap))
4060 netdev->vlan_features |= NETIF_F_LRO;
4061
4062 netdev->hw_features = netdev->vlan_features;
e4cf27bd 4063 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
f62b8bb8
AV
4064 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4065 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4066
27299841
GP
4067 if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4068 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
b3f63c3d 4069 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
f3ed653c 4070 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
b3f63c3d
MF
4071 netdev->hw_enc_features |= NETIF_F_TSO;
4072 netdev->hw_enc_features |= NETIF_F_TSO6;
27299841
GP
4073 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4074 }
4075
4076 if (mlx5e_vxlan_allowed(mdev)) {
4077 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4078 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4079 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4080 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b49663c8 4081 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
b3f63c3d
MF
4082 }
4083
27299841
GP
4084 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4085 netdev->hw_features |= NETIF_F_GSO_GRE |
4086 NETIF_F_GSO_GRE_CSUM;
4087 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4088 NETIF_F_GSO_GRE_CSUM;
4089 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4090 NETIF_F_GSO_GRE_CSUM;
4091 }
4092
94cb1ebb
EBE
4093 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4094
4095 if (fcs_supported)
4096 netdev->hw_features |= NETIF_F_RXALL;
4097
102722fc
GE
4098 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4099 netdev->hw_features |= NETIF_F_RXFCS;
4100
f62b8bb8 4101 netdev->features = netdev->hw_features;
6a9764ef 4102 if (!priv->channels.params.lro_en)
f62b8bb8
AV
4103 netdev->features &= ~NETIF_F_LRO;
4104
94cb1ebb
EBE
4105 if (fcs_enabled)
4106 netdev->features &= ~NETIF_F_RXALL;
4107
102722fc
GE
4108 if (!priv->channels.params.scatter_fcs_en)
4109 netdev->features &= ~NETIF_F_RXFCS;
4110
e8f887ac
AV
4111#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4112 if (FT_CAP(flow_modify_en) &&
4113 FT_CAP(modify_root) &&
4114 FT_CAP(identified_miss_table_mode) &&
1cabe6b0
MG
4115 FT_CAP(flow_table_modify)) {
4116 netdev->hw_features |= NETIF_F_HW_TC;
4117#ifdef CONFIG_RFS_ACCEL
4118 netdev->hw_features |= NETIF_F_NTUPLE;
4119#endif
4120 }
e8f887ac 4121
f62b8bb8
AV
4122 netdev->features |= NETIF_F_HIGHDMA;
4123
4124 netdev->priv_flags |= IFF_UNICAST_FLT;
4125
4126 mlx5e_set_netdev_dev_addr(netdev);
cb67b832 4127
e80541ec 4128#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
a9f7705f 4129 if (MLX5_VPORT_MANAGER(mdev))
cb67b832
HHZ
4130 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4131#endif
547eede0
IT
4132
4133 mlx5e_ipsec_build_netdev(priv);
f62b8bb8
AV
4134}
4135
593cf338
RS
4136static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
4137{
4138 struct mlx5_core_dev *mdev = priv->mdev;
4139 int err;
4140
4141 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4142 if (err) {
4143 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4144 priv->q_counter = 0;
4145 }
4146}
4147
4148static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
4149{
4150 if (!priv->q_counter)
4151 return;
4152
4153 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4154}
4155
/* Profile .init callback for the plain NIC: set up priv state, IPSec,
 * netdev features and the VXLAN port table. An IPSec init failure is
 * logged but deliberately not propagated — the netdev comes up without it.
 */
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);

	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}
4171
4172static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4173{
547eede0 4174 mlx5e_ipsec_cleanup(priv);
6bfd390b 4175 mlx5e_vxlan_cleanup(priv);
127ea380 4176
6a9764ef
SM
4177 if (priv->channels.params.xdp_prog)
4178 bpf_prog_put(priv->channels.params.xdp_prog);
6bfd390b
HHZ
4179}
4180
4181static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4182{
4183 struct mlx5_core_dev *mdev = priv->mdev;
4184 int err;
6bfd390b 4185
8f493ffd
SM
4186 err = mlx5e_create_indirect_rqt(priv);
4187 if (err)
6bfd390b 4188 return err;
6bfd390b
HHZ
4189
4190 err = mlx5e_create_direct_rqts(priv);
8f493ffd 4191 if (err)
6bfd390b 4192 goto err_destroy_indirect_rqts;
6bfd390b
HHZ
4193
4194 err = mlx5e_create_indirect_tirs(priv);
8f493ffd 4195 if (err)
6bfd390b 4196 goto err_destroy_direct_rqts;
6bfd390b
HHZ
4197
4198 err = mlx5e_create_direct_tirs(priv);
8f493ffd 4199 if (err)
6bfd390b 4200 goto err_destroy_indirect_tirs;
6bfd390b
HHZ
4201
4202 err = mlx5e_create_flow_steering(priv);
4203 if (err) {
4204 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4205 goto err_destroy_direct_tirs;
4206 }
4207
4208 err = mlx5e_tc_init(priv);
4209 if (err)
4210 goto err_destroy_flow_steering;
4211
4212 return 0;
4213
4214err_destroy_flow_steering:
4215 mlx5e_destroy_flow_steering(priv);
4216err_destroy_direct_tirs:
4217 mlx5e_destroy_direct_tirs(priv);
4218err_destroy_indirect_tirs:
4219 mlx5e_destroy_indirect_tirs(priv);
4220err_destroy_direct_rqts:
8f493ffd 4221 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4222err_destroy_indirect_rqts:
4223 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4224 return err;
4225}
4226
4227static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4228{
6bfd390b
HHZ
4229 mlx5e_tc_cleanup(priv);
4230 mlx5e_destroy_flow_steering(priv);
4231 mlx5e_destroy_direct_tirs(priv);
4232 mlx5e_destroy_indirect_tirs(priv);
8f493ffd 4233 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4234 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4235}
4236
4237static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4238{
4239 int err;
4240
4241 err = mlx5e_create_tises(priv);
4242 if (err) {
4243 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4244 return err;
4245 }
4246
4247#ifdef CONFIG_MLX5_CORE_EN_DCB
e207b7e9 4248 mlx5e_dcbnl_initialize(priv);
6bfd390b
HHZ
4249#endif
4250 return 0;
4251}
4252
4253static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4254{
4255 struct net_device *netdev = priv->netdev;
4256 struct mlx5_core_dev *mdev = priv->mdev;
2c3b5bee
SM
4257 u16 max_mtu;
4258
4259 mlx5e_init_l2_addr(priv);
4260
63bfd399
EBE
4261 /* Marking the link as currently not needed by the Driver */
4262 if (!netif_running(netdev))
4263 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4264
2c3b5bee
SM
4265 /* MTU range: 68 - hw-specific max */
4266 netdev->min_mtu = ETH_MIN_MTU;
4267 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
c139dbfd 4268 netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
2c3b5bee 4269 mlx5e_set_dev_port_mtu(priv);
6bfd390b 4270
7907f23a
AH
4271 mlx5_lag_add(mdev, netdev);
4272
6bfd390b 4273 mlx5e_enable_async_events(priv);
127ea380 4274
a9f7705f 4275 if (MLX5_VPORT_MANAGER(priv->mdev))
1d447a39 4276 mlx5e_register_vport_reps(priv);
2c3b5bee 4277
610e89e0
SM
4278 if (netdev->reg_state != NETREG_REGISTERED)
4279 return;
4280
4281 /* Device already registered: sync netdev system state */
4282 if (mlx5e_vxlan_allowed(mdev)) {
4283 rtnl_lock();
4284 udp_tunnel_get_rx_info(netdev);
4285 rtnl_unlock();
4286 }
4287
4288 queue_work(priv->wq, &priv->set_rx_mode_work);
2c3b5bee
SM
4289
4290 rtnl_lock();
4291 if (netif_running(netdev))
4292 mlx5e_open(netdev);
4293 netif_device_attach(netdev);
4294 rtnl_unlock();
6bfd390b
HHZ
4295}
4296
4297static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4298{
3deef8ce 4299 struct mlx5_core_dev *mdev = priv->mdev;
3deef8ce 4300
2c3b5bee
SM
4301 rtnl_lock();
4302 if (netif_running(priv->netdev))
4303 mlx5e_close(priv->netdev);
4304 netif_device_detach(priv->netdev);
4305 rtnl_unlock();
4306
6bfd390b 4307 queue_work(priv->wq, &priv->set_rx_mode_work);
1d447a39 4308
a9f7705f 4309 if (MLX5_VPORT_MANAGER(priv->mdev))
1d447a39
SM
4310 mlx5e_unregister_vport_reps(priv);
4311
6bfd390b 4312 mlx5e_disable_async_events(priv);
3deef8ce 4313 mlx5_lag_remove(mdev);
6bfd390b
HHZ
4314}
4315
4316static const struct mlx5e_profile mlx5e_nic_profile = {
4317 .init = mlx5e_nic_init,
4318 .cleanup = mlx5e_nic_cleanup,
4319 .init_rx = mlx5e_init_nic_rx,
4320 .cleanup_rx = mlx5e_cleanup_nic_rx,
4321 .init_tx = mlx5e_init_nic_tx,
4322 .cleanup_tx = mlx5e_cleanup_nic_tx,
4323 .enable = mlx5e_nic_enable,
4324 .disable = mlx5e_nic_disable,
3834a5e6 4325 .update_stats = mlx5e_update_ndo_stats,
6bfd390b 4326 .max_nch = mlx5e_get_max_num_channels,
7ca42c80 4327 .update_carrier = mlx5e_update_carrier,
20fd0c19
SM
4328 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
4329 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
6bfd390b
HHZ
4330 .max_tc = MLX5E_MAX_NUM_TC,
4331};
4332
2c3b5bee
SM
4333/* mlx5e generic netdev management API (move to en_common.c) */
4334
26e59d80
MHY
4335struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4336 const struct mlx5e_profile *profile,
4337 void *ppriv)
f62b8bb8 4338{
26e59d80 4339 int nch = profile->max_nch(mdev);
f62b8bb8
AV
4340 struct net_device *netdev;
4341 struct mlx5e_priv *priv;
f62b8bb8 4342
08fb1dac 4343 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 4344 nch * profile->max_tc,
08fb1dac 4345 nch);
f62b8bb8
AV
4346 if (!netdev) {
4347 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4348 return NULL;
4349 }
4350
be4891af
SM
4351#ifdef CONFIG_RFS_ACCEL
4352 netdev->rx_cpu_rmap = mdev->rmap;
4353#endif
4354
127ea380 4355 profile->init(mdev, netdev, profile, ppriv);
f62b8bb8
AV
4356
4357 netif_carrier_off(netdev);
4358
4359 priv = netdev_priv(netdev);
4360
7bb29755
MF
4361 priv->wq = create_singlethread_workqueue("mlx5e");
4362 if (!priv->wq)
26e59d80
MHY
4363 goto err_cleanup_nic;
4364
4365 return netdev;
4366
4367err_cleanup_nic:
31ac9338
OG
4368 if (profile->cleanup)
4369 profile->cleanup(priv);
26e59d80
MHY
4370 free_netdev(netdev);
4371
4372 return NULL;
4373}
4374
2c3b5bee 4375int mlx5e_attach_netdev(struct mlx5e_priv *priv)
26e59d80 4376{
2c3b5bee 4377 struct mlx5_core_dev *mdev = priv->mdev;
26e59d80 4378 const struct mlx5e_profile *profile;
26e59d80
MHY
4379 int err;
4380
26e59d80
MHY
4381 profile = priv->profile;
4382 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 4383
6bfd390b
HHZ
4384 err = profile->init_tx(priv);
4385 if (err)
ec8b9981 4386 goto out;
5c50368f 4387
a43b25da 4388 err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
5c50368f
AS
4389 if (err) {
4390 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
6bfd390b 4391 goto err_cleanup_tx;
5c50368f
AS
4392 }
4393
6bfd390b
HHZ
4394 err = profile->init_rx(priv);
4395 if (err)
5c50368f 4396 goto err_close_drop_rq;
5c50368f 4397
593cf338
RS
4398 mlx5e_create_q_counter(priv);
4399
6bfd390b
HHZ
4400 if (profile->enable)
4401 profile->enable(priv);
f62b8bb8 4402
26e59d80 4403 return 0;
5c50368f
AS
4404
4405err_close_drop_rq:
a43b25da 4406 mlx5e_close_drop_rq(&priv->drop_rq);
5c50368f 4407
6bfd390b
HHZ
4408err_cleanup_tx:
4409 profile->cleanup_tx(priv);
5c50368f 4410
26e59d80
MHY
4411out:
4412 return err;
f62b8bb8
AV
4413}
4414
2c3b5bee 4415void mlx5e_detach_netdev(struct mlx5e_priv *priv)
26e59d80 4416{
26e59d80
MHY
4417 const struct mlx5e_profile *profile = priv->profile;
4418
4419 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80 4420
37f304d1
SM
4421 if (profile->disable)
4422 profile->disable(priv);
4423 flush_workqueue(priv->wq);
4424
26e59d80
MHY
4425 mlx5e_destroy_q_counter(priv);
4426 profile->cleanup_rx(priv);
a43b25da 4427 mlx5e_close_drop_rq(&priv->drop_rq);
26e59d80 4428 profile->cleanup_tx(priv);
26e59d80
MHY
4429 cancel_delayed_work_sync(&priv->update_stats_work);
4430}
4431
2c3b5bee
SM
4432void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4433{
4434 const struct mlx5e_profile *profile = priv->profile;
4435 struct net_device *netdev = priv->netdev;
4436
4437 destroy_workqueue(priv->wq);
4438 if (profile->cleanup)
4439 profile->cleanup(priv);
4440 free_netdev(netdev);
4441}
4442
26e59d80
MHY
4443/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
4444 * hardware contexts and to connect it to the current netdev.
4445 */
4446static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4447{
4448 struct mlx5e_priv *priv = vpriv;
4449 struct net_device *netdev = priv->netdev;
4450 int err;
4451
4452 if (netif_device_present(netdev))
4453 return 0;
4454
4455 err = mlx5e_create_mdev_resources(mdev);
4456 if (err)
4457 return err;
4458
2c3b5bee 4459 err = mlx5e_attach_netdev(priv);
26e59d80
MHY
4460 if (err) {
4461 mlx5e_destroy_mdev_resources(mdev);
4462 return err;
4463 }
4464
4465 return 0;
4466}
4467
4468static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4469{
4470 struct mlx5e_priv *priv = vpriv;
4471 struct net_device *netdev = priv->netdev;
4472
4473 if (!netif_device_present(netdev))
4474 return;
4475
2c3b5bee 4476 mlx5e_detach_netdev(priv);
26e59d80
MHY
4477 mlx5e_destroy_mdev_resources(mdev);
4478}
4479
b50d292b
HHZ
4480static void *mlx5e_add(struct mlx5_core_dev *mdev)
4481{
07c9f1e5
SM
4482 struct net_device *netdev;
4483 void *rpriv = NULL;
26e59d80 4484 void *priv;
26e59d80 4485 int err;
b50d292b 4486
26e59d80
MHY
4487 err = mlx5e_check_required_hca_cap(mdev);
4488 if (err)
b50d292b
HHZ
4489 return NULL;
4490
e80541ec 4491#ifdef CONFIG_MLX5_ESWITCH
a9f7705f 4492 if (MLX5_VPORT_MANAGER(mdev)) {
07c9f1e5 4493 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
1d447a39 4494 if (!rpriv) {
07c9f1e5 4495 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
1d447a39
SM
4496 return NULL;
4497 }
1d447a39 4498 }
e80541ec 4499#endif
127ea380 4500
1d447a39 4501 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
26e59d80
MHY
4502 if (!netdev) {
4503 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
07c9f1e5 4504 goto err_free_rpriv;
26e59d80
MHY
4505 }
4506
4507 priv = netdev_priv(netdev);
4508
4509 err = mlx5e_attach(mdev, priv);
4510 if (err) {
4511 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4512 goto err_destroy_netdev;
4513 }
4514
4515 err = register_netdev(netdev);
4516 if (err) {
4517 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4518 goto err_detach;
b50d292b 4519 }
26e59d80
MHY
4520
4521 return priv;
4522
4523err_detach:
4524 mlx5e_detach(mdev, priv);
26e59d80 4525err_destroy_netdev:
2c3b5bee 4526 mlx5e_destroy_netdev(priv);
07c9f1e5 4527err_free_rpriv:
1d447a39 4528 kfree(rpriv);
26e59d80 4529 return NULL;
b50d292b
HHZ
4530}
4531
b50d292b
HHZ
4532static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4533{
4534 struct mlx5e_priv *priv = vpriv;
1d447a39 4535 void *ppriv = priv->ppriv;
127ea380 4536
5e1e93c7 4537 unregister_netdev(priv->netdev);
26e59d80 4538 mlx5e_detach(mdev, vpriv);
2c3b5bee 4539 mlx5e_destroy_netdev(priv);
1d447a39 4540 kfree(ppriv);
b50d292b
HHZ
4541}
4542
f62b8bb8
AV
4543static void *mlx5e_get_netdev(void *vpriv)
4544{
4545 struct mlx5e_priv *priv = vpriv;
4546
4547 return priv->netdev;
4548}
4549
4550static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
4551 .add = mlx5e_add,
4552 .remove = mlx5e_remove,
26e59d80
MHY
4553 .attach = mlx5e_attach,
4554 .detach = mlx5e_detach,
f62b8bb8
AV
4555 .event = mlx5e_async_event,
4556 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4557 .get_dev = mlx5e_get_netdev,
4558};
4559
4560void mlx5e_init(void)
4561{
2ac9cfe7 4562 mlx5e_ipsec_build_inverse_table();
665bc539 4563 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
4564 mlx5_register_interface(&mlx5e_interface);
4565}
4566
4567void mlx5e_cleanup(void)
4568{
4569 mlx5_unregister_interface(&mlx5e_interface);
4570}