/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
        bool                    am_enabled;
};

struct mlx5e_sq_param {
        u32                     sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param    wq;
        u16                     max_inline;
        u8                      min_inline_mode;
};

struct mlx5e_cq_param {
        u32                     cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param    wq;
        u16                     eq_ix;
        u8                      cq_period_mode;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param   rq;
        struct mlx5e_sq_param   sq;
        struct mlx5e_sq_param   xdp_sq;
        struct mlx5e_sq_param   icosq;
        struct mlx5e_cq_param   rx_cq;
        struct mlx5e_cq_param   tx_cq;
        struct mlx5e_cq_param   icosq_cq;
};

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN(mdev, striding_rq) &&
                MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
                MLX5_CAP_ETH(mdev, reg_umr_sq);
}

void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
{
        priv->params.rq_wq_type = rq_type;
        priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                priv->params.log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                priv->params.mpwqe_log_stride_sz =
                        MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
                        MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
                        MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
                priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        priv->params.mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                priv->params.log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

                /* Extra room needed for build_skb */
                priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }
        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
                                                    BIT(priv->params.log_rq_size));

        mlx5_core_info(priv->mdev,
                       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
                       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
                       BIT(priv->params.log_rq_size),
                       BIT(priv->params.mpwqe_log_stride_sz),
                       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

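/* Sizing note on the stride math above: the striding RQ carves each WQE
 * into power-of-two strides, and mpwqe_log_num_strides is derived as
 * MLX5_MPWRQ_LOG_WQE_SZ - mpwqe_log_stride_sz, so stride size times
 * stride count always multiplies back to one fixed WQE byte size.
 * For example, assuming a 256KB WQE (log 18) and 64B strides (log 6),
 * each WQE holds 2^12 = 4096 strides; the actual log values depend on
 * the device caps and the CQE-compress private flag checked above.
 */
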
static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
{
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
                    !priv->xdp_prog ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
        mlx5e_set_rq_type_params(priv, rq_type);
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP) {
                netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
        } else {
                netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
        }
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
        int err;

        rtnl_lock();
        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
        mlx5e_close_locked(priv->netdev);
        err = mlx5e_open_locked(priv->netdev);
        if (err)
                netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
                           err);
unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
}

static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = &c->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_tx += rq_stats->xdp_tx;
                s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
                s->rx_cache_reuse += rq_stats->cache_reuse;
                s->rx_cache_full += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy += rq_stats->cache_busy;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                        s->tx_tso_packets       += sq_stats->tso_packets;
                        s->tx_tso_bytes         += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
        }

        /* Update calculated offload counters */
        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                           priv->stats.pport.phy_counters,
                                           counter_set.phys_layer_cntrs.link_down_events);
}

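/* The two "calculated" counters above are derived rather than read from
 * hardware: any TX packet not counted as csum_none or csum_partial_inner
 * must have used plain partial checksum offload, and any RX packet not
 * marked csum_none or csum_complete is reported as csum_unnecessary.
 * This only stays consistent because all the addends come from the same
 * pass over the per-channel stats.
 */
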
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;
        u32 *in;

        in = mlx5_vzalloc(sz);
        if (!in)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
                out = pstats->phy_statistical_counters;
                MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }

        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }

free_out:
        kvfree(in);
}

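/* All the PPCNT groups above share one access pattern: a single input
 * mailbox is reused, only the "grp" field (plus "prio_tc" for the
 * per-priority group) changes between mlx5_core_access_reg() calls, and
 * each call fills a different output buffer inside priv->stats.pport.
 */
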
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

        if (!priv->q_counter)
                return;

        mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
                                      &qcnt->rx_out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;
        u32 *in;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        in = mlx5_vzalloc(sz);
        if (!in)
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);

        kvfree(in);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->profile->update_stats(priv);
                queue_delayed_work(priv->wq, dwork,
                                   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;
        struct ptp_clock_event ptp_event;
        struct mlx5_eqe *eqe = NULL;

        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;

        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
        case MLX5_DEV_EVENT_PPS:
                eqe = (struct mlx5_eqe *)param;
                ptp_event.type = PTP_CLOCK_EXTTS;
                ptp_event.index = eqe->data.pps.pin;
                ptp_event.timestamp =
                        timecounter_cyc2time(&priv->tstamp.clock,
                                             be64_to_cpu(eqe->data.pps.time_stamp));
                mlx5e_pps_event_handler(vpriv, &ptp_event);
                break;
        default:
                break;
        }
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

static inline int mlx5e_get_wqe_mtt_sz(void)
{
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the mtt array, we allocate
         * a little more.
         */
        return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
                     MLX5_UMR_MTT_ALIGNMENT);
}

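/* A worked example of the MTT sizing above, assuming 4KB pages (so 64
 * pages per WQE) and a 64-byte MLX5_UMR_MTT_ALIGNMENT: 64 entries * 8
 * bytes = 512 bytes, already a multiple of 64, so ALIGN() pads nothing
 * in that case and only matters for odd combinations of page size and
 * WQE size.
 */
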
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
                                       struct mlx5e_icosq *sq,
                                       struct mlx5e_umr_wqe *wqe,
                                       u16 ix)
{
        struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

        cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                      ds_cnt);
        cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->imm       = rq->mkey_be;

        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

        dseg->lkey = sq->mkey_be;
        dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
                                     struct mlx5e_channel *c)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
        int i;

        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
                                      GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;

        /* We allocate more than mtt_sz as we will align the pointer */
        rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
                                              cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
                                        MLX5_UMR_ALIGN);
                wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
                                                  PCI_DMA_TODEVICE);
                if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
                        goto err_unmap_mtts;

                mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
        }

        return 0;

err_unmap_mtts:
        while (--i >= 0) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
        kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
        kfree(rq->mpwqe.info);

err_out:
        return -ENOMEM;
}

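/* The allocation scheme above is the usual manual-alignment idiom:
 * kzalloc_node() gives no MLX5_UMR_ALIGN guarantee, so each per-WQE MTT
 * array gets a slot over-allocated by (alignment - 1) bytes and
 * PTR_ALIGN() picks the first properly aligned address inside it; the
 * original unaligned pointer is kept in mtt_no_align so kfree() still
 * works on teardown.
 */
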
static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int i;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
        kfree(rq->mpwqe.mtt_no_align);
        kfree(rq->mpwqe.info);
}

static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
                                 u64 npages, u8 page_shift,
                                 struct mlx5_core_mkey *umr_mkey)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        if (!MLX5E_VALID_NUM_MTTS(npages))
                return -EINVAL;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
        MLX5_SET64(mkc, mkc, len, npages << page_shift);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 MLX5_MTT_OCTW(npages));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);

        err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = rq->priv;
        u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));

        return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                          struct mlx5e_rq_param *param,
                          struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        u32 frag_sz;
        int npages;
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);

        rq->wq_type = priv->params.rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = &priv->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;

        rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
                err = PTR_ERR(rq->xdp_prog);
                rq->xdp_prog = NULL;
                goto err_rq_wq_destroy;
        }

        if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
                rq->rx_headroom = XDP_PACKET_HEADROOM;
        } else {
                rq->buff.map_dir = DMA_FROM_DEVICE;
                rq->rx_headroom = MLX5_RX_HEADROOM;
        }

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                if (mlx5e_is_vf_vport_rep(priv)) {
                        err = -EINVAL;
                        goto err_rq_wq_destroy;
                }

                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);

                rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->buff.wqe_sz;

                err = mlx5e_create_rq_umr_mkey(rq);
                if (err)
                        goto err_rq_wq_destroy;
                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
                if (err)
                        goto err_destroy_umr_mkey;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
                                            GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->dma_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }

                if (mlx5e_is_vf_vport_rep(priv))
                        rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
                else
                        rq->handle_rx_cqe = mlx5e_handle_rx_cqe;

                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

                rq->buff.wqe_sz = (priv->params.lro_en) ?
                        priv->params.lro_wqe_sz :
                        MLX5E_SW2HW_MTU(priv->netdev->mtu);
                byte_count = rq->buff.wqe_sz;

                /* calc the required page order */
                frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);

                npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
                rq->buff.page_order = order_base_2(npages);

                byte_count |= MLX5_HW_START_PADDING;
                rq->mkey_be = c->mkey_be;
        }

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

                wqe->data.byte_count = cpu_to_be32(byte_count);
                wqe->data.lkey = rq->mkey_be;
        }

        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = priv->params.rx_cq_period_mode;

        rq->page_cache.head = 0;
        rq->page_cache.tail = 0;

        return 0;

err_destroy_umr_mkey:
        mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
        int i;

        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);

        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_mpwqe_info(rq);
                mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->dma_info);
        }

        for (i = rq->page_cache.head; i != rq->page_cache.tail;
             i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
                struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

                mlx5e_page_release(rq, dma_info, false);
        }
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

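/* The wait above is a bounded poll: with a 20 second budget and 20ms
 * naps it checks wq->cur_sz at most ~1000 times before giving up with
 * -ETIMEDOUT, i.e. the device never posted the minimum number of RX
 * WQEs in time.
 */
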
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe;
        __be16 wqe_ix_be;
        u16 wqe_ix;

        /* UMR WQE (if in progress) is always at wq->head */
        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
                mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
                wqe_ix    = be16_to_cpu(wqe_ix_be);
                wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
                rq->dealloc_wqe(rq, wqe_ix);
                mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
                               &wqe->next.next_wqe_index);
        }
}

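/* The drain above walks the hardware linked list rather than indexing
 * 0..wq_sz: only WQEs still owned by HW are linked, each dealloc_wqe()
 * releases one WQE's pages and mlx5_wq_ll_pop() unlinks it, so the loop
 * terminates exactly when the list is empty. The UMR special case is
 * needed because an in-flight UMR WQE is tracked at wq->head rather
 * than on the list.
 */
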
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        struct mlx5e_icosq *sq = &c->icosq;
        u16 pi = sq->pc & sq->wq.sz_m1;
        struct mlx5e_tx_wqe *nopwqe;
        int err;

        err = mlx5e_alloc_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_create_rq(rq, param);
        if (err)
                goto err_free_rq;

        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_destroy_rq;

        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
        sq->db.ico_wqe[pi].num_wqebbs = 1;
        nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
        return 0;

err_destroy_rq:
        clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        mlx5e_destroy_rq(rq);
err_free_rq:
        mlx5e_free_rq(rq);

        return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
        cancel_work_sync(&rq->am.work);

        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
        kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
                                 GFP_KERNEL, numa);
        if (!sq->db.di) {
                mlx5e_free_xdpsq_db(sq);
                return -ENOMEM;
        }

        return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_xdpsq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = param->min_inline_mode;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
        mlx5e_free_xdpsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
        kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
        u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
                                      GFP_KERNEL, numa);
        if (!sq->db.ico_wqe)
                return -ENOMEM;

        return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
                             int tc,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_icosq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
        mlx5e_free_icosq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
        kfree(sq->db.wqe_info);
        kfree(sq->db.dma_fifo);
        kfree(sq->db.skb);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->db.skb = kzalloc_node(wq_sz * sizeof(*sq->db.skb),
                                  GFP_KERNEL, numa);
        sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
                                       GFP_KERNEL, numa);
        sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
                                       GFP_KERNEL, numa);
        if (!sq->db.skb || !sq->db.dma_fifo || !sq->db.wqe_info) {
                mlx5e_free_txqsq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int tc,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_txqsq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        int txq_ix;
        int err;

        sq->pdev      = c->pdev;
        sq->tstamp    = &priv->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->max_inline      = param->max_inline;
        sq->min_inline_mode = param->min_inline_mode;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->channels.num;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
        priv->txq_to_sq_map[txq_ix] = sq;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
        struct mlx5_wq_ctrl        *wq_ctrl;
        u32                         cqn;
        u32                         tisn;
        u8                          tis_lst_sz;
        u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5e_priv *priv,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_create_sq_param *csp,
                           u32 *sqn)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * csp->wq_ctrl->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));
        MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
        MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
        MLX5_SET(sqc, sqc, cqn, csp->cqn);

        if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, priv->mdev->mlx5e_res.bfreg.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);

        mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, sqn);

        kvfree(in);

        return err;
}

struct mlx5e_modify_sq_param {
        int curr_state;
        int next_state;
        bool rl_update;
        int rl_index;
};

static int mlx5e_modify_sq(struct mlx5e_priv *priv,
                           u32 sqn,
                           struct mlx5e_modify_sq_param *p)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
        MLX5_SET(sqc, sqc, state, p->next_state);
        if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
                MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
        }

        err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_priv *priv, u32 sqn)
{
        mlx5_core_destroy_sq(priv->mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5e_priv *priv,
                               struct mlx5e_sq_param *param,
                               struct mlx5e_create_sq_param *csp,
                               u32 *sqn)
{
        struct mlx5e_modify_sq_param msp = {0};
        int err;

        err = mlx5e_create_sq(priv, param, csp, sqn);
        if (err)
                return err;

        msp.curr_state = MLX5_SQC_STATE_RST;
        msp.next_state = MLX5_SQC_STATE_RDY;
        err = mlx5e_modify_sq(priv, *sqn, &msp);
        if (err)
                mlx5e_destroy_sq(priv, *sqn);

        return err;
}

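/* SQs follow the device's RST -> RDY -> ERR state machine: the helper
 * above creates the SQ in RST, immediately modifies it to RDY so the
 * caller gets a usable send queue as soon as it returns, and tears the
 * SQ down again if that transition fails.
 */
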
static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
                            int tc,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_txqsq *sq)
{
        struct mlx5e_create_sq_param csp = {};
        struct mlx5e_priv *priv = c->priv;
        u32 tx_rate;
        int txq_ix;
        int err;

        err = mlx5e_alloc_txqsq(c, tc, param, sq);
        if (err)
                return err;

        csp.tisn            = priv->tisn[sq->tc];
        csp.tis_lst_sz      = 1;
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
        if (err)
                goto err_free_txqsq;

        txq_ix = c->ix + tc * priv->channels.num;
        tx_rate = priv->tx_rates[txq_ix];
        if (tx_rate)
                mlx5e_set_sq_maxrate(priv->netdev, sq, tx_rate);

        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);
        return 0;

err_free_txqsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_txqsq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

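/* Stopping the queue under __netif_tx_lock_bh() is what makes the helper
 * above safe against a concurrent ndo_start_xmit: once the lock is
 * dropped, no sender can still be inside the queue, so callers may
 * reclaim descriptors after this returns.
 */
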
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
        napi_synchronize(&c->napi);

        netif_tx_disable_queue(sq->txq);

        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
                struct mlx5e_tx_wqe *nop;

                sq->db.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
                nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
        }

        mlx5e_destroy_sq(priv, sq->sqn);
        if (sq->rate_limit)
                mlx5_rl_remove_rate(mdev, sq->rate_limit);
        mlx5e_free_txqsq_descs(sq);
        mlx5e_free_txqsq(sq);
}

1290
1291static int mlx5e_open_icosq(struct mlx5e_channel *c,
1292 int tc,
1293 struct mlx5e_sq_param *param,
1294 struct mlx5e_icosq *sq)
1295{
1296 struct mlx5e_create_sq_param csp = {};
1297 int err;
1298
1299 err = mlx5e_alloc_icosq(c, tc, param, sq);
1300 if (err)
1301 return err;
1302
1303 csp.cqn = sq->cq.mcq.cqn;
1304 csp.wq_ctrl = &sq->wq_ctrl;
1305 csp.min_inline_mode = param->min_inline_mode;
1306 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1307 err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
1308 if (err)
1309 goto err_free_icosq;
1310
1311 return 0;
1312
1313err_free_icosq:
1314 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1315 mlx5e_free_icosq(sq);
1316
1317 return err;
1318}
1319
static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->priv, sq->sqn);
        mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_xdpsq *sq)
{
        unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
        struct mlx5e_create_sq_param csp = {};
        struct mlx5e_priv *priv = c->priv;
        unsigned int inline_hdr_sz = 0;
        int err;
        int i;

        err = mlx5e_alloc_xdpsq(c, param, sq);
        if (err)
                return err;

        csp.tis_lst_sz      = 1;
        csp.tisn            = priv->tisn[0]; /* tc = 0 */
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
        if (err)
                goto err_free_xdpsq;

        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
                ds_cnt++;
        }

        /* Pre initialize fixed WQE fields */
        for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
                struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
                struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
                struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
                struct mlx5_wqe_data_seg *dseg;

                cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
                eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

                dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
                dseg->lkey = sq->mkey_be;
        }

        return 0;

err_free_xdpsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_xdpsq(sq);

        return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->priv, sq->sqn);
        mlx5e_free_xdpsq_descs(sq);
        mlx5e_free_xdpsq(sq);
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
                          struct mlx5e_cq_param *param,
                          struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
        mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         struct mlx5e_cq_moder moderation)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_alloc_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_create_cq(cq, param);
        if (err)
                goto err_free_cq;

        if (MLX5_CAP_GEN(mdev, cq_moderation))
                mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                               moderation.usec,
                                               moderation.pkts);
        return 0;

err_free_cq:
        mlx5e_free_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_destroy_cq(cq);
        mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_txqsq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_txqsq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_txqsq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < priv->profile->max_tc; i++)
                priv->channeltc_to_txq_map[ix][i] = ix + i * priv->channels.num;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_modify_sq_param msp = {0};
        u16 rl_index = 0;
        int err;

        if (rate == sq->rate_limit)
                /* nothing to do */
                return 0;

        if (sq->rate_limit)
                /* remove current rl index to free space to next ones */
                mlx5_rl_remove_rate(mdev, sq->rate_limit);

        sq->rate_limit = 0;

        if (rate) {
                err = mlx5_rl_add_rate(mdev, rate, &rl_index);
                if (err) {
                        netdev_err(dev, "Failed configuring rate %u: %d\n",
                                   rate, err);
                        return err;
                }
        }

        msp.curr_state = MLX5_SQC_STATE_RDY;
        msp.next_state = MLX5_SQC_STATE_RDY;
        msp.rl_index   = rl_index;
        msp.rl_update  = true;
        err = mlx5e_modify_sq(priv, sq->sqn, &msp);
        if (err) {
                netdev_err(dev, "Failed configuring rate %u: %d\n",
                           rate, err);
                /* remove the rate from the table */
                if (rate)
                        mlx5_rl_remove_rate(mdev, rate);
                return err;
        }

        sq->rate_limit = rate;
        return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_txqsq *sq = priv->txq_to_sq_map[index];
        int err = 0;

        if (!mlx5_rl_is_supported(mdev)) {
                netdev_err(dev, "Rate limiting is not supported on this device\n");
                return -EINVAL;
        }

        /* rate is given in Mb/sec, HW config is in Kb/sec */
        rate = rate << 10;

        /* Check whether rate in valid range, 0 is always valid */
        if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
                netdev_err(dev, "TX rate %u, is not in range\n", rate);
                return -ERANGE;
        }

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                err = mlx5e_set_sq_maxrate(dev, sq, rate);
        if (!err)
                priv->tx_rates[index] = rate;
        mutex_unlock(&priv->state_lock);

        return err;
}

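/* Unit note for the conversion above: "rate << 10" scales Mb/s to the
 * Kb/s the device expects using 1024 rather than 1000, e.g. a requested
 * 100 Mb/s is programmed as 102400 Kb/s; the ~2.4% overshoot is the
 * cost of using a shift instead of a multiply.
 */
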
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
        return is_kdump_kernel() ?
                MLX5E_MIN_NUM_CHANNELS :
                min_t(int, mdev->priv.eq_table.num_comp_vectors,
                      MLX5E_MAX_NUM_CHANNELS);
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
        struct mlx5e_cq_moder rx_cq_profile;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = priv->params.num_tc;
        c->xdp      = !!priv->xdp_prog;

        if (priv->params.rx_am_enabled)
                rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
        else
                rx_cq_profile = priv->params.rx_cq_moderation;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_close_icosq_cq;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            rx_cq_profile);
        if (err)
                goto err_close_tx_cqs;

        /* XDP SQ CQ params are same as normal TXQ sq CQ params */
        err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->rq.xdpsq.cq,
                                     priv->params.tx_cq_moderation) : 0;
        if (err)
                goto err_close_rx_cq;

        napi_enable(&c->napi);

        err = mlx5e_open_icosq(c, 0, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_close_icosq;

        err = c->xdp ? mlx5e_open_xdpsq(c, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
        if (err)
                goto err_close_sqs;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_xdp_sq;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;
err_close_xdp_sq:
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
        mlx5e_close_sqs(c);

err_close_icosq:
        mlx5e_close_icosq(&c->icosq);

err_disable_napi:
        napi_disable(&c->napi);
        if (c->xdp)
                mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
        netif_napi_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}

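/* Build the RQ HW context. For a striding RQ the stride count and stride
 * size are programmed as logs offset from the hardware minimum (2^9
 * strides of 2^6 bytes), hence the subtractions below.
 */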
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}

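/* Open all channels according to priv->params, and wait until every RQ has
 * been posted its minimum of RX WQEs before the netdev TX queues are
 * started.
 */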
static int mlx5e_open_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;
	int j;

	chs->num = priv->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	priv->txq_to_sq_map = kcalloc(chs->num * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < chs->num; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&chs->c[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a workaround for the TX timeout watchdog false
	 * alarm seen when polling for inactive TX queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channels *chs = &priv->channels;
	int i;

	/* FIXME: This is a workaround only for the TX timeout watchdog false
	 * alarm seen when polling for inactive TX queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(priv->txq_to_sq_map);
	kfree(chs->c);
	chs->num = 0;
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

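/* Fill the indirection RQT entries. Each slot maps to a channel RQ via the
 * user-configurable indirection table; while the interface is down the
 * slots point at the drop RQ instead. For ETH_RSS_HASH_XOR the slot index
 * is bit-reversed (mlx5e_bits_invert) before the table lookup.
 */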
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		      priv->channels.c[ix]->rq.rqn :
		      priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		  priv->channels.c[ix]->rq.rqn :
		  priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    int ix, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;

	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

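/* Update an existing RQT in place via MODIFY_RQT, re-resolving its RQ
 * numbers; used on state changes so the TIRs can keep pointing at the same
 * RQT number while the underlying RQs switch between channel RQs and the
 * drop RQ.
 */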
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}

	for (ix = 0; ix < priv->params.num_channels; ix++) {
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;
		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}

static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}

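/* Program the RSS hash function, key, and per-traffic-type field selection
 * into a TIR context: TCP/UDP types hash on the IP pair plus L4 ports,
 * IPsec types on the IP pair plus SPI, and plain IPv4/IPv6 on the IP pair
 * only.
 */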
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
				    enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

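/* Set the device MTU: program the port MTU and mirror it into the NIC
 * vport context. The SW2HW/HW2SW macros translate between the stack's MTU
 * and the device's, which also counts Ethernet framing overhead.
 */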
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

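/* Bring the interface up under priv->state_lock: set the real TX/RX queue
 * counts, open all channels, refresh the TIRs, re-point the RQTs at the
 * channel RQs, and start carrier/timestamping/stats. On the eswitch
 * manager, SQ forwarding rules are added last.
 */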
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}
	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involves close & open
	 * failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

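/* The drop RQ: a minimal RQ that is never posted buffers, used as the RQT
 * target while the channels are closed so that the TIRs always point at a
 * valid RQ.
 */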
static int mlx5e_alloc_drop_rq(struct mlx5e_priv *priv,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;

	cq->priv = priv;

	return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(&priv->drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

err_free_cq:
	mlx5e_free_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_free_rq(&priv->drop_rq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
	mlx5e_free_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqt.rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.num_tc = tc ? tc : 1;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}

static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

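/* Apply one netdev feature bit through its handler and mirror the result
 * into netdev->features; a failed handler leaves the feature bit
 * unchanged.
 */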
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				   set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->params.lro_en &&
		(priv->params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened && reset)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;
	mlx5e_set_dev_port_mtu(netdev);

	if (was_opened && reset)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}

static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->channels.num * priv->params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq_to_sq_map[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}

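/* Install or remove an XDP program. Attaching or detaching changes the RQ
 * type, so those paths close and reopen the channels; swapping one program
 * for another instead exchanges the per-RQ program pointers under NAPI
 * synchronization, without a full reset.
 */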
86994156
RS
3352static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3353{
3354 struct mlx5e_priv *priv = netdev_priv(netdev);
3355 struct bpf_prog *old_prog;
3356 int err = 0;
3357 bool reset, was_opened;
3358 int i;
3359
3360 mutex_lock(&priv->state_lock);
3361
3362 if ((netdev->features & NETIF_F_LRO) && prog) {
3363 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3364 err = -EINVAL;
3365 goto unlock;
3366 }
3367
3368 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3369 /* no need for full reset when exchanging programs */
3370 reset = (!priv->xdp_prog || !prog);
3371
3372 if (was_opened && reset)
3373 mlx5e_close_locked(netdev);
c54c0629
DB
3374 if (was_opened && !reset) {
3375 /* num_channels is invariant here, so we can take the
3376 * batched reference right upfront.
3377 */
3378 prog = bpf_prog_add(prog, priv->params.num_channels);
3379 if (IS_ERR(prog)) {
3380 err = PTR_ERR(prog);
3381 goto unlock;
3382 }
3383 }
86994156 3384
c54c0629
DB
3385 /* exchange programs, extra prog reference we got from caller
3386 * as long as we don't fail from this point onwards.
3387 */
86994156 3388 old_prog = xchg(&priv->xdp_prog, prog);
86994156
RS
3389 if (old_prog)
3390 bpf_prog_put(old_prog);
3391
3392 if (reset) /* change RQ type according to priv->xdp_prog */
3393 mlx5e_set_rq_priv_params(priv);
3394
3395 if (was_opened && reset)
3396 mlx5e_open_locked(netdev);
3397
3398 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3399 goto unlock;
3400
3401 /* exchanging programs w/o reset, we update ref counts on behalf
3402 * of the channels RQs here.
3403 */
ff9c852f
SM
3404 for (i = 0; i < priv->channels.num; i++) {
3405 struct mlx5e_channel *c = priv->channels.c[i];
86994156 3406
c0f1147d 3407 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
3408 napi_synchronize(&c->napi);
3409 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3410
3411 old_prog = xchg(&c->rq.xdp_prog, prog);
3412
c0f1147d 3413 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
3414 /* napi_schedule in case we have missed anything */
3415 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
3416 napi_schedule(&c->napi);
3417
3418 if (old_prog)
3419 bpf_prog_put(old_prog);
3420 }
3421
3422unlock:
3423 mutex_unlock(&priv->state_lock);
3424 return err;
3425}
3426
3427static bool mlx5e_xdp_attached(struct net_device *dev)
3428{
3429 struct mlx5e_priv *priv = netdev_priv(dev);
3430
3431 return !!priv->xdp_prog;
3432}
3433
3434static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3435{
3436 switch (xdp->command) {
3437 case XDP_SETUP_PROG:
3438 return mlx5e_xdp_set(dev, xdp->prog);
3439 case XDP_QUERY_PROG:
3440 xdp->prog_attached = mlx5e_xdp_attached(dev);
3441 return 0;
3442 default:
3443 return -EINVAL;
3444 }
3445}
3446
80378384
CO
3447#ifdef CONFIG_NET_POLL_CONTROLLER
3448/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
3449 * reenabling interrupts.
3450 */
3451static void mlx5e_netpoll(struct net_device *dev)
3452{
3453 struct mlx5e_priv *priv = netdev_priv(dev);
ff9c852f
SM
3454 struct mlx5e_channels *chs = &priv->channels;
3455
80378384
CO
3456 int i;
3457
ff9c852f
SM
3458 for (i = 0; i < chs->num; i++)
3459 napi_schedule(&chs->c[i]->napi);
80378384
CO
3460}
3461#endif
3462
b0eed40e 3463static const struct net_device_ops mlx5e_netdev_ops_basic = {
f62b8bb8
AV
3464 .ndo_open = mlx5e_open,
3465 .ndo_stop = mlx5e_close,
3466 .ndo_start_xmit = mlx5e_xmit,
08fb1dac
SM
3467 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3468 .ndo_select_queue = mlx5e_select_queue,
f62b8bb8
AV
3469 .ndo_get_stats64 = mlx5e_get_stats,
3470 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3471 .ndo_set_mac_address = mlx5e_set_mac,
b0eed40e
SM
3472 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3473 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
f62b8bb8 3474 .ndo_set_features = mlx5e_set_features,
b0eed40e
SM
3475 .ndo_change_mtu = mlx5e_change_mtu,
3476 .ndo_do_ioctl = mlx5e_ioctl,
507f0c81 3477 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
45bf454a
MG
3478#ifdef CONFIG_RFS_ACCEL
3479 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3480#endif
3947ca18 3481 .ndo_tx_timeout = mlx5e_tx_timeout,
86994156 3482 .ndo_xdp = mlx5e_xdp,
80378384
CO
3483#ifdef CONFIG_NET_POLL_CONTROLLER
3484 .ndo_poll_controller = mlx5e_netpoll,
3485#endif
b0eed40e
SM
3486};
3487
3488static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3489 .ndo_open = mlx5e_open,
3490 .ndo_stop = mlx5e_close,
3491 .ndo_start_xmit = mlx5e_xmit,
08fb1dac
SM
3492 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3493 .ndo_select_queue = mlx5e_select_queue,
b0eed40e
SM
3494 .ndo_get_stats64 = mlx5e_get_stats,
3495 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3496 .ndo_set_mac_address = mlx5e_set_mac,
3497 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3498 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
3499 .ndo_set_features = mlx5e_set_features,
3500 .ndo_change_mtu = mlx5e_change_mtu,
3501 .ndo_do_ioctl = mlx5e_ioctl,
974c3f30
AD
3502 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
3503 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
507f0c81 3504 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
b3f63c3d 3505 .ndo_features_check = mlx5e_features_check,
45bf454a
MG
3506#ifdef CONFIG_RFS_ACCEL
3507 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3508#endif
b0eed40e
SM
3509 .ndo_set_vf_mac = mlx5e_set_vf_mac,
3510 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
f942380c 3511 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
1edc57e2 3512 .ndo_set_vf_trust = mlx5e_set_vf_trust,
bd77bf1c 3513 .ndo_set_vf_rate = mlx5e_set_vf_rate,
b0eed40e
SM
3514 .ndo_get_vf_config = mlx5e_get_vf_config,
3515 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
3516 .ndo_get_vf_stats = mlx5e_get_vf_stats,
3947ca18 3517 .ndo_tx_timeout = mlx5e_tx_timeout,
86994156 3518 .ndo_xdp = mlx5e_xdp,
80378384
CO
3519#ifdef CONFIG_NET_POLL_CONTROLLER
3520 .ndo_poll_controller = mlx5e_netpoll,
3521#endif
370bad0f
OG
3522 .ndo_has_offload_stats = mlx5e_has_offload_stats,
3523 .ndo_get_offload_stats = mlx5e_get_offload_stats,
f62b8bb8
AV
3524};
3525
3526static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3527{
3528 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
9eb78923 3529 return -EOPNOTSUPP;
f62b8bb8
AV
3530 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3531 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3532 !MLX5_CAP_ETH(mdev, csum_cap) ||
3533 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3534 !MLX5_CAP_ETH(mdev, vlan_cap) ||
796a27ec
GP
3535 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3536 MLX5_CAP_FLOWTABLE(mdev,
3537 flow_table_properties_nic_receive.max_ft_level)
3538 < 3) {
f62b8bb8
AV
3539 mlx5_core_warn(mdev,
3540 "Not creating net device, some required device capabilities are missing\n");
9eb78923 3541 return -EOPNOTSUPP;
f62b8bb8 3542 }
66189961
TT
3543 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3544 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
7524a5d8
GP
3545 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3546 mlx5_core_warn(mdev, "CQ modiration is not supported\n");
66189961 3547
f62b8bb8
AV
3548 return 0;
3549}
3550
58d52291
AS
3551u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3552{
3553 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3554
3555 return bf_buf_size -
3556 sizeof(struct mlx5e_tx_wqe) +
3557 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3558}
3559
d8c9660d
TT
3560void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
3561 u32 *indirection_rqt, int len,
85082dba
TT
3562 int num_channels)
3563{
d8c9660d
TT
3564 int node = mdev->priv.numa_node;
3565 int node_num_of_cores;
85082dba
TT
3566 int i;
3567
d8c9660d
TT
3568 if (node == -1)
3569 node = first_online_node;
3570
3571 node_num_of_cores = cpumask_weight(cpumask_of_node(node));
3572
3573 if (node_num_of_cores)
3574 num_channels = min_t(int, num_channels, node_num_of_cores);
3575
85082dba
TT
3576 for (i = 0; i < len; i++)
3577 indirection_rqt[i] = i % num_channels;
3578}
3579
b797a684
SM
3580static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
3581{
3582 enum pcie_link_width width;
3583 enum pci_bus_speed speed;
3584 int err = 0;
3585
3586 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
3587 if (err)
3588 return err;
3589
3590 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
3591 return -EINVAL;
3592
3593 switch (speed) {
3594 case PCIE_SPEED_2_5GT:
3595 *pci_bw = 2500 * width;
3596 break;
3597 case PCIE_SPEED_5_0GT:
3598 *pci_bw = 5000 * width;
3599 break;
3600 case PCIE_SPEED_8_0GT:
3601 *pci_bw = 8000 * width;
3602 break;
3603 default:
3604 return -EINVAL;
3605 }
3606
3607 return 0;
3608}
3609
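/*
 * The figure computed above is the raw signaling rate in Mb/s (2.5,
 * 5.0 or 8.0 GT/s per lane) times the negotiated width; it does not
 * subtract 8b/10b (Gen1/Gen2) or 128b/130b (Gen3) encoding overhead.
 * A Gen3 x8 link, for instance, reports *pci_bw = 8000 * 8 = 64000.
 */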
3610static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
3611{
3612 return (link_speed && pci_bw &&
3613 (pci_bw < 40000) && (pci_bw < link_speed));
3614}
3615
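/*
 * Reading the heuristic above: CQE compression pays off only when the
 * PCIe slot, not the wire, is the bottleneck, so both tests must hold:
 * under 40 Gb/s of PCI bandwidth and under the port speed. A 50 Gb/s
 * port behind a Gen3 x4 slot (pci_bw = 32000) turns it on by default;
 * the same port behind a Gen3 x16 slot (pci_bw = 128000) leaves it off.
 */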
9908aa29
TT
3616void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
3617{
3618 params->rx_cq_period_mode = cq_period_mode;
3619
3620 params->rx_cq_moderation.pkts =
3621 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3622 params->rx_cq_moderation.usec =
3623 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3624
3625 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
3626 params->rx_cq_moderation.usec =
3627 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
3628}
3629
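/*
 * The two modes handled above differ in when the moderation period is
 * counted from: the last event (START_FROM_EQE) or the last completion
 * (START_FROM_CQE). The CQE-based mode tracks bursty traffic more
 * closely, which is why it gets its own default usec value.
 */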
2b029556
SM
3630u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
3631{
3632 int i;
3633
3634 /* The supported periods are organized in ascending order */
3635 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
3636 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
3637 break;
3638
3639 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
3640}
3641
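/*
 * The lookup above returns the smallest supported LRO timer period
 * that is >= wanted_timeout; if none is large enough, the loop falls
 * off the end and the largest supported period is returned instead.
 */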
6bfd390b
HHZ
3642static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3643 struct net_device *netdev,
127ea380
HHZ
3644 const struct mlx5e_profile *profile,
3645 void *ppriv)
f62b8bb8
AV
3646{
3647 struct mlx5e_priv *priv = netdev_priv(netdev);
b797a684
SM
3648 u32 link_speed = 0;
3649 u32 pci_bw = 0;
cb3c7fd4
GR
3650 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3651 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
3652 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
f62b8bb8 3653
2fc4bfb7
SM
3654 priv->mdev = mdev;
3655 priv->netdev = netdev;
3656 priv->params.num_channels = profile->max_nch(mdev);
3657 priv->profile = profile;
3658 priv->ppriv = ppriv;
3659
2b029556
SM
3660 priv->params.lro_timeout =
3661 mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
3662
b4e029da
KH
3663 priv->params.log_sq_size = is_kdump_kernel() ?
3664 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
3665 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
461017cb 3666
b797a684 3667 /* set CQE compression */
9bcc8606 3668 priv->params.rx_cqe_compress_def = false;
b797a684
SM
3669 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
3670 MLX5_CAP_GEN(mdev, vport_group_manager)) {
3671 mlx5e_get_max_linkspeed(mdev, &link_speed);
3672 mlx5e_get_pci_bw(mdev, &pci_bw);
3673 mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
3674 link_speed, pci_bw);
9bcc8606 3675 priv->params.rx_cqe_compress_def =
b797a684
SM
3676 cqe_compress_heuristic(link_speed, pci_bw);
3677 }
b797a684 3678
b0d4660b
TT
3679 MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
3680 priv->params.rx_cqe_compress_def);
3681
2fc4bfb7
SM
3682 mlx5e_set_rq_priv_params(priv);
3683 if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
461017cb 3684 priv->params.lro_en = true;
9908aa29 3685
cb3c7fd4
GR
3686 priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
3687 mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
9908aa29
TT
3688
3689 priv->params.tx_cq_moderation.usec =
f62b8bb8 3690 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
9908aa29 3691 priv->params.tx_cq_moderation.pkts =
f62b8bb8 3692 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
58d52291 3693 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
8c7245a6 3694 mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
a6f402e4
SM
3695 if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
3696 !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
3697 priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
3698
f62b8bb8 3699 priv->params.num_tc = 1;
2be6967c 3700 priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
f62b8bb8 3701
57afead5
AS
3702 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
3703 sizeof(priv->params.toeplitz_hash_key));
3704
d8c9660d 3705 mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
6bfd390b 3706 MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
2d75b2bc 3707
9908aa29 3708 /* Initialize pflags */
59ece1c9
SD
3709 MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
3710 priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
9908aa29 3711
f62b8bb8
AV
3712 mutex_init(&priv->state_lock);
3713
3714 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3715 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3947ca18 3716 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
f62b8bb8
AV
3717 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3718}
3719
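/*
 * One subtlety in the function above: a tx_min_inline_mode of NONE is
 * only kept when the HCA can insert a VLAN tag into the WQE itself
 * (wqe_vlan_insert); otherwise the mode is raised to L2 so the driver
 * always inlines the Ethernet headers and can place the tag there on
 * its own.
 */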
3720static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
3721{
3722 struct mlx5e_priv *priv = netdev_priv(netdev);
3723
e1d7d349 3724 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
108805fc
SM
3725 if (is_zero_ether_addr(netdev->dev_addr) &&
3726 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
3727 eth_hw_addr_random(netdev);
3728 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
3729 }
f62b8bb8
AV
3730}
3731
cb67b832
HHZ
3732static const struct switchdev_ops mlx5e_switchdev_ops = {
3733 .switchdev_port_attr_get = mlx5e_attr_get,
3734};
3735
6bfd390b 3736static void mlx5e_build_nic_netdev(struct net_device *netdev)
f62b8bb8
AV
3737{
3738 struct mlx5e_priv *priv = netdev_priv(netdev);
3739 struct mlx5_core_dev *mdev = priv->mdev;
94cb1ebb
EBE
3740 bool fcs_supported;
3741 bool fcs_enabled;
f62b8bb8
AV
3742
3743 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
3744
08fb1dac 3745 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
b0eed40e 3746 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
08fb1dac 3747#ifdef CONFIG_MLX5_CORE_EN_DCB
80653f73
HN
3748 if (MLX5_CAP_GEN(mdev, qos))
3749 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
08fb1dac
SM
3750#endif
3751 } else {
b0eed40e 3752 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
08fb1dac 3753 }
66e49ded 3754
f62b8bb8
AV
3755 netdev->watchdog_timeo = 15 * HZ;
3756
3757 netdev->ethtool_ops = &mlx5e_ethtool_ops;
3758
12be4b21 3759 netdev->vlan_features |= NETIF_F_SG;
f62b8bb8
AV
3760 netdev->vlan_features |= NETIF_F_IP_CSUM;
3761 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3762 netdev->vlan_features |= NETIF_F_GRO;
3763 netdev->vlan_features |= NETIF_F_TSO;
3764 netdev->vlan_features |= NETIF_F_TSO6;
3765 netdev->vlan_features |= NETIF_F_RXCSUM;
3766 netdev->vlan_features |= NETIF_F_RXHASH;
3767
3768 if (!!MLX5_CAP_ETH(mdev, lro_cap))
3769 netdev->vlan_features |= NETIF_F_LRO;
3770
3771 netdev->hw_features = netdev->vlan_features;
e4cf27bd 3772 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
f62b8bb8
AV
3773 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3774 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3775
b3f63c3d 3776 if (mlx5e_vxlan_allowed(mdev)) {
b49663c8
AD
3777 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3778 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3779 NETIF_F_GSO_PARTIAL;
b3f63c3d 3780 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
f3ed653c 3781 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
b3f63c3d
MF
3782 netdev->hw_enc_features |= NETIF_F_TSO;
3783 netdev->hw_enc_features |= NETIF_F_TSO6;
b3f63c3d 3784 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
b49663c8
AD
3785 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3786 NETIF_F_GSO_PARTIAL;
3787 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
b3f63c3d
MF
3788 }
3789
94cb1ebb
EBE
3790 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
3791
3792 if (fcs_supported)
3793 netdev->hw_features |= NETIF_F_RXALL;
3794
f62b8bb8
AV
3795 netdev->features = netdev->hw_features;
3796 if (!priv->params.lro_en)
3797 netdev->features &= ~NETIF_F_LRO;
3798
94cb1ebb
EBE
3799 if (fcs_enabled)
3800 netdev->features &= ~NETIF_F_RXALL;
3801
e8f887ac
AV
3802#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
3803 if (FT_CAP(flow_modify_en) &&
3804 FT_CAP(modify_root) &&
3805 FT_CAP(identified_miss_table_mode) &&
1cabe6b0
MG
3806 FT_CAP(flow_table_modify)) {
3807 netdev->hw_features |= NETIF_F_HW_TC;
3808#ifdef CONFIG_RFS_ACCEL
3809 netdev->hw_features |= NETIF_F_NTUPLE;
3810#endif
3811 }
e8f887ac 3812
f62b8bb8
AV
3813 netdev->features |= NETIF_F_HIGHDMA;
3814
3815 netdev->priv_flags |= IFF_UNICAST_FLT;
3816
3817 mlx5e_set_netdev_dev_addr(netdev);
cb67b832
HHZ
3818
3819#ifdef CONFIG_NET_SWITCHDEV
3820 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3821 netdev->switchdev_ops = &mlx5e_switchdev_ops;
3822#endif
f62b8bb8
AV
3823}
3824
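/*
 * Feature layering in the function above follows the usual netdev
 * convention: vlan_features seeds hw_features (what the user may
 * toggle via ethtool), hw_features seeds features (what is currently
 * enabled), and LRO/RXALL are then masked off according to the LRO
 * configuration and the FCS state queried from firmware.
 */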
593cf338
RS
3825static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
3826{
3827 struct mlx5_core_dev *mdev = priv->mdev;
3828 int err;
3829
3830 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
3831 if (err) {
3832 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
3833 priv->q_counter = 0;
3834 }
3835}
3836
3837static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
3838{
3839 if (!priv->q_counter)
3840 return;
3841
3842 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
3843}
3844
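/*
 * The queue counter is deliberately best-effort: on allocation failure
 * the code above only warns and leaves priv->q_counter at 0, and
 * mlx5e_destroy_q_counter() treats 0 as "never allocated", so the
 * netdev comes up either way.
 */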
6bfd390b
HHZ
3845static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
3846 struct net_device *netdev,
127ea380
HHZ
3847 const struct mlx5e_profile *profile,
3848 void *ppriv)
6bfd390b
HHZ
3849{
3850 struct mlx5e_priv *priv = netdev_priv(netdev);
3851
127ea380 3852 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
6bfd390b
HHZ
3853 mlx5e_build_nic_netdev(netdev);
3854 mlx5e_vxlan_init(priv);
3855}
3856
3857static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
3858{
3859 mlx5e_vxlan_cleanup(priv);
127ea380 3860
a055c19b
DB
3861 if (priv->xdp_prog)
3862 bpf_prog_put(priv->xdp_prog);
6bfd390b
HHZ
3863}
3864
3865static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
3866{
3867 struct mlx5_core_dev *mdev = priv->mdev;
3868 int err;
3869 int i;
3870
3871 err = mlx5e_create_indirect_rqts(priv);
3872 if (err) {
3873 mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
3874 return err;
3875 }
3876
3877 err = mlx5e_create_direct_rqts(priv);
3878 if (err) {
3879 mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
3880 goto err_destroy_indirect_rqts;
3881 }
3882
3883 err = mlx5e_create_indirect_tirs(priv);
3884 if (err) {
3885 mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
3886 goto err_destroy_direct_rqts;
3887 }
3888
3889 err = mlx5e_create_direct_tirs(priv);
3890 if (err) {
3891 mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
3892 goto err_destroy_indirect_tirs;
3893 }
3894
3895 err = mlx5e_create_flow_steering(priv);
3896 if (err) {
3897 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
3898 goto err_destroy_direct_tirs;
3899 }
3900
3901 err = mlx5e_tc_init(priv);
3902 if (err)
3903 goto err_destroy_flow_steering;
3904
3905 return 0;
3906
3907err_destroy_flow_steering:
3908 mlx5e_destroy_flow_steering(priv);
3909err_destroy_direct_tirs:
3910 mlx5e_destroy_direct_tirs(priv);
3911err_destroy_indirect_tirs:
3912 mlx5e_destroy_indirect_tirs(priv);
3913err_destroy_direct_rqts:
3914 for (i = 0; i < priv->profile->max_nch(mdev); i++)
3915 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3916err_destroy_indirect_rqts:
3917 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3918 return err;
3919}
3920
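/*
 * The RX bring-up above is strictly bottom-up: RQTs first, then the
 * TIRs that reference them, then the flow steering tables that point
 * at the TIRs, and TC offload state last. The error labels unwind in
 * exactly the reverse order, which is the pattern any new stage should
 * follow.
 */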
3921static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
3922{
3923 int i;
3924
3925 mlx5e_tc_cleanup(priv);
3926 mlx5e_destroy_flow_steering(priv);
3927 mlx5e_destroy_direct_tirs(priv);
3928 mlx5e_destroy_indirect_tirs(priv);
3929 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
3930 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3931 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3932}
3933
3934static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
3935{
3936 int err;
3937
3938 err = mlx5e_create_tises(priv);
3939 if (err) {
3940 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
3941 return err;
3942 }
3943
3944#ifdef CONFIG_MLX5_CORE_EN_DCB
e207b7e9 3945 mlx5e_dcbnl_initialize(priv);
6bfd390b
HHZ
3946#endif
3947 return 0;
3948}
3949
3950static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3951{
3952 struct net_device *netdev = priv->netdev;
3953 struct mlx5_core_dev *mdev = priv->mdev;
127ea380
HHZ
3954 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3955 struct mlx5_eswitch_rep rep;
6bfd390b 3956
7907f23a
AH
3957 mlx5_lag_add(mdev, netdev);
3958
6bfd390b 3959 mlx5e_enable_async_events(priv);
127ea380
HHZ
3960
3961 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
dbe413e3 3962 mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
cb67b832
HHZ
3963 rep.load = mlx5e_nic_rep_load;
3964 rep.unload = mlx5e_nic_rep_unload;
9deb2241 3965 rep.vport = FDB_UPLINK_VPORT;
726293f1 3966 rep.netdev = netdev;
9deb2241 3967 mlx5_eswitch_register_vport_rep(esw, 0, &rep);
127ea380 3968 }
610e89e0
SM
3969
3970 if (netdev->reg_state != NETREG_REGISTERED)
3971 return;
3972
3973 /* Device already registered: sync netdev system state */
3974 if (mlx5e_vxlan_allowed(mdev)) {
3975 rtnl_lock();
3976 udp_tunnel_get_rx_info(netdev);
3977 rtnl_unlock();
3978 }
3979
3980 queue_work(priv->wq, &priv->set_rx_mode_work);
6bfd390b
HHZ
3981}
3982
3983static void mlx5e_nic_disable(struct mlx5e_priv *priv)
3984{
3deef8ce
SM
3985 struct mlx5_core_dev *mdev = priv->mdev;
3986 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3987
6bfd390b 3988 queue_work(priv->wq, &priv->set_rx_mode_work);
3deef8ce
SM
3989 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3990 mlx5_eswitch_unregister_vport_rep(esw, 0);
6bfd390b 3991 mlx5e_disable_async_events(priv);
3deef8ce 3992 mlx5_lag_remove(mdev);
6bfd390b
HHZ
3993}
3994
3995static const struct mlx5e_profile mlx5e_nic_profile = {
3996 .init = mlx5e_nic_init,
3997 .cleanup = mlx5e_nic_cleanup,
3998 .init_rx = mlx5e_init_nic_rx,
3999 .cleanup_rx = mlx5e_cleanup_nic_rx,
4000 .init_tx = mlx5e_init_nic_tx,
4001 .cleanup_tx = mlx5e_cleanup_nic_tx,
4002 .enable = mlx5e_nic_enable,
4003 .disable = mlx5e_nic_disable,
4004 .update_stats = mlx5e_update_stats,
4005 .max_nch = mlx5e_get_max_num_channels,
4006 .max_tc = MLX5E_MAX_NUM_TC,
4007};
4008
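/*
 * A minimal sketch of how another consumer could plug into the same
 * profile abstraction (all my_* names below are hypothetical; the real
 * in-tree second user is the eswitch representor profile). .init, the
 * rx/tx init pairs and .update_stats are invoked unconditionally by
 * mlx5e_create_netdev()/mlx5e_attach_netdev(), while .enable and
 * .disable are NULL-checked before each call and may be omitted.
 */
#if 0	/* illustrative only, not built */
static const struct mlx5e_profile my_profile = {
	.init		= my_init,		/* fill priv params, netdev ops */
	.cleanup	= my_cleanup,
	.init_rx	= my_init_rx,		/* RQTs, TIRs, flow steering */
	.cleanup_rx	= my_cleanup_rx,
	.init_tx	= my_init_tx,		/* TISes */
	.cleanup_tx	= my_cleanup_tx,
	.update_stats	= my_update_stats,
	.max_nch	= my_max_nch,
	.max_tc		= 1,
};
#endif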
26e59d80
MHY
4009struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4010 const struct mlx5e_profile *profile,
4011 void *ppriv)
f62b8bb8 4012{
26e59d80 4013 int nch = profile->max_nch(mdev);
f62b8bb8
AV
4014 struct net_device *netdev;
4015 struct mlx5e_priv *priv;
f62b8bb8 4016
08fb1dac 4017 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 4018 nch * profile->max_tc,
08fb1dac 4019 nch);
f62b8bb8
AV
4020 if (!netdev) {
4021 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4022 return NULL;
4023 }
4024
be4891af
SM
4025#ifdef CONFIG_RFS_ACCEL
4026 netdev->rx_cpu_rmap = mdev->rmap;
4027#endif
4028
127ea380 4029 profile->init(mdev, netdev, profile, ppriv);
f62b8bb8
AV
4030
4031 netif_carrier_off(netdev);
4032
4033 priv = netdev_priv(netdev);
4034
7bb29755
MF
4035 priv->wq = create_singlethread_workqueue("mlx5e");
4036 if (!priv->wq)
26e59d80
MHY
4037 goto err_cleanup_nic;
4038
4039 return netdev;
4040
4041err_cleanup_nic:
4042 profile->cleanup(priv);
4043 free_netdev(netdev);
4044
4045 return NULL;
4046}
4047
4048int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
4049{
4050 const struct mlx5e_profile *profile;
4051 struct mlx5e_priv *priv;
b80f71f5 4052 u16 max_mtu;
26e59d80
MHY
4053 int err;
4054
4055 priv = netdev_priv(netdev);
4056 profile = priv->profile;
4057 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 4058
6bfd390b
HHZ
4059 err = profile->init_tx(priv);
4060 if (err)
ec8b9981 4061 goto out;
5c50368f
AS
4062
4063 err = mlx5e_open_drop_rq(priv);
4064 if (err) {
4065 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
6bfd390b 4066 goto err_cleanup_tx;
5c50368f
AS
4067 }
4068
6bfd390b
HHZ
4069 err = profile->init_rx(priv);
4070 if (err)
5c50368f 4071 goto err_close_drop_rq;
5c50368f 4072
593cf338
RS
4073 mlx5e_create_q_counter(priv);
4074
33cfaaa8 4075 mlx5e_init_l2_addr(priv);
5c50368f 4076
b80f71f5
JW
4077 /* MTU range: 68 - hw-specific max */
4078 netdev->min_mtu = ETH_MIN_MTU;
4079 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
4080 netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
4081
13f9bba7
SM
4082 mlx5e_set_dev_port_mtu(netdev);
4083
6bfd390b
HHZ
4084 if (profile->enable)
4085 profile->enable(priv);
f62b8bb8 4086
26e59d80
MHY
4087 rtnl_lock();
4088 if (netif_running(netdev))
4089 mlx5e_open(netdev);
4090 netif_device_attach(netdev);
4091 rtnl_unlock();
f62b8bb8 4092
26e59d80 4093 return 0;
5c50368f
AS
4094
4095err_close_drop_rq:
4096 mlx5e_close_drop_rq(priv);
4097
6bfd390b
HHZ
4098err_cleanup_tx:
4099 profile->cleanup_tx(priv);
5c50368f 4100
26e59d80
MHY
4101out:
4102 return err;
f62b8bb8
AV
4103}
4104
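/*
 * Bring-up order in mlx5e_attach_netdev() above: TX objects (TISes)
 * first, then a drop RQ that acts as the default destination while no
 * channels are open, then the RX objects, the queue counter, the MTU
 * limits, and finally the optional profile enable hook before the
 * netdev is reopened and reattached under rtnl_lock.
 */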
127ea380
HHZ
4105static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
4106{
4107 struct mlx5_eswitch *esw = mdev->priv.eswitch;
4108 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
4109 int vport;
dbe413e3 4110 u8 mac[ETH_ALEN];
127ea380
HHZ
4111
4112 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
4113 return;
4114
dbe413e3
HHZ
4115 mlx5_query_nic_vport_mac_address(mdev, 0, mac);
4116
127ea380
HHZ
4117 for (vport = 1; vport < total_vfs; vport++) {
4118 struct mlx5_eswitch_rep rep;
4119
cb67b832
HHZ
4120 rep.load = mlx5e_vport_rep_load;
4121 rep.unload = mlx5e_vport_rep_unload;
127ea380 4122 rep.vport = vport;
dbe413e3 4123 ether_addr_copy(rep.hw_id, mac);
9deb2241 4124 mlx5_eswitch_register_vport_rep(esw, vport, &rep);
127ea380
HHZ
4125 }
4126}
4127
6f08a22c
SM
4128static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
4129{
4130 struct mlx5_eswitch *esw = mdev->priv.eswitch;
4131 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
4132 int vport;
4133
4134 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
4135 return;
4136
4137 for (vport = 1; vport < total_vfs; vport++)
4138 mlx5_eswitch_unregister_vport_rep(esw, vport);
4139}
4140
26e59d80
MHY
4141void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
4142{
4143 struct mlx5e_priv *priv = netdev_priv(netdev);
4144 const struct mlx5e_profile *profile = priv->profile;
4145
4146 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80
MHY
4147
4148 rtnl_lock();
4149 if (netif_running(netdev))
4150 mlx5e_close(netdev);
4151 netif_device_detach(netdev);
4152 rtnl_unlock();
4153
37f304d1
SM
4154 if (profile->disable)
4155 profile->disable(priv);
4156 flush_workqueue(priv->wq);
4157
26e59d80
MHY
4158 mlx5e_destroy_q_counter(priv);
4159 profile->cleanup_rx(priv);
4160 mlx5e_close_drop_rq(priv);
4161 profile->cleanup_tx(priv);
26e59d80
MHY
4162 cancel_delayed_work_sync(&priv->update_stats_work);
4163}
4164
4165/* mlx5e_attach and mlx5e_detach should be limited to creating/destroying
4166 * hardware contexts and connecting them to the current netdev.
4167 */
4168static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4169{
4170 struct mlx5e_priv *priv = vpriv;
4171 struct net_device *netdev = priv->netdev;
4172 int err;
4173
4174 if (netif_device_present(netdev))
4175 return 0;
4176
4177 err = mlx5e_create_mdev_resources(mdev);
4178 if (err)
4179 return err;
4180
4181 err = mlx5e_attach_netdev(mdev, netdev);
4182 if (err) {
4183 mlx5e_destroy_mdev_resources(mdev);
4184 return err;
4185 }
4186
6f08a22c 4187 mlx5e_register_vport_rep(mdev);
26e59d80
MHY
4188 return 0;
4189}
4190
4191static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4192{
4193 struct mlx5e_priv *priv = vpriv;
4194 struct net_device *netdev = priv->netdev;
4195
4196 if (!netif_device_present(netdev))
4197 return;
4198
6f08a22c 4199 mlx5e_unregister_vport_rep(mdev);
26e59d80
MHY
4200 mlx5e_detach_netdev(mdev, netdev);
4201 mlx5e_destroy_mdev_resources(mdev);
4202}
4203
b50d292b
HHZ
4204static void *mlx5e_add(struct mlx5_core_dev *mdev)
4205{
127ea380 4206 struct mlx5_eswitch *esw = mdev->priv.eswitch;
26e59d80 4207 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
127ea380 4208 void *ppriv = NULL;
26e59d80
MHY
4209 void *priv;
4210 int vport;
4211 int err;
4212 struct net_device *netdev;
b50d292b 4213
26e59d80
MHY
4214 err = mlx5e_check_required_hca_cap(mdev);
4215 if (err)
b50d292b
HHZ
4216 return NULL;
4217
127ea380
HHZ
4218 if (MLX5_CAP_GEN(mdev, vport_group_manager))
4219 ppriv = &esw->offloads.vport_reps[0];
4220
26e59d80
MHY
4221 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
4222 if (!netdev) {
4223 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
4224 goto err_unregister_reps;
4225 }
4226
4227 priv = netdev_priv(netdev);
4228
4229 err = mlx5e_attach(mdev, priv);
4230 if (err) {
4231 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4232 goto err_destroy_netdev;
4233 }
4234
4235 err = register_netdev(netdev);
4236 if (err) {
4237 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4238 goto err_detach;
b50d292b 4239 }
26e59d80
MHY
4240
4241 return priv;
4242
4243err_detach:
4244 mlx5e_detach(mdev, priv);
4245
4246err_destroy_netdev:
4247 mlx5e_destroy_netdev(mdev, priv);
4248
4249err_unregister_reps:
4250 for (vport = 1; vport < total_vfs; vport++)
4251 mlx5_eswitch_unregister_vport_rep(esw, vport);
4252
4253 return NULL;
b50d292b
HHZ
4254}
4255
cb67b832 4256void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
f62b8bb8 4257{
6bfd390b 4258 const struct mlx5e_profile *profile = priv->profile;
f62b8bb8
AV
4259 struct net_device *netdev = priv->netdev;
4260
7bb29755 4261 destroy_workqueue(priv->wq);
6bfd390b
HHZ
4262 if (profile->cleanup)
4263 profile->cleanup(priv);
26e59d80 4264 free_netdev(netdev);
f62b8bb8
AV
4265}
4266
b50d292b
HHZ
4267static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4268{
4269 struct mlx5e_priv *priv = vpriv;
127ea380 4270
5e1e93c7 4271 unregister_netdev(priv->netdev);
26e59d80
MHY
4272 mlx5e_detach(mdev, vpriv);
4273 mlx5e_destroy_netdev(mdev, priv);
b50d292b
HHZ
4274}
4275
f62b8bb8
AV
4276static void *mlx5e_get_netdev(void *vpriv)
4277{
4278 struct mlx5e_priv *priv = vpriv;
4279
4280 return priv->netdev;
4281}
4282
4283static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
4284 .add = mlx5e_add,
4285 .remove = mlx5e_remove,
26e59d80
MHY
4286 .attach = mlx5e_attach,
4287 .detach = mlx5e_detach,
f62b8bb8
AV
4288 .event = mlx5e_async_event,
4289 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4290 .get_dev = mlx5e_get_netdev,
4291};
4292
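/*
 * Lifecycle note for the interface above: .add/.remove run once per
 * core device and own netdev allocation and (un)registration, while
 * .attach/.detach may cycle many times (across PCI errors and internal
 * resets, for example) and only rebuild the hardware contexts behind
 * the still registered netdev; the netif_device_present() checks make
 * both directions idempotent.
 */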
4293void mlx5e_init(void)
4294{
665bc539 4295 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
4296 mlx5_register_interface(&mlx5e_interface);
4297}
4298
4299void mlx5e_cleanup(void)
4300{
4301 mlx5_unregister_interface(&mlx5e_interface);
4302}