/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

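/* Flush polling budget for draining RQs/SQs on teardown: up to 5000 ms in
 * 20 ms steps, i.e. at most 5000 / 20 = 250 iterations before a flush
 * timeout is flagged.
 */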
enum {
        MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
        MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
        MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
                                          MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
};

struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
        bool                    am_enabled;
};

struct mlx5e_sq_param {
        u32                     sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param    wq;
        u16                     max_inline;
        u8                      min_inline_mode;
        bool                    icosq;
};

struct mlx5e_cq_param {
        u32                     cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param    wq;
        u16                     eq_ix;
        u8                      cq_period_mode;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param   rq;
        struct mlx5e_sq_param   sq;
        struct mlx5e_sq_param   icosq;
        struct mlx5e_cq_param   rx_cq;
        struct mlx5e_cq_param   tx_cq;
        struct mlx5e_cq_param   icosq_cq;
};

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP) {
                netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
        } else {
                netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
        }
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
        int err;

        rtnl_lock();
        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
        mlx5e_close_locked(priv->netdev);
        err = mlx5e_open_locked(priv->netdev);
        if (err)
                netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
                           err);
unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
}

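/* Fold the per-ring counters (one RQ and num_tc SQs per channel) into the
 * device-wide software stats, then derive the checksum counters that have
 * no per-ring counterpart (tx_csum_partial, rx_csum_unnecessary).
 */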
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                        s->tx_tso_packets       += sq_stats->tso_packets;
                        s->tx_tso_bytes         += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
        }

        /* Update calculated offload counters */
        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        struct mlx5_core_dev *mdev = priv->mdev;

        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;
        u32 *in;

        in = mlx5_vzalloc(sz);
        if (!in)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }

free_out:
        kvfree(in);
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

        if (!priv->q_counter)
                return;

        mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
                                      &qcnt->rx_out_of_buffer);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_update_q_counter(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_pport_counters(priv);
        mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->profile->update_stats(priv);
                queue_delayed_work(priv->wq, dwork,
                                   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;

        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

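/* The netdev MTU excludes the Ethernet header, VLAN tag and FCS, while the
 * HW MTU includes them: ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4) =
 * 18 bytes, so e.g. a netdev MTU of 1500 maps to a HW MTU of 1518.
 */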
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
                                            GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->wqe_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                                       cpu_to_node(c->cpu));
                if (!rq->skb) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

                rq->wqe_sz = (priv->params.lro_en) ?
                                priv->params.lro_wqe_sz :
                                MLX5E_SW2HW_MTU(priv->netdev->mtu);
                rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
                byte_count = rq->wqe_sz;
                byte_count |= MLX5_HW_START_PADDING;
        }

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

                wqe->data.byte_count = cpu_to_be32(byte_count);
        }

        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = priv->params.rx_cq_period_mode;

        rq->wq_type = priv->params.rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = &priv->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;
        rq->mkey_be = c->mkey_be;
        rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                kfree(rq->wqe_info);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->skb);
        }

        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, flush_in_error_en, 1);
        MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                        MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

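/* Bring an RQ to life: create the WQ, create the HW object in RST state,
 * move it RST -> RDY, then post a NOP on the channel's ICO SQ so that
 * mlx5e_post_rx_wqes() runs and fills the ring with receive WQEs.
 */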
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        struct mlx5e_sq *sq = &c->icosq;
        u16 pi = sq->pc & sq->wq.sz_m1;
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);

        sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
        sq->ico_wqe_info[pi].num_wqebbs = 1;
        mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

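/* Teardown mirrors mlx5e_open_rq(): stop posting new WQEs, move the RQ to
 * ERR so the HW flushes it, and poll (within the flush budget above) until
 * the work queue drains; on timeout the RQ is flagged FLUSH_TIMEOUT and its
 * remaining descriptors are reclaimed in SW via mlx5e_free_rx_descs().
 */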
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        int tout = 0;
        int err;

        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
               tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
                msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);

        if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
                set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        cancel_work_sync(&rq->am.work);

        mlx5e_disable_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->wqe_info);
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);
        sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

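/* SQ creation maps a dedicated UAR for doorbells; when the device exposes a
 * BlueFlame register (MLX5_CAP_GEN(mdev, bf)) the write-combining bf_map is
 * used instead, which lets small WQEs be written directly to the device.
 */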
static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
        if (sq->uar.bf_map) {
                set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
                sq->uar_map = sq->uar.bf_map;
        } else {
                sq->uar_map = sq->uar.map;
        }
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
        sq->min_inline_mode =
                MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
                param->min_inline_mode : 0;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        if (param->icosq) {
                u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

                sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
                                                wq_sz,
                                                GFP_KERNEL,
                                                cpu_to_node(c->cpu));
                if (!sq->ico_wqe_info) {
                        err = -ENOMEM;
                        goto err_free_sq_db;
                }
        } else {
                int txq_ix;

                txq_ix = c->ix + tc * priv->params.num_channels;
                sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
                priv->txq_to_sq_map[txq_ix] = sq;
        }

        sq->pdev      = c->pdev;
        sq->tstamp    = &priv->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;

        return 0;

err_free_sq_db:
        mlx5e_free_sq_db(sq);

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        kfree(sq->ico_wqe_info);
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
        MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
        MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, sq->uar.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                                        MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
                           int next_state, bool update_rl, int rl_index)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);
        if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
                MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
        }

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit)
                mlx5_rl_remove_rate(mdev, sq->rate_limit);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
                              false, 0);
        if (err)
                goto err_disable_sq;

        if (sq->txq) {
                set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                netdev_tx_reset_queue(sq->txq);
                netif_tx_start_queue(sq->txq);
        }

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

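/* Drain a send queue: quiesce the txq, ring the doorbell one last time with
 * a NOP so the HW sees all pending WQEs, move the SQ to ERR, then poll until
 * the consumer counter catches up with the producer (cc == pc), bounded by
 * the flush budget; leftovers are reclaimed via mlx5e_free_tx_descs().
 */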
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        int tout = 0;
        int err;

        if (sq->txq) {
                clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                /* prevent netif_tx_wake_queue */
                napi_synchronize(&sq->channel->napi);
                netif_tx_disable_queue(sq->txq);

                /* ensure hw is notified of all pending wqes */
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);

                err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
                                      MLX5_SQC_STATE_ERR, false, 0);
                if (err)
                        set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }

        /* wait till sq is empty, unless a TX timeout occurred on this SQ */
        while (sq->cc != sq->pc &&
               !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
                msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
                if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
                        set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &mdev->mlx5e_res.cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                           MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         struct mlx5e_cq_moder moderation)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        if (MLX5_CAP_GEN(mdev, cq_moderation))
                mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                               moderation.usec,
                                               moderation.pkts);
        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

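/* Each channel is pinned to the first CPU in its completion vector's IRQ
 * affinity mask, which keeps NAPI processing local to the interrupt.
 */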
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

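/* TXQs are laid out channel-major per traffic class: txq = ix + tc * nch.
 * For example, with 8 channels and 2 TCs, channel 3 / TC 1 maps to txq 11.
 */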
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < priv->profile->max_tc; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_sq *sq, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 rl_index = 0;
        int err;

        if (rate == sq->rate_limit)
                /* nothing to do */
                return 0;

        if (sq->rate_limit)
                /* remove current rl index to free space to next ones */
                mlx5_rl_remove_rate(mdev, sq->rate_limit);

        sq->rate_limit = 0;

        if (rate) {
                err = mlx5_rl_add_rate(mdev, rate, &rl_index);
                if (err) {
                        netdev_err(dev, "Failed configuring rate %u: %d\n",
                                   rate, err);
                        return err;
                }
        }

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
                              MLX5_SQC_STATE_RDY, true, rl_index);
        if (err) {
                netdev_err(dev, "Failed configuring rate %u: %d\n",
                           rate, err);
                /* remove the rate from the table */
                if (rate)
                        mlx5_rl_remove_rate(mdev, rate);
                return err;
        }

        sq->rate_limit = rate;
        return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
        int err = 0;

        if (!mlx5_rl_is_supported(mdev)) {
                netdev_err(dev, "Rate limiting is not supported on this device\n");
                return -EINVAL;
        }

        /* rate is given in Mb/sec, HW config is in Kb/sec */
        rate = rate << 10;

        /* Check whether rate in valid range, 0 is always valid */
        if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
                netdev_err(dev, "TX rate %u, is not in range\n", rate);
                return -ERANGE;
        }

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                err = mlx5e_set_sq_maxrate(dev, sq, rate);
        if (!err)
                priv->tx_rates[index] = rate;
        mutex_unlock(&priv->state_lock);

        return err;
}

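/* Channel bring-up order matters: CQs first (ICO, TX, RX), then NAPI is
 * enabled, then the ICO SQ and the per-TC SQs, and the RQ last; the error
 * unwind below releases everything in exactly the reverse order.
 */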
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
        struct mlx5e_cq_moder rx_cq_profile;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        struct mlx5e_sq *sq;
        int err;
        int i;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv = priv;
        c->ix = ix;
        c->cpu = cpu;
        c->pdev = &priv->mdev->pdev->dev;
        c->netdev = priv->netdev;
        c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc = priv->params.num_tc;

        if (priv->params.rx_am_enabled)
                rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
        else
                rx_cq_profile = priv->params.rx_cq_moderation;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_close_icosq_cq;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            rx_cq_profile);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_close_icosq;

        for (i = 0; i < priv->params.num_tc; i++) {
                u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

                if (priv->tx_rates[txq_ix]) {
                        sq = priv->txq_to_sq_map[txq_ix];
                        mlx5e_set_sq_maxrate(priv->netdev, sq,
                                             priv->tx_rates[txq_ix]);
                }
        }

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_close_icosq:
        mlx5e_close_sq(&c->icosq);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_sq(&c->icosq);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
        netif_napi_del(&c->napi);

        napi_hash_del(&c->napi);
        synchronize_rcu();

        kfree(c);
}

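/* The mlx5e_build_*_param() helpers below translate the driver's cached
 * priv->params into the firmware context layouts (rqc/sqc/cqc) consumed by
 * the create commands, so channels can be (re)built from one source of truth.
 */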
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                MLX5_SET(wq, wq, log_wqe_num_of_strides,
                         priv->params.mpwqe_log_num_strides - 9);
                MLX5_SET(wq, wq, log_wqe_stride_size,
                         priv->params.mpwqe_log_stride_sz - 6);
                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        }

        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
        MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;

        param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
                                        struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

        param->max_inline = priv->params.tx_max_inline;
        param->min_inline_mode = priv->params.tx_min_inline_mode;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;
        u8 log_cq_size;

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                log_cq_size = priv->params.log_rq_size +
                        priv->params.mpwqe_log_num_strides;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                log_cq_size = priv->params.log_rq_size;
        }

        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        if (priv->params.rx_cqe_compress) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        }

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = priv->params.rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
                                     struct mlx5e_cq_param *param,
                                     u8 log_wq_size)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_sq_param *param,
                                    u8 log_wq_size)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(priv, param);

        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

        param->icosq = true;
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
        u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
        mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}

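/* Open one channel per configured ring, then wait for every RQ to be filled
 * with at least min_rx_wqes receive WQEs before traffic is allowed to flow.
 */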
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param *cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map || !cparam)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, cparam);

        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        /* FIXME: This is a W/A for tx timeout watch dog false alarm when
         * polling for inactive tx queues.
         */
        netif_tx_start_all_queues(priv->netdev);

        kfree(cparam);
        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
        kfree(cparam);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
         * polling for inactive tx queues.
         */
        netif_tx_stop_all_queues(priv->netdev);
        netif_tx_disable(priv->netdev);

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

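/* With the XOR8 hash function the table index is bit-inverted before the
 * indirection table is consulted, presumably to spread consecutive entries
 * across the RQT; either way, closed channels fall back to the drop RQ so
 * every entry names a valid RQN.
 */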
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
        int i;

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;
                u32 rqn;

                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

                ix = priv->params.indirection_rqt[ix];
                rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                      priv->channel[ix]->rq.rqn :
                      priv->drop_rq.rqn;
                MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
                                      int ix)
{
        u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                  priv->channel[ix]->rq.rqn :
                  priv->drop_rq.rqn;

        MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}

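/* Two flavors of RQ tables are used: one indirection (RSS) table with
 * MLX5E_INDIR_RQT_SIZE entries, and one single-entry table per channel for
 * direct steering via the per-channel TIRs.
 */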
static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
                            int ix, struct mlx5e_rqt *rqt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        int inlen;
        int err;
        u32 *in;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        if (sz > 1) /* RSS */
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);
        else
                mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
        if (!err)
                rqt->enabled = true;

        kvfree(in);
        return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
        rqt->enabled = false;
        mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
        struct mlx5e_rqt *rqt = &priv->indir_rqt;

        return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
        struct mlx5e_rqt *rqt;
        int err;
        int ix;

        for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
                rqt = &priv->direct_tir[ix].rqt;
                err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
                if (err)
                        goto err_destroy_rqts;
        }

        return 0;

err_destroy_rqts:
        for (ix--; ix >= 0; ix--)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

        return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        int inlen;
        u32 *in;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        if (sz > 1) /* RSS */
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);
        else
                mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

        kvfree(in);

        return err;
}

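/* Repoint all enabled RQ tables at the current channel RQs (or the drop RQ
 * when the device is closed); called on open/close transitions so steering
 * rules never reference a destroyed RQ.
 */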
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        u32 rqtn;
        int ix;

        if (priv->indir_rqt.enabled) {
                rqtn = priv->indir_rqt.rqtn;
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
        }

        for (ix = 0; ix < priv->params.num_channels; ix++) {
                if (!priv->direct_tir[ix].rqt.enabled)
                        continue;
                rqtn = priv->direct_tir[ix].rqt.rqtn;
                mlx5e_redirect_rqt(priv, rqtn, 1, ix);
        }
}

static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[2]));
}

void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                             rx_hash_toeplitz_key);
                size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                               rx_hash_toeplitz_key);

                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;
        int tt;
        int ix;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
                                           inlen);
                if (err)
                        goto free_in;
        }

        for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
                err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
                                           in, inlen);
                if (err)
                        goto free_in;
        }

free_in:
        kvfree(in);

        return err;
}

static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
        int err;

        err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
        if (err)
                return err;

        /* Update vport context MTU */
        mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
        return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 hw_mtu = 0;
        int err;

        err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
        if (err || !hw_mtu) /* fallback to port oper mtu */
                mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        *mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        u16 mtu;
        int err;

        err = mlx5e_set_mtu(priv, netdev->mtu);
        if (err)
                return err;

        mlx5e_query_mtu(priv, &mtu);
        if (mtu != netdev->mtu)
                netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
                            __func__, mtu, netdev->mtu);

        netdev->mtu = mtu;
        return 0;
}

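/* Expose num_tc traffic classes to the stack, but map every TC to queue
 * offset 0: the driver keeps its own UP -> TXQ mapping for QoS (see
 * mlx5e_build_channeltc_to_txq_map()).
 */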
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int nch = priv->params.num_channels;
        int ntc = priv->params.num_tc;
        int tc;

        netdev_reset_tc(netdev);

        if (ntc == 1)
                return;

        netdev_set_num_tc(netdev, ntc);

        /* Map netdev TCs to offset 0
         * We have our own UP to TXQ mapping for QoS
         */
        for (tc = 0; tc < ntc; tc++)
                netdev_set_tc_queue(netdev, tc, nch, 0);
}

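/* ndo_open path (with state_lock held): size the real TX/RX queue counts,
 * apply the port MTU, open all channels, refresh the TIRs, and only then
 * redirect the RQ tables from the drop RQ to the live channel RQs.
 */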
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		goto err_clear_state_opened_flag;

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}
	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

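/* The "drop" RQ/CQ pair below is never polled; it only gives the TIRs a
 * valid destination while the real channels are closed, so traffic is
 * silently dropped by the HW instead of hitting a dangling RQ.
 */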
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	cq->priv = priv;

	return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}

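/* One TIS (transmit interface send) object is created per traffic class,
 * all sharing the same transport domain.
 */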
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}

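/* Build the context of an indirectly-dispatched (RSS) TIR: LRO setup, the
 * indirection RQT and hash function, plus the per-traffic-type set of
 * packet fields (L3 addresses, L4 ports or IPsec SPI) that feeds the
 * RX hash.
 */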
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_tir_ctx_hash(tirc, priv);

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true,
			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
	}
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqt.rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

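/* Propagate the VLAN-stripping-disable (VSD) setting to every open RQ. */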
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
	int err = 0;
	int i;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.num_tc = tc ? tc : 1;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

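/* ndo_setup_tc entry point: ingress flower classifier offloads are
 * dispatched first; anything else falls through to mqprio handling.
 */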
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx5e_setup_tc(dev, tc->tc);
}

struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
	stats->tx_dropped = sstats->tx_queue_dropped;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);

	return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	err = mlx5e_modify_rqs_vsd(priv, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

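/* Toggle one feature bit through its handler, and mirror the result into
 * netdev->features only on success so the stack's view never drifts from
 * the device state.
 */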
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}

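/* Hardware minimum frame size, plus the FCS bytes that the SW<->HW MTU
 * conversion macros account for.
 */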
#define MLX5_HW_MIN_MTU 64
#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	u16 max_mtu;
	u16 min_mtu;
	int err = 0;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);

	if (new_mtu > max_mtu || new_mtu < min_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
			   __func__, new_mtu, min_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;

	if (was_opened)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}

static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

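/* Tunnel TX offloads are only safe when the outer UDP destination port was
 * registered with the device as a VXLAN port; otherwise checksum and GSO
 * must fall back to software.
 */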
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}

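/* ndo_tx_timeout handler: flag every stalled SQ for recovery and schedule
 * tx_timeout_work, but only while the interface is still open.
 */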
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}

static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

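/* Half of the BlueFlame register is usable per post; whatever remains after
 * the TX WQE control segments bounds the inline header size (the two bytes
 * of inline_hdr_start are reclaimed, hence the "+ 2").
 */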
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	int i;

	priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < priv->params.ets.ets_cap; i++) {
		priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		priv->params.ets.prio_tc[i] = i;
	}

	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
	priv->params.ets.prio_tc[0] = 1;
	priv->params.ets.prio_tc[1] = 0;
}
#endif

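/* Spread the default RSS indirection table round-robin over the channels,
 * capped at the number of cores on the device's NUMA node.
 */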
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}

static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

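/* Enable CQE compression by default only when PCIe bandwidth is the likely
 * bottleneck: below 40 Gb/s and slower than the network link.
 */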
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}

static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
				   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5E_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev,
						min_inline_mode);
		break;
	case MLX5_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}

static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 link_speed = 0;
	u32 pci_bw = 0;
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;

	/* set CQE compression */
	priv->params.rx_cqe_compress_admin = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		priv->params.rx_cqe_compress_admin =
			cqe_compress_heuristic(link_speed, pci_bw);
	}

	priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			priv->params.rx_cqe_compress ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		priv->params.lro_en = true;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}

	mlx5_core_info(mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       priv->params.rx_cqe_compress_admin);

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
						    BIT(priv->params.log_rq_size));

	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

	priv->params.tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	priv->params.num_tc = 1;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));

	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	/* Initialize pflags */
	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			    priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = profile->max_nch(mdev);
	priv->profile                      = profile;
	priv->ppriv                        = ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_ets_init(priv);
#endif

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features	 |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}

static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

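/* The UMR mkey spans the MTT translation space used by the striding RQ
 * (MPWQE) datapath; one window of MLX5_CHANNEL_MAX_NUM_MTTS pages is
 * reserved per channel.
 */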
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 mlx5e_get_mtt_octw(npages));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);

	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_vxlan_cleanup(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;
	int i;

	err = mlx5e_create_indirect_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		goto err_destroy_indirect_rqts;
	}

	err = mlx5e_create_indirect_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_indirect_tirs;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->profile->max_nch(mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	mlx5e_enable_async_events(priv);
	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = 0;
		rep.priv_data = priv;
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	queue_work(priv->wq, &priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};

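/* Profile-driven netdev creation: allocate the netdev, let the profile fill
 * in priv, then bring up the shared resources (workqueue, UMR mkey, TX
 * objects, drop RQ, RX objects, queue counter) before registration.
 */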
void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
			  const struct mlx5e_profile *profile, void *ppriv)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int nch = profile->max_nch(mdev);
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_free_netdev;

	err = mlx5e_create_umr_mkey(priv);
	if (err) {
		mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
		goto err_destroy_wq;
	}

	err = profile->init_tx(priv);
	if (err)
		goto err_destroy_umr_mkey;

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_dealloc_q_counters;
	}

	if (profile->enable)
		profile->enable(priv);

	return priv;

err_dealloc_q_counters:
	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_cleanup_tx:
	profile->cleanup_tx(priv);

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);

err_destroy_wq:
	destroy_workqueue(priv->wq);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	void *ppriv = NULL;
	void *ret;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	if (mlx5e_create_mdev_resources(mdev))
		return NULL;

	mlx5e_register_vport_rep(mdev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!ret) {
		mlx5e_destroy_mdev_resources(mdev);
		return NULL;
	}
	return ret;
}

void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (profile->disable)
		profile->disable(priv);

	flush_workqueue(priv->wq);
	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
		netif_device_detach(netdev);
		mlx5e_close(netdev);
	} else {
		unregister_netdev(netdev);
	}

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
	profile->cleanup_tx(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
	cancel_delayed_work_sync(&priv->update_stats_work);
	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);

	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
		free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_priv *priv = vpriv;
	int vport;

	mlx5e_destroy_netdev(mdev, priv);

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}