]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_rep.c
CommitLineData
cb67b832
HHZ
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <generated/utsrelease.h>
34#include <linux/mlx5/fs.h>
35#include <net/switchdev.h>
d957b4e3 36#include <net/pkt_cls.h>
717503b9 37#include <net/act_api.h>
232c0013
HHZ
38#include <net/netevent.h>
39#include <net/arp.h>
f60f315d 40#include <net/devlink.h>
5cc3a8c6 41#include <net/ipv6_stubs.h>
cb67b832
HHZ
42
43#include "eswitch.h"
49964352 44#include "esw/chains.h"
cb67b832 45#include "en.h"
1d447a39 46#include "en_rep.h"
adb4c123 47#include "en_tc.h"
101f4de9 48#include "en/tc_tun.h"
f6dfb4c3 49#include "fs_core.h"
97417f61 50#include "lib/port_tun.h"
71c6eaeb 51#include "lib/mlx5.h"
5970882a
VB
52#define CREATE_TRACE_POINTS
53#include "diag/en_rep_tracepoint.h"
cb67b832 54
4c8fb298 55#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
e7164313 56 max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
8956f001 57#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
4246f698 58
cb67b832
HHZ
59static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
60
f5bc2c5d
OS
61struct mlx5e_rep_indr_block_priv {
62 struct net_device *netdev;
63 struct mlx5e_rep_priv *rpriv;
64
65 struct list_head list;
66};
67
25f2d0e7
EB
68static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
69 struct net_device *netdev);
f5bc2c5d 70
cb67b832
HHZ
71static void mlx5e_rep_get_drvinfo(struct net_device *dev,
72 struct ethtool_drvinfo *drvinfo)
73{
cf83c8fd
DL
74 struct mlx5e_priv *priv = netdev_priv(dev);
75 struct mlx5_core_dev *mdev = priv->mdev;
76
cb67b832
HHZ
77 strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
78 sizeof(drvinfo->driver));
79 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
cf83c8fd
DL
80 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
81 "%d.%d.%04d (%.16s)",
82 fw_rev_maj(mdev), fw_rev_min(mdev),
83 fw_rev_sub(mdev), mdev->board_id);
84}
85
86static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
87 struct ethtool_drvinfo *drvinfo)
88{
89 struct mlx5e_priv *priv = netdev_priv(dev);
90
91 mlx5e_rep_get_drvinfo(dev, drvinfo);
92 strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
93 sizeof(drvinfo->bus_info));
cb67b832
HHZ
94}
95
96static const struct counter_desc sw_rep_stats_desc[] = {
97 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
98 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
99 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
100 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
101};
102
a228060a
OG
103struct vport_stats {
104 u64 vport_rx_packets;
105 u64 vport_tx_packets;
106 u64 vport_rx_bytes;
107 u64 vport_tx_bytes;
108};
109
110static const struct counter_desc vport_rep_stats_desc[] = {
111 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
112 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
113 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
114 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
115};
116
117#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
118#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
cb67b832 119
8a236b15 120static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
cb67b832 121{
8a236b15
VB
122 return NUM_VPORT_REP_SW_COUNTERS;
123}
cb67b832 124
8a236b15
VB
125static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
126{
127 int i;
128
129 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
130 strcpy(data + (idx++) * ETH_GSTRING_LEN,
131 sw_rep_stats_desc[i].format);
132 return idx;
133}
134
135static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
136{
137 int i;
138
139 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
140 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
141 sw_rep_stats_desc, i);
142 return idx;
143}
144
145static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
146{
147 struct mlx5e_sw_stats *s = &priv->stats.sw;
148 struct rtnl_link_stats64 stats64 = {};
149
150 memset(s, 0, sizeof(*s));
151 mlx5e_fold_sw_stats64(priv, &stats64);
152
153 s->rx_packets = stats64.rx_packets;
154 s->rx_bytes = stats64.rx_bytes;
155 s->tx_packets = stats64.tx_packets;
156 s->tx_bytes = stats64.tx_bytes;
157 s->tx_queue_dropped = stats64.tx_dropped;
158}
159
160static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
161{
162 return NUM_VPORT_REP_HW_COUNTERS;
163}
164
165static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
166{
167 int i;
168
169 for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
170 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
171 return idx;
172}
173
174static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
175{
176 int i;
177
178 for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
179 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
180 vport_rep_stats_desc, i);
181 return idx;
cb67b832
HHZ
182}
183
7c453526 184static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
370bad0f
OG
185{
186 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1d447a39
SM
187 struct mlx5e_rep_priv *rpriv = priv->ppriv;
188 struct mlx5_eswitch_rep *rep = rpriv->rep;
370bad0f
OG
189 struct rtnl_link_stats64 *vport_stats;
190 struct ifla_vf_stats vf_stats;
191 int err;
192
193 err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
194 if (err) {
237ac8de
RD
195 netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
196 rep->vport, err);
370bad0f
OG
197 return;
198 }
199
200 vport_stats = &priv->stats.vf_vport;
201 /* flip tx/rx as we are reporting the counters for the switch vport */
202 vport_stats->rx_packets = vf_stats.tx_packets;
203 vport_stats->rx_bytes = vf_stats.tx_bytes;
204 vport_stats->tx_packets = vf_stats.rx_packets;
205 vport_stats->tx_bytes = vf_stats.rx_bytes;
206}
207
8a236b15
VB
208static void mlx5e_rep_get_strings(struct net_device *dev,
209 u32 stringset, uint8_t *data)
210{
211 struct mlx5e_priv *priv = netdev_priv(dev);
212
213 switch (stringset) {
214 case ETH_SS_STATS:
215 mlx5e_stats_fill_strings(priv, data);
216 break;
217 }
370bad0f
OG
218}
219
cb67b832
HHZ
220static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
221 struct ethtool_stats *stats, u64 *data)
222{
223 struct mlx5e_priv *priv = netdev_priv(dev);
cb67b832 224
8a236b15 225 mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
cb67b832
HHZ
226}
227
228static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
229{
8a236b15
VB
230 struct mlx5e_priv *priv = netdev_priv(dev);
231
cb67b832
HHZ
232 switch (sset) {
233 case ETH_SS_STATS:
8a236b15 234 return mlx5e_stats_total_num(priv);
cb67b832
HHZ
235 default:
236 return -EOPNOTSUPP;
237 }
238}
239
f128f138
GT
/* ethtool .get_ringparam wrapper around the common mlx5e helper. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}
247
/* ethtool .set_ringparam wrapper around the common mlx5e helper. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}
255
84a09733
GT
/* ethtool .get_channels wrapper around the common mlx5e helper. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}
263
/* ethtool .set_channels wrapper around the common mlx5e helper. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}
271
ff9b85de
OG
/* ethtool .get_coalesce wrapper around the common mlx5e helper. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}
279
/* ethtool .set_coalesce wrapper around the common mlx5e helper. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}
287
84a09733
GT
288static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
289{
290 struct mlx5e_priv *priv = netdev_priv(netdev);
291
292 return mlx5e_ethtool_get_rxfh_key_size(priv);
293}
294
295static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
296{
297 struct mlx5e_priv *priv = netdev_priv(netdev);
298
299 return mlx5e_ethtool_get_rxfh_indir_size(priv);
300}
301
ff9b85de
OG
/* ethtool .get_pauseparam for the uplink rep (pause is a port property). */
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
309
/* ethtool .set_pauseparam for the uplink rep (pause is a port property). */
static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}
317
/* ethtool .get_link_ksettings for the uplink rep. */
static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
325
/* ethtool .set_link_ksettings for the uplink rep. */
static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}
333
9b81d5a9 334static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
55808762
JK
335 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
336 ETHTOOL_COALESCE_MAX_FRAMES |
337 ETHTOOL_COALESCE_USE_ADAPTIVE,
ff9b85de
OG
338 .get_drvinfo = mlx5e_rep_get_drvinfo,
339 .get_link = ethtool_op_get_link,
340 .get_strings = mlx5e_rep_get_strings,
341 .get_sset_count = mlx5e_rep_get_sset_count,
342 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
343 .get_ringparam = mlx5e_rep_get_ringparam,
344 .set_ringparam = mlx5e_rep_set_ringparam,
345 .get_channels = mlx5e_rep_get_channels,
346 .set_channels = mlx5e_rep_set_channels,
347 .get_coalesce = mlx5e_rep_get_coalesce,
348 .set_coalesce = mlx5e_rep_set_coalesce,
349 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
350 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
351};
352
353static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
55808762
JK
354 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
355 ETHTOOL_COALESCE_MAX_FRAMES |
356 ETHTOOL_COALESCE_USE_ADAPTIVE,
cf83c8fd 357 .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
cb67b832
HHZ
358 .get_link = ethtool_op_get_link,
359 .get_strings = mlx5e_rep_get_strings,
360 .get_sset_count = mlx5e_rep_get_sset_count,
361 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
f128f138
GT
362 .get_ringparam = mlx5e_rep_get_ringparam,
363 .set_ringparam = mlx5e_rep_set_ringparam,
84a09733
GT
364 .get_channels = mlx5e_rep_get_channels,
365 .set_channels = mlx5e_rep_set_channels,
ff9b85de
OG
366 .get_coalesce = mlx5e_rep_get_coalesce,
367 .set_coalesce = mlx5e_rep_set_coalesce,
368 .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
369 .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
84a09733
GT
370 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
371 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
01013ad3
VB
372 .get_rxfh = mlx5e_get_rxfh,
373 .set_rxfh = mlx5e_set_rxfh,
b63293e7
VB
374 .get_rxnfc = mlx5e_get_rxnfc,
375 .set_rxnfc = mlx5e_set_rxnfc,
ff9b85de
OG
376 .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
377 .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
cb67b832
HHZ
378};
379
724ee179
PP
380static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
381 struct netdev_phys_item_id *ppid)
cb67b832 382{
7ff40a46
PB
383 struct mlx5e_priv *priv;
384 u64 parent_id;
385
386 priv = netdev_priv(dev);
cb67b832 387
7ff40a46
PB
388 parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
389 ppid->id_len = sizeof(parent_id);
390 memcpy(ppid->id, &parent_id, sizeof(parent_id));
cb67b832
HHZ
391}
392
f7a68945
MB
393static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
394 struct mlx5_eswitch_rep *rep)
395{
2c47bf80 396 struct mlx5e_rep_sq *rep_sq, *tmp;
5ed99fb4 397 struct mlx5e_rep_priv *rpriv;
f7a68945 398
f6455de0 399 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
f7a68945
MB
400 return;
401
5ed99fb4 402 rpriv = mlx5e_rep_to_rep_priv(rep);
2c47bf80
MB
403 list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
404 mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
405 list_del(&rep_sq->list);
406 kfree(rep_sq);
f7a68945
MB
407 }
408}
409
410static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
411 struct mlx5_eswitch_rep *rep,
5ecadff0 412 u32 *sqns_array, int sqns_num)
f7a68945
MB
413{
414 struct mlx5_flow_handle *flow_rule;
5ed99fb4 415 struct mlx5e_rep_priv *rpriv;
2c47bf80 416 struct mlx5e_rep_sq *rep_sq;
f7a68945
MB
417 int err;
418 int i;
419
f6455de0 420 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
f7a68945
MB
421 return 0;
422
5ed99fb4 423 rpriv = mlx5e_rep_to_rep_priv(rep);
f7a68945 424 for (i = 0; i < sqns_num; i++) {
2c47bf80
MB
425 rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
426 if (!rep_sq) {
f7a68945
MB
427 err = -ENOMEM;
428 goto out_err;
429 }
430
431 /* Add re-inject rule to the PF/representor sqs */
432 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
433 rep->vport,
434 sqns_array[i]);
435 if (IS_ERR(flow_rule)) {
436 err = PTR_ERR(flow_rule);
2c47bf80 437 kfree(rep_sq);
f7a68945
MB
438 goto out_err;
439 }
2c47bf80
MB
440 rep_sq->send_to_vport_rule = flow_rule;
441 list_add(&rep_sq->list, &rpriv->vport_sqs_list);
f7a68945
MB
442 }
443 return 0;
444
445out_err:
446 mlx5e_sqs2vport_stop(esw, rep);
447 return err;
448}
449
cb67b832 450int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
cb67b832
HHZ
451{
452 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1d447a39
SM
453 struct mlx5e_rep_priv *rpriv = priv->ppriv;
454 struct mlx5_eswitch_rep *rep = rpriv->rep;
cb67b832 455 struct mlx5e_channel *c;
9008ae07
SM
456 int n, tc, num_sqs = 0;
457 int err = -ENOMEM;
5ecadff0 458 u32 *sqs;
cb67b832 459
5ecadff0 460 sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
cb67b832 461 if (!sqs)
9008ae07 462 goto out;
cb67b832 463
ff9c852f
SM
464 for (n = 0; n < priv->channels.num; n++) {
465 c = priv->channels.c[n];
cb67b832
HHZ
466 for (tc = 0; tc < c->num_tc; tc++)
467 sqs[num_sqs++] = c->sq[tc].sqn;
468 }
469
f7a68945 470 err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
cb67b832 471 kfree(sqs);
9008ae07
SM
472
473out:
474 if (err)
475 netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
cb67b832
HHZ
476 return err;
477}
478
cb67b832
HHZ
479void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
480{
481 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1d447a39
SM
482 struct mlx5e_rep_priv *rpriv = priv->ppriv;
483 struct mlx5_eswitch_rep *rep = rpriv->rep;
cb67b832 484
f7a68945 485 mlx5e_sqs2vport_stop(esw, rep);
cb67b832
HHZ
486}
487
5cc3a8c6
SM
488static unsigned long mlx5e_rep_ipv6_interval(void)
489{
490 if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
491 return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
492
493 return ~0UL;
494}
495
f6dfb4c3
HHZ
496static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
497{
5cc3a8c6
SM
498 unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
499 unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
5ed99fb4 500 struct net_device *netdev = rpriv->netdev;
f6dfb4c3
HHZ
501 struct mlx5e_priv *priv = netdev_priv(netdev);
502
503 rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
504 mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
505}
506
507void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
508{
509 struct mlx5e_rep_priv *rpriv = priv->ppriv;
510 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
511
512 mlx5_fc_queue_stats_work(priv->mdev,
513 &neigh_update->neigh_stats_work,
514 neigh_update->min_interval);
515}
516
61081f9c
VB
517static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
518{
519 return refcount_inc_not_zero(&nhe->refcnt);
520}
521
522static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
523
524static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
525{
526 if (refcount_dec_and_test(&nhe->refcnt)) {
527 mlx5e_rep_neigh_entry_remove(nhe);
1216ce9d 528 kfree_rcu(nhe, rcu);
61081f9c
VB
529 }
530}
531
1216ce9d
VB
532static struct mlx5e_neigh_hash_entry *
533mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
534 struct mlx5e_neigh_hash_entry *nhe)
535{
536 struct mlx5e_neigh_hash_entry *next = NULL;
537
538 rcu_read_lock();
539
540 for (next = nhe ?
541 list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
542 &nhe->neigh_list,
543 struct mlx5e_neigh_hash_entry,
544 neigh_list) :
545 list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
546 struct mlx5e_neigh_hash_entry,
547 neigh_list);
548 next;
549 next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
550 &next->neigh_list,
551 struct mlx5e_neigh_hash_entry,
552 neigh_list))
553 if (mlx5e_rep_neigh_entry_hold(next))
554 break;
555
556 rcu_read_unlock();
557
558 if (nhe)
559 mlx5e_rep_neigh_entry_release(nhe);
560
561 return next;
562}
563
f6dfb4c3
HHZ
564static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
565{
566 struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
567 neigh_update.neigh_stats_work.work);
5ed99fb4 568 struct net_device *netdev = rpriv->netdev;
f6dfb4c3 569 struct mlx5e_priv *priv = netdev_priv(netdev);
1216ce9d 570 struct mlx5e_neigh_hash_entry *nhe = NULL;
f6dfb4c3
HHZ
571
572 rtnl_lock();
573 if (!list_empty(&rpriv->neigh_update.neigh_list))
574 mlx5e_rep_queue_neigh_stats_work(priv);
575
1216ce9d
VB
576 while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
577 mlx5e_tc_update_neigh_used_value(nhe);
f6dfb4c3
HHZ
578
579 rtnl_unlock();
580}
581
232c0013
HHZ
582static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
583 struct mlx5e_encap_entry *e,
584 bool neigh_connected,
585 unsigned char ha[ETH_ALEN])
586{
587 struct ethhdr *eth = (struct ethhdr *)e->encap_header;
2a1f1768
VB
588 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
589 bool encap_connected;
590 LIST_HEAD(flow_list);
232c0013
HHZ
591
592 ASSERT_RTNL();
593
2a1f1768
VB
594 /* wait for encap to be fully initialized */
595 wait_for_completion(&e->res_ready);
596
597 mutex_lock(&esw->offloads.encap_tbl_lock);
598 encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
64d7b685
VB
599 if (e->compl_result < 0 || (encap_connected == neigh_connected &&
600 ether_addr_equal(e->h_dest, ha)))
2a1f1768
VB
601 goto unlock;
602
603 mlx5e_take_all_encap_flows(e, &flow_list);
604
61c806da
OG
605 if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
606 (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
2a1f1768 607 mlx5e_tc_encap_flows_del(priv, e, &flow_list);
232c0013
HHZ
608
609 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
610 ether_addr_copy(e->h_dest, ha);
611 ether_addr_copy(eth->h_dest, ha);
6707f74b
TZ
612 /* Update the encap source mac, in case that we delete
613 * the flows when encap source mac changed.
614 */
615 ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
232c0013 616
2a1f1768 617 mlx5e_tc_encap_flows_add(priv, e, &flow_list);
232c0013 618 }
2a1f1768
VB
619unlock:
620 mutex_unlock(&esw->offloads.encap_tbl_lock);
621 mlx5e_put_encap_flow_list(priv, &flow_list);
232c0013
HHZ
622}
623
624static void mlx5e_rep_neigh_update(struct work_struct *work)
625{
626 struct mlx5e_neigh_hash_entry *nhe =
627 container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
628 struct neighbour *n = nhe->n;
629 struct mlx5e_encap_entry *e;
630 unsigned char ha[ETH_ALEN];
631 struct mlx5e_priv *priv;
632 bool neigh_connected;
232c0013
HHZ
633 u8 nud_state, dead;
634
635 rtnl_lock();
636
637 /* If these parameters are changed after we release the lock,
638 * we'll receive another event letting us know about it.
639 * We use this lock to avoid inconsistency between the neigh validity
640 * and it's hw address.
641 */
642 read_lock_bh(&n->lock);
643 memcpy(ha, n->ha, ETH_ALEN);
644 nud_state = n->nud_state;
645 dead = n->dead;
646 read_unlock_bh(&n->lock);
647
648 neigh_connected = (nud_state & NUD_VALID) && !dead;
649
5970882a
VB
650 trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
651
232c0013 652 list_for_each_entry(e, &nhe->encap_list, encap_list) {
948993f2
VB
653 if (!mlx5e_encap_take(e))
654 continue;
655
232c0013 656 priv = netdev_priv(e->out_dev);
2a1f1768 657 mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
948993f2 658 mlx5e_encap_put(priv, e);
232c0013
HHZ
659 }
660 mlx5e_rep_neigh_entry_release(nhe);
661 rtnl_unlock();
662 neigh_release(n);
663}
664
f5bc2c5d
OS
665static struct mlx5e_rep_indr_block_priv *
666mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
667 struct net_device *netdev)
668{
669 struct mlx5e_rep_indr_block_priv *cb_priv;
670
671 /* All callback list access should be protected by RTNL. */
672 ASSERT_RTNL();
673
674 list_for_each_entry(cb_priv,
675 &rpriv->uplink_priv.tc_indr_block_priv_list,
676 list)
677 if (cb_priv->netdev == netdev)
678 return cb_priv;
679
680 return NULL;
681}
682
683static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
684{
685 struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
686 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
687
688 list_for_each_entry_safe(cb_priv, temp, head, list) {
25f2d0e7 689 mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
f5bc2c5d
OS
690 kfree(cb_priv);
691 }
692}
693
694static int
695mlx5e_rep_indr_offload(struct net_device *netdev,
f9e30088 696 struct flow_cls_offload *flower,
5a37a8df 697 struct mlx5e_rep_indr_block_priv *indr_priv,
698 unsigned long flags)
f5bc2c5d 699{
ef381359 700 struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
d9ee0491 701 int err = 0;
ef381359
OS
702
703 switch (flower->command) {
f9e30088 704 case FLOW_CLS_REPLACE:
d9ee0491 705 err = mlx5e_configure_flower(netdev, priv, flower, flags);
ef381359 706 break;
f9e30088 707 case FLOW_CLS_DESTROY:
d9ee0491 708 err = mlx5e_delete_flower(netdev, priv, flower, flags);
ef381359 709 break;
f9e30088 710 case FLOW_CLS_STATS:
d9ee0491 711 err = mlx5e_stats_flower(netdev, priv, flower, flags);
ef381359
OS
712 break;
713 default:
714 err = -EOPNOTSUPP;
715 }
716
717 return err;
f5bc2c5d
OS
718}
719
5a37a8df 720static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
721 void *type_data, void *indr_priv)
f5bc2c5d 722{
5a37a8df 723 unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
f5bc2c5d
OS
724 struct mlx5e_rep_indr_block_priv *priv = indr_priv;
725
726 switch (type) {
727 case TC_SETUP_CLSFLOWER:
5a37a8df 728 return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
729 flags);
f5bc2c5d
OS
730 default:
731 return -EOPNOTSUPP;
732 }
733}
734
07c264ab 735static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
736 void *type_data, void *indr_priv)
737{
738 struct mlx5e_rep_indr_block_priv *priv = indr_priv;
739 struct flow_cls_offload *f = type_data;
740 struct flow_cls_offload tmp;
741 struct mlx5e_priv *mpriv;
742 struct mlx5_eswitch *esw;
743 unsigned long flags;
744 int err;
745
746 mpriv = netdev_priv(priv->rpriv->netdev);
747 esw = mpriv->mdev->priv.eswitch;
748
749 flags = MLX5_TC_FLAG(EGRESS) |
750 MLX5_TC_FLAG(ESW_OFFLOAD) |
751 MLX5_TC_FLAG(FT_OFFLOAD);
752
753 switch (type) {
754 case TC_SETUP_CLSFLOWER:
755 memcpy(&tmp, f, sizeof(*f));
756
757 /* Re-use tc offload path by moving the ft flow to the
758 * reserved ft chain.
759 *
760 * FT offload can use prio range [0, INT_MAX], so we normalize
761 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
762 * as with tc, where prio 0 isn't supported.
763 *
764 * We only support chain 0 of FT offload.
765 */
766 if (!mlx5_esw_chains_prios_supported(esw) ||
767 tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
768 tmp.common.chain_index)
769 return -EOPNOTSUPP;
770
771 tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
772 tmp.common.prio++;
773 err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
774 memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
775 return err;
776 default:
777 return -EOPNOTSUPP;
778 }
779}
780
5a37a8df 781static void mlx5e_rep_indr_block_unbind(void *cb_priv)
955bcb6e
PNA
782{
783 struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
784
785 list_del(&indr_priv->list);
786 kfree(indr_priv);
787}
788
789static LIST_HEAD(mlx5e_block_cb_list);
790
f5bc2c5d 791static int
5a37a8df 792mlx5e_rep_indr_setup_block(struct net_device *netdev,
793 struct mlx5e_rep_priv *rpriv,
794 struct flow_block_offload *f,
795 flow_setup_cb_t *setup_cb)
f5bc2c5d
OS
796{
797 struct mlx5e_rep_indr_block_priv *indr_priv;
955bcb6e 798 struct flow_block_cb *block_cb;
f5bc2c5d 799
32f8c409 800 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
f5bc2c5d
OS
801 return -EOPNOTSUPP;
802
c9f14470 803 f->unlocked_driver_cb = true;
955bcb6e
PNA
804 f->driver_block_list = &mlx5e_block_cb_list;
805
f5bc2c5d 806 switch (f->command) {
9c0e189e 807 case FLOW_BLOCK_BIND:
f5bc2c5d
OS
808 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
809 if (indr_priv)
810 return -EEXIST;
811
812 indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
813 if (!indr_priv)
814 return -ENOMEM;
815
816 indr_priv->netdev = netdev;
817 indr_priv->rpriv = rpriv;
818 list_add(&indr_priv->list,
819 &rpriv->uplink_priv.tc_indr_block_priv_list);
820
5a37a8df 821 block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
822 mlx5e_rep_indr_block_unbind);
955bcb6e 823 if (IS_ERR(block_cb)) {
f5bc2c5d
OS
824 list_del(&indr_priv->list);
825 kfree(indr_priv);
955bcb6e 826 return PTR_ERR(block_cb);
f5bc2c5d 827 }
955bcb6e
PNA
828 flow_block_cb_add(block_cb, f);
829 list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
f5bc2c5d 830
955bcb6e 831 return 0;
9c0e189e 832 case FLOW_BLOCK_UNBIND:
25f2d0e7
EB
833 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
834 if (!indr_priv)
835 return -ENOENT;
836
5a37a8df 837 block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
955bcb6e
PNA
838 if (!block_cb)
839 return -ENOENT;
f5bc2c5d 840
955bcb6e
PNA
841 flow_block_cb_remove(block_cb, f);
842 list_del(&block_cb->driver_list);
f5bc2c5d
OS
843 return 0;
844 default:
845 return -EOPNOTSUPP;
846 }
847 return 0;
848}
849
850static
5a37a8df 851int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
852 enum tc_setup_type type, void *type_data)
f5bc2c5d
OS
853{
854 switch (type) {
855 case TC_SETUP_BLOCK:
5a37a8df 856 return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
857 mlx5e_rep_indr_setup_tc_cb);
07c264ab 858 case TC_SETUP_FT:
859 return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
860 mlx5e_rep_indr_setup_ft_cb);
f5bc2c5d
OS
861 default:
862 return -EOPNOTSUPP;
863 }
864}
865
866static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
867 struct net_device *netdev)
868{
869 int err;
870
4e481908 871 err = __flow_indr_block_cb_register(netdev, rpriv,
5a37a8df 872 mlx5e_rep_indr_setup_cb,
4e481908 873 rpriv);
f5bc2c5d
OS
874 if (err) {
875 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
876
877 mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
878 netdev_name(netdev), err);
879 }
880 return err;
881}
882
25f2d0e7
EB
883static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
884 struct net_device *netdev)
f5bc2c5d 885{
5a37a8df 886 __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
4e481908 887 rpriv);
f5bc2c5d
OS
888}
889
890static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
891 unsigned long event, void *ptr)
892{
893 struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
894 uplink_priv.netdevice_nb);
895 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
896 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
897
35a605db 898 if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
24bcd210 899 !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
f5bc2c5d
OS
900 return NOTIFY_OK;
901
902 switch (event) {
903 case NETDEV_REGISTER:
904 mlx5e_rep_indr_register_block(rpriv, netdev);
905 break;
906 case NETDEV_UNREGISTER:
25f2d0e7 907 mlx5e_rep_indr_unregister_block(rpriv, netdev);
f5bc2c5d
OS
908 break;
909 }
910 return NOTIFY_OK;
911}
912
93415e45
VB
913static void
914mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
915 struct mlx5e_neigh_hash_entry *nhe,
916 struct neighbour *n)
917{
918 /* Take a reference to ensure the neighbour and mlx5 encap
919 * entry won't be destructed until we drop the reference in
920 * delayed work.
921 */
922 neigh_hold(n);
923
924 /* This assignment is valid as long as the the neigh reference
925 * is taken
926 */
927 nhe->n = n;
928
929 if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
930 mlx5e_rep_neigh_entry_release(nhe);
931 neigh_release(n);
932 }
933}
934
232c0013
HHZ
935static struct mlx5e_neigh_hash_entry *
936mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
937 struct mlx5e_neigh *m_neigh);
938
/* Netevent notifier: reacts to neighbour state changes (to refresh offloaded
 * encap entries) and to per-device DELAY_PROBE_TIME changes (to tighten the
 * flow-counter sampling interval). Only ARP/ND tables are of interest.
 */
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* Build the lookup key from the neighbour itself */
		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* Lookup takes an nhe reference on success (released by the
		 * queued work or by the failed-queue path below).
		 */
		rcu_read_lock();
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		rcu_read_unlock();
		if (!nhe)
			return NOTIFY_DONE;

		mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay prob time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* Only act if we track at least one neighbour on this device */
		rcu_read_lock();
		list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
					neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		if (!found)
			return NOTIFY_DONE;

		/* min_interval only ever shrinks here; it tracks the smallest
		 * delay-probe time across all tracked devices.
		 */
		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}
1011
37b498ff
HHZ
/* Hash table of neighbour entries, keyed by the full mlx5e_neigh tuple
 * (device, family, destination IP).
 */
static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};
1018
/* Initialize per-representor neighbour tracking: hash table + list of
 * tracked neighbours, the periodic stats work, and the netevent notifier
 * that drives updates. On notifier registration failure the hash table
 * is torn down again.
 */
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	mutex_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}
1044
/* Tear down neighbour tracking. Order matters: stop the event source
 * (notifier) first, then drain any queued update works, then stop the
 * stats work, and only then destroy the lock and hash table.
 */
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	mutex_destroy(&neigh_update->encap_lock);
	rhashtable_destroy(&neigh_update->neigh_ht);
}
1059
1060static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
1061 struct mlx5e_neigh_hash_entry *nhe)
1062{
1063 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1064 int err;
1065
1066 err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
1067 &nhe->rhash_node,
1068 mlx5e_neigh_ht_params);
1069 if (err)
1070 return err;
1071
1216ce9d 1072 list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
37b498ff
HHZ
1073
1074 return err;
1075}
1076
61081f9c 1077static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
37b498ff 1078{
61081f9c 1079 struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
37b498ff 1080
70e83bd3 1081 mutex_lock(&rpriv->neigh_update.encap_lock);
232c0013 1082
1216ce9d 1083 list_del_rcu(&nhe->neigh_list);
37b498ff
HHZ
1084
1085 rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
1086 &nhe->rhash_node,
1087 mlx5e_neigh_ht_params);
70e83bd3 1088 mutex_unlock(&rpriv->neigh_update.encap_lock);
37b498ff
HHZ
1089}
1090
70e83bd3
VB
/* This function must only be called under the representor's encap_lock or
 * inside rcu read lock section.
 *
 * Returns the matching entry with an extra reference taken, or NULL when
 * the entry is absent or already being torn down (refcount hit zero).
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_neigh_hash_entry *nhe;

	nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				     mlx5e_neigh_ht_params);
	return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}
1106
232c0013
HHZ
/* Allocate and publish a neighbour hash entry for the encap entry's
 * neighbour key. On success *nhe holds the new entry with refcount 1;
 * on insert failure the allocation is freed and an error is returned.
 */
static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	(*nhe)->priv = priv;
	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	spin_lock_init(&(*nhe)->encap_list_lock);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}
1133
232c0013
HHZ
1134int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
1135 struct mlx5e_encap_entry *e)
1136{
97417f61
EB
1137 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1138 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1139 struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
232c0013
HHZ
1140 struct mlx5e_neigh_hash_entry *nhe;
1141 int err;
1142
97417f61
EB
1143 err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
1144 if (err)
1145 return err;
70e83bd3
VB
1146
1147 mutex_lock(&rpriv->neigh_update.encap_lock);
232c0013
HHZ
1148 nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
1149 if (!nhe) {
1150 err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
97417f61 1151 if (err) {
70e83bd3 1152 mutex_unlock(&rpriv->neigh_update.encap_lock);
97417f61
EB
1153 mlx5_tun_entropy_refcount_dec(tun_entropy,
1154 e->reformat_type);
232c0013 1155 return err;
97417f61 1156 }
232c0013 1157 }
70e83bd3 1158
61081f9c 1159 e->nhe = nhe;
ac0d9176
VB
1160 spin_lock(&nhe->encap_list_lock);
1161 list_add_rcu(&e->encap_list, &nhe->encap_list);
1162 spin_unlock(&nhe->encap_list_lock);
1163
70e83bd3
VB
1164 mutex_unlock(&rpriv->neigh_update.encap_lock);
1165
232c0013
HHZ
1166 return 0;
1167}
1168
/* Detach an encap entry from its neighbour hash entry: unlink it from the
 * nhe's encap list, drop the nhe reference taken at attach time, and
 * release the tunnel-entropy reference. No-op if never attached.
 */
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	/* May free the nhe if this was the last reference */
	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
1187
9b81d5a9 1188static int mlx5e_rep_open(struct net_device *dev)
20a1ea67
OG
1189{
1190 struct mlx5e_priv *priv = netdev_priv(dev);
1d447a39
SM
1191 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1192 struct mlx5_eswitch_rep *rep = rpriv->rep;
20a1ea67
OG
1193 int err;
1194
63bfd399
EBE
1195 mutex_lock(&priv->state_lock);
1196 err = mlx5e_open_locked(dev);
20a1ea67 1197 if (err)
63bfd399 1198 goto unlock;
20a1ea67 1199
84c9c8f2 1200 if (!mlx5_modify_vport_admin_state(priv->mdev,
cc9c82a8 1201 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
cbc44e76
BW
1202 rep->vport, 1,
1203 MLX5_VPORT_ADMIN_STATE_UP))
20a1ea67
OG
1204 netif_carrier_on(dev);
1205
63bfd399
EBE
1206unlock:
1207 mutex_unlock(&priv->state_lock);
1208 return err;
20a1ea67
OG
1209}
1210
9b81d5a9 1211static int mlx5e_rep_close(struct net_device *dev)
20a1ea67
OG
1212{
1213 struct mlx5e_priv *priv = netdev_priv(dev);
1d447a39
SM
1214 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1215 struct mlx5_eswitch_rep *rep = rpriv->rep;
63bfd399 1216 int ret;
20a1ea67 1217
63bfd399 1218 mutex_lock(&priv->state_lock);
84c9c8f2 1219 mlx5_modify_vport_admin_state(priv->mdev,
cc9c82a8 1220 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
cbc44e76
BW
1221 rep->vport, 1,
1222 MLX5_VPORT_ADMIN_STATE_DOWN);
63bfd399
EBE
1223 ret = mlx5e_close_locked(dev);
1224 mutex_unlock(&priv->state_lock);
1225 return ret;
20a1ea67
OG
1226}
1227
de4784ca 1228static int
855afa09 1229mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
f9e30088 1230 struct flow_cls_offload *cls_flower, int flags)
d957b4e3 1231{
8c818c27 1232 switch (cls_flower->command) {
f9e30088 1233 case FLOW_CLS_REPLACE:
71d82d2a
OS
1234 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
1235 flags);
f9e30088 1236 case FLOW_CLS_DESTROY:
71d82d2a
OS
1237 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
1238 flags);
f9e30088 1239 case FLOW_CLS_STATS:
71d82d2a
OS
1240 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
1241 flags);
60bd4af8
OG
1242 default:
1243 return -EOPNOTSUPP;
1244 }
1245}
1246
fcb64c0f
EC
1247static
1248int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
1249 struct tc_cls_matchall_offload *ma)
1250{
1251 switch (ma->command) {
1252 case TC_CLSMATCHALL_REPLACE:
1253 return mlx5e_tc_configure_matchall(priv, ma);
1254 case TC_CLSMATCHALL_DESTROY:
1255 return mlx5e_tc_delete_matchall(priv, ma);
1256 case TC_CLSMATCHALL_STATS:
1257 mlx5e_tc_stats_matchall(priv, ma);
1258 return 0;
1259 default:
1260 return -EOPNOTSUPP;
1261 }
1262}
1263
855afa09
JP
/* flow_block callback for the TC path: handles flower and matchall
 * classifiers with ingress + e-switch offload flags.
 */
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1279
84179981
PB
/* flow_block callback for the FT (flow table) offload path. Works on a
 * local copy of the flower request so the chain/prio remap below is not
 * visible to the caller; only the stats are copied back.
 */
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload tmp, *f = type_data;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		if (!mlx5_esw_chains_prios_supported(esw))
			return -EOPNOTSUPP;

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		/* only stats may legitimately flow back to the caller */
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}
1324
1325static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
1326static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
/* ndo_setup_tc for representors: register the TC or FT flow block with the
 * appropriate callback. unlocked_driver_cb is set because mlx5e handles
 * its own locking rather than relying on rtnl.
 */
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}
1350
370bad0f
OG
1351bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1352{
1d447a39
SM
1353 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1354 struct mlx5_eswitch_rep *rep;
1355
733d3e54 1356 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39 1357 return false;
370bad0f 1358
d9ee0491
OG
1359 if (!rpriv) /* non vport rep mlx5e instances don't use this field */
1360 return false;
370bad0f 1361
d9ee0491 1362 rep = rpriv->rep;
b05af6aa 1363 return (rep->vport == MLX5_VPORT_UPLINK);
370bad0f
OG
1364}
1365
13e509a4 1366static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
370bad0f 1367{
370bad0f
OG
1368 switch (attr_id) {
1369 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
370bad0f
OG
1370 return true;
1371 }
1372
1373 return false;
1374}
1375
/* Fold the per-channel software counters into an rtnl_link_stats64,
 * used as the CPU-hit (non-offloaded traffic) statistic.
 */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}
1385
13e509a4
OG
1386static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
1387 void *sp)
370bad0f
OG
1388{
1389 switch (attr_id) {
1390 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1391 return mlx5e_get_sw_stats64(dev, sp);
1392 }
1393
1394 return -EINVAL;
1395}
1396
/* ndo_get_stats64 for VF representors: return the cached vport counters
 * immediately and kick an async refresh for the next query.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
1406
9b81d5a9 1407static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
d9ee0491
OG
1408{
1409 return mlx5e_change_mtu(netdev, new_mtu, NULL);
1410}
1411
b36cdb42 1412static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
d9ee0491 1413{
b9ab5d0e 1414 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
d9ee0491
OG
1415}
1416
b36cdb42 1417static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
d9ee0491 1418{
b36cdb42
OG
1419 struct sockaddr *saddr = addr;
1420
1421 if (!is_valid_ether_addr(saddr->sa_data))
1422 return -EADDRNOTAVAIL;
1423
1424 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1425 return 0;
d9ee0491
OG
1426}
1427
6ce966fd
OG
/* ndo_set_vf_vlan for the uplink representor: legacy VF VLAN configuration
 * is not supported in switchdev mode; only vlan 0 is accepted (as a no-op)
 * to keep management tools such as libvirt working.
 */
static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}
1439
/* ndo_get_devlink_port: expose the rep's devlink port for devlink tooling. */
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}
1447
9b81d5a9
VP
/* netdev ops for VF/SF port representors (non-uplink). */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
};
250a42b6 1459
/* netdev ops for the uplink port representor: full NIC-like feature set
 * (tunnels, VF config, feature toggles) on top of the rep TC hooks.
 */
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features        = mlx5e_set_features,
};
1481
ffec9702
TZ
/* Identify an uplink representor netdev by its ops vector. */
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
}
1486
a0646c88
EB
1487bool mlx5e_eswitch_rep(struct net_device *netdev)
1488{
9b81d5a9 1489 if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
a0646c88
EB
1490 netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
1491 return true;
1492
1493 return false;
1494}
1495
/* Build the channel parameters for a representor netdev. The uplink rep
 * gets full-size SQs; VF reps use the smaller rep default. TC count is
 * fixed at 1 and tunnel offload is disabled on reps.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc                = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
1533
/* Configure the representor net_device: ops/ethtool/MAC differ between the
 * uplink rep (persistent HW MAC, DCB, uplink ops) and VF reps (random MAC,
 * VLAN-challenged). Common offload features are then enabled for both.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	/* reps are not movable between network namespaces */
	netdev->features       |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features    |= NETIF_F_HW_TC;
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}
1577
182570b2
FD
/* Profile .init for representors: set up the shared mlx5e netdev state,
 * then apply rep-specific params and netdev configuration.
 */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}
1599
/* Profile .cleanup for representors: undo mlx5e_netdev_init. */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}
1604
84a09733
GT
/* Create the rep's traffic-type classification (TTC) steering table that
 * spreads traffic to the indirect/direct TIRs by protocol.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}
1633
20f7b37f
SM
/* Establish the rep's RX root flow table. VF reps steer straight into
 * their TTC table; the uplink rep gets an empty auto-chaining table so
 * ethtool steering or TTC can be chained behind it.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = priv->fs.ttc.ft.t;
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}
1669
1670static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
1671{
1672 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1673 struct mlx5_eswitch_rep *rep = rpriv->rep;
1674
1675 if (rep->vport != MLX5_VPORT_UPLINK)
1676 return;
1677 mlx5_destroy_flow_table(rpriv->root_ft);
1678}
1679
/* Install the e-switch rule steering this rep vport's RX traffic into the
 * rep's root flow table. The rule handle is kept for teardown.
 */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
1697
/* Profile .init_rx for representors: build the RX pipeline bottom-up
 * (drop RQ, RQTs, TIRs, TTC, root FT, vport RX rule, ethtool steering).
 * The error unwind mirrors the construction order exactly.
 */
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}
1759
/* Profile .cleanup_rx for representors: destroy the RX pipeline top-down,
 * in the reverse order of mlx5e_init_rep_rx.
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, false);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}
1773
8520fa57
VB
/* Uplink-rep .init_rx: additionally creates the queue counters before the
 * common rep RX init.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_create_q_counters(priv);
	return mlx5e_init_rep_rx(priv);
}
1779
/* Uplink-rep .cleanup_rx: common rep RX cleanup, then drop the queue
 * counters created in mlx5e_init_ul_rep_rx.
 */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
1785
29b598dd
RD
/* Uplink-only TX init: set up the shared TC flow table, tunnel entropy,
 * unready-flows bookkeeping, and the per-netns netdev notifier used for
 * indirect (tunnel device) TC block offload.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	mutex_init(&uplink_priv->unready_flows_lock);
	INIT_LIST_HEAD(&uplink_priv->unready_flows);

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	/* init indirect block notifications */
	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
	uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
	err = register_netdevice_notifier_dev_net(rpriv->netdev,
						  &uplink_priv->netdevice_nb,
						  &uplink_priv->netdevice_nn);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
		goto tc_esw_cleanup;
	}

	return 0;

tc_esw_cleanup:
	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
	return err;
}
1824
cb67b832
HHZ
/* Profile .init_tx for representors: create the TISes, plus the uplink
 * rep's extra TX state. TISes are destroyed if the uplink part fails.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}
1848
29b598dd
RD
/* Uplink-only TX cleanup: mirror of mlx5e_init_uplink_rep_tx — stop
 * indirect-block notifications first, then tear down the shared TC table.
 */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

	/* clean indirect TC block notifications */
	unregister_netdevice_notifier_dev_net(rpriv->netdev,
					      &uplink_priv->netdevice_nb,
					      &uplink_priv->netdevice_nn);
	mlx5e_rep_indr_clean_block_privs(rpriv);

	/* delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
1863
d9ee0491
OG
/* Profile .cleanup_tx for representors: destroy TISes and, for the uplink
 * rep, its extra TX state.
 */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
}
1873
9b81d5a9 1874static void mlx5e_rep_enable(struct mlx5e_priv *priv)
b36cdb42 1875{
6d7ee2ed 1876 mlx5e_set_netdev_mtu_boundaries(priv);
b36cdb42
OG
1877}
1878
a90f88fe
GT
/* Profile .update_rx for representors: nothing to refresh. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
1883
b36cdb42
OG
/* Async device-event notifier for the uplink rep: port up/down events
 * trigger a carrier update; port-affinity changes trigger re-offload of
 * pending (unready) TC flows. Both are handled via the priv workqueue.
 */
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
		struct mlx5e_rep_priv *rpriv = priv->ppriv;

		queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
1913
1914static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
1915{
1916 struct net_device *netdev = priv->netdev;
1917 struct mlx5_core_dev *mdev = priv->mdev;
b4a23329 1918 struct mlx5e_rep_priv *rpriv = priv->ppriv;
b36cdb42
OG
1919 u16 max_mtu;
1920
1921 netdev->min_mtu = ETH_MIN_MTU;
1922 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
1923 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1924 mlx5e_set_dev_port_mtu(priv);
1925
b4a23329
RD
1926 INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
1927 mlx5e_tc_reoffload_flows_work);
1928
b36cdb42
OG
1929 mlx5_lag_add(mdev, netdev);
1930 priv->events_nb.notifier_call = uplink_rep_async_event;
1931 mlx5_notifier_register(mdev, &priv->events_nb);
1932#ifdef CONFIG_MLX5_CORE_EN_DCB
1933 mlx5e_dcbnl_initialize(priv);
1934 mlx5e_dcbnl_init_app(priv);
1935#endif
1936}
1937
1938static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
1939{
1940 struct mlx5_core_dev *mdev = priv->mdev;
b4a23329 1941 struct mlx5e_rep_priv *rpriv = priv->ppriv;
b36cdb42
OG
1942
1943#ifdef CONFIG_MLX5_CORE_EN_DCB
1944 mlx5e_dcbnl_delete_app(priv);
1945#endif
1946 mlx5_notifier_unregister(mdev, &priv->events_nb);
b4a23329 1947 cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
e387f7d5 1948 mlx5_lag_remove(mdev);
b36cdb42
OG
1949}
1950
8a236b15
VB
1951static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
1952static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
1953
1954/* The stats groups order is opposite to the update_stats() order calls */
1955static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
1956 &MLX5E_STATS_GRP(sw_rep),
1957 &MLX5E_STATS_GRP(vport_rep),
1958};
1959
1960static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
1961{
1962 return ARRAY_SIZE(mlx5e_rep_stats_grps);
1963}
1964
1965/* The stats groups order is opposite to the update_stats() order calls */
1966static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
7c453526
VB
1967 &MLX5E_STATS_GRP(sw),
1968 &MLX5E_STATS_GRP(qcnt),
1969 &MLX5E_STATS_GRP(vnic_env),
1970 &MLX5E_STATS_GRP(vport),
1971 &MLX5E_STATS_GRP(802_3),
1972 &MLX5E_STATS_GRP(2863),
1973 &MLX5E_STATS_GRP(2819),
1974 &MLX5E_STATS_GRP(phy),
1975 &MLX5E_STATS_GRP(eth_ext),
1976 &MLX5E_STATS_GRP(pcie),
1977 &MLX5E_STATS_GRP(per_prio),
1978 &MLX5E_STATS_GRP(pme),
1979 &MLX5E_STATS_GRP(channels),
1980 &MLX5E_STATS_GRP(per_port_buff_congest),
8a236b15
VB
1981};
1982
1983static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
1984{
1985 return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
1986}
1987
9b81d5a9 1988static const struct mlx5e_profile mlx5e_rep_profile = {
cb67b832 1989 .init = mlx5e_init_rep,
182570b2 1990 .cleanup = mlx5e_cleanup_rep,
cb67b832
HHZ
1991 .init_rx = mlx5e_init_rep_rx,
1992 .cleanup_rx = mlx5e_cleanup_rep_rx,
1993 .init_tx = mlx5e_init_rep_tx,
d9ee0491 1994 .cleanup_tx = mlx5e_cleanup_rep_tx,
9b81d5a9 1995 .enable = mlx5e_rep_enable,
a90f88fe 1996 .update_rx = mlx5e_update_rep_rx,
8a236b15 1997 .update_stats = mlx5e_update_ndo_stats,
20fd0c19 1998 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
dfd9e750 1999 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
cb67b832 2000 .max_tc = 1,
694826e3 2001 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
8a236b15
VB
2002 .stats_grps = mlx5e_rep_stats_grps,
2003 .stats_grps_num = mlx5e_rep_stats_grps_num,
cb67b832
HHZ
2004};
2005
b36cdb42
OG
2006static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
2007 .init = mlx5e_init_rep,
2008 .cleanup = mlx5e_cleanup_rep,
8520fa57
VB
2009 .init_rx = mlx5e_init_ul_rep_rx,
2010 .cleanup_rx = mlx5e_cleanup_ul_rep_rx,
b36cdb42
OG
2011 .init_tx = mlx5e_init_rep_tx,
2012 .cleanup_tx = mlx5e_cleanup_rep_tx,
2013 .enable = mlx5e_uplink_rep_enable,
2014 .disable = mlx5e_uplink_rep_disable,
a90f88fe 2015 .update_rx = mlx5e_update_rep_rx,
8a236b15 2016 .update_stats = mlx5e_update_ndo_stats,
b36cdb42
OG
2017 .update_carrier = mlx5e_update_carrier,
2018 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
dfd9e750 2019 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
b36cdb42 2020 .max_tc = MLX5E_MAX_NUM_TC,
694826e3 2021 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
8a236b15
VB
2022 .stats_grps = mlx5e_ul_rep_stats_grps,
2023 .stats_grps_num = mlx5e_ul_rep_stats_grps_num,
b36cdb42
OG
2024};
2025
f60f315d
PP
2026static bool
2027is_devlink_port_supported(const struct mlx5_core_dev *dev,
2028 const struct mlx5e_rep_priv *rpriv)
2029{
2030 return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
2031 rpriv->rep->vport == MLX5_VPORT_PF ||
2032 mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
2033}
2034
c938451f
PP
2035static unsigned int
2036vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
2037{
2038 return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
2039}
2040
f60f315d
PP
2041static int register_devlink_port(struct mlx5_core_dev *dev,
2042 struct mlx5e_rep_priv *rpriv)
2043{
2044 struct devlink *devlink = priv_to_devlink(dev);
2045 struct mlx5_eswitch_rep *rep = rpriv->rep;
2046 struct netdev_phys_item_id ppid = {};
c938451f 2047 unsigned int dl_port_index = 0;
7482d9cb 2048 u16 pfnum;
f60f315d
PP
2049
2050 if (!is_devlink_port_supported(dev, rpriv))
2051 return 0;
2052
724ee179 2053 mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
7482d9cb 2054 pfnum = PCI_FUNC(dev->pdev->devfn);
f60f315d 2055
c938451f 2056 if (rep->vport == MLX5_VPORT_UPLINK) {
f60f315d
PP
2057 devlink_port_attrs_set(&rpriv->dl_port,
2058 DEVLINK_PORT_FLAVOUR_PHYSICAL,
7482d9cb 2059 pfnum, false, 0,
f60f315d 2060 &ppid.id[0], ppid.id_len);
c938451f
PP
2061 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
2062 } else if (rep->vport == MLX5_VPORT_PF) {
f60f315d
PP
2063 devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
2064 &ppid.id[0], ppid.id_len,
7482d9cb 2065 pfnum);
c938451f
PP
2066 dl_port_index = rep->vport;
2067 } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
2068 rpriv->rep->vport)) {
f60f315d
PP
2069 devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
2070 &ppid.id[0], ppid.id_len,
7482d9cb 2071 pfnum, rep->vport - 1);
c938451f
PP
2072 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
2073 }
f60f315d 2074
c938451f 2075 return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
f60f315d
PP
2076}
2077
2078static void unregister_devlink_port(struct mlx5_core_dev *dev,
2079 struct mlx5e_rep_priv *rpriv)
2080{
2081 if (is_devlink_port_supported(dev, rpriv))
2082 devlink_port_unregister(&rpriv->dl_port);
2083}
2084
1d447a39 2085/* e-Switch vport representors */
1d447a39 2086static int
4c66df01 2087mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1d447a39 2088{
b36cdb42 2089 const struct mlx5e_profile *profile;
1d447a39 2090 struct mlx5e_rep_priv *rpriv;
26e59d80 2091 struct net_device *netdev;
779d986d 2092 int nch, err;
26e59d80 2093
1d447a39
SM
2094 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
2095 if (!rpriv)
2096 return -ENOMEM;
2097
d9ee0491
OG
2098 /* rpriv->rep to be looked up when profile->init() is called */
2099 rpriv->rep = rep;
2100
779d986d 2101 nch = mlx5e_get_max_num_channels(dev);
9b81d5a9
VP
2102 profile = (rep->vport == MLX5_VPORT_UPLINK) ?
2103 &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
b36cdb42 2104 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
26e59d80 2105 if (!netdev) {
237ac8de
RD
2106 mlx5_core_warn(dev,
2107 "Failed to create representor netdev for vport %d\n",
2108 rep->vport);
1d447a39 2109 kfree(rpriv);
cb67b832
HHZ
2110 return -EINVAL;
2111 }
26e59d80 2112
71c6eaeb 2113 dev_net_set(netdev, mlx5_core_net(dev));
5ed99fb4 2114 rpriv->netdev = netdev;
8693115a 2115 rep->rep_data[REP_ETH].priv = rpriv;
5ed99fb4 2116 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
26e59d80 2117
b05af6aa 2118 if (rep->vport == MLX5_VPORT_UPLINK) {
aec002f6
OG
2119 err = mlx5e_create_mdev_resources(dev);
2120 if (err)
2121 goto err_destroy_netdev;
2122 }
2123
2c3b5bee 2124 err = mlx5e_attach_netdev(netdev_priv(netdev));
26e59d80 2125 if (err) {
237ac8de
RD
2126 netdev_warn(netdev,
2127 "Failed to attach representor netdev for vport %d\n",
2128 rep->vport);
aec002f6 2129 goto err_destroy_mdev_resources;
26e59d80
MHY
2130 }
2131
37b498ff
HHZ
2132 err = mlx5e_rep_neigh_init(rpriv);
2133 if (err) {
237ac8de
RD
2134 netdev_warn(netdev,
2135 "Failed to initialized neighbours handling for vport %d\n",
2136 rep->vport);
37b498ff
HHZ
2137 goto err_detach_netdev;
2138 }
2139
f60f315d
PP
2140 err = register_devlink_port(dev, rpriv);
2141 if (err) {
237ac8de
RD
2142 netdev_warn(netdev, "Failed to register devlink port %d\n",
2143 rep->vport);
f60f315d
PP
2144 goto err_neigh_cleanup;
2145 }
2146
26e59d80
MHY
2147 err = register_netdev(netdev);
2148 if (err) {
237ac8de
RD
2149 netdev_warn(netdev,
2150 "Failed to register representor netdev for vport %d\n",
2151 rep->vport);
f60f315d 2152 goto err_devlink_cleanup;
26e59d80
MHY
2153 }
2154
f60f315d
PP
2155 if (is_devlink_port_supported(dev, rpriv))
2156 devlink_port_type_eth_set(&rpriv->dl_port, netdev);
cb67b832 2157 return 0;
26e59d80 2158
f60f315d
PP
2159err_devlink_cleanup:
2160 unregister_devlink_port(dev, rpriv);
2161
37b498ff
HHZ
2162err_neigh_cleanup:
2163 mlx5e_rep_neigh_cleanup(rpriv);
2164
26e59d80 2165err_detach_netdev:
2c3b5bee 2166 mlx5e_detach_netdev(netdev_priv(netdev));
26e59d80 2167
aec002f6 2168err_destroy_mdev_resources:
b05af6aa 2169 if (rep->vport == MLX5_VPORT_UPLINK)
aec002f6
OG
2170 mlx5e_destroy_mdev_resources(dev);
2171
26e59d80 2172err_destroy_netdev:
2c3b5bee 2173 mlx5e_destroy_netdev(netdev_priv(netdev));
1d447a39 2174 kfree(rpriv);
26e59d80 2175 return err;
cb67b832
HHZ
2176}
2177
1d447a39 2178static void
4c66df01 2179mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
cb67b832 2180{
5ed99fb4
MB
2181 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
2182 struct net_device *netdev = rpriv->netdev;
1d447a39 2183 struct mlx5e_priv *priv = netdev_priv(netdev);
f60f315d 2184 struct mlx5_core_dev *dev = priv->mdev;
1d447a39 2185 void *ppriv = priv->ppriv;
cb67b832 2186
f60f315d
PP
2187 if (is_devlink_port_supported(dev, rpriv))
2188 devlink_port_type_clear(&rpriv->dl_port);
5ed99fb4 2189 unregister_netdev(netdev);
f60f315d 2190 unregister_devlink_port(dev, rpriv);
37b498ff 2191 mlx5e_rep_neigh_cleanup(rpriv);
1d447a39 2192 mlx5e_detach_netdev(priv);
b05af6aa 2193 if (rep->vport == MLX5_VPORT_UPLINK)
aec002f6 2194 mlx5e_destroy_mdev_resources(priv->mdev);
1d447a39
SM
2195 mlx5e_destroy_netdev(priv);
2196 kfree(ppriv); /* mlx5e_rep_priv */
2197}
2198
22215908
MB
2199static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
2200{
2201 struct mlx5e_rep_priv *rpriv;
2202
2203 rpriv = mlx5e_rep_to_rep_priv(rep);
2204
2205 return rpriv->netdev;
2206}
2207
8693115a
PP
2208static const struct mlx5_eswitch_rep_ops rep_ops = {
2209 .load = mlx5e_vport_rep_load,
2210 .unload = mlx5e_vport_rep_unload,
2211 .get_proto_dev = mlx5e_vport_rep_get_proto_dev
2212};
2213
aec002f6 2214void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
1d447a39 2215{
aec002f6 2216 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1d447a39 2217
8693115a 2218 mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1d447a39
SM
2219}
2220
aec002f6 2221void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
1d447a39 2222{
1d447a39 2223 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1d447a39 2224
f8e8fa02 2225 mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1d447a39 2226}