]>
Commit | Line | Data |
---|---|---|
cb67b832 HHZ |
1 | /* |
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <generated/utsrelease.h> | |
34 | #include <linux/mlx5/fs.h> | |
35 | #include <net/switchdev.h> | |
d957b4e3 | 36 | #include <net/pkt_cls.h> |
232c0013 HHZ |
37 | #include <net/netevent.h> |
38 | #include <net/arp.h> | |
cb67b832 HHZ |
39 | |
40 | #include "eswitch.h" | |
41 | #include "en.h" | |
1d447a39 | 42 | #include "en_rep.h" |
adb4c123 | 43 | #include "en_tc.h" |
f6dfb4c3 | 44 | #include "fs_core.h" |
cb67b832 HHZ |
45 | |
46 | static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; | |
47 | ||
48 | static void mlx5e_rep_get_drvinfo(struct net_device *dev, | |
49 | struct ethtool_drvinfo *drvinfo) | |
50 | { | |
51 | strlcpy(drvinfo->driver, mlx5e_rep_driver_name, | |
52 | sizeof(drvinfo->driver)); | |
53 | strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); | |
54 | } | |
55 | ||
/* Software (per-ring) counters exposed through ethtool for a vport
 * representor. Each descriptor holds the stat name and its offset
 * inside struct mlx5e_sw_stats.
 */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
64 | ||
65 | static void mlx5e_rep_get_strings(struct net_device *dev, | |
66 | u32 stringset, uint8_t *data) | |
67 | { | |
68 | int i; | |
69 | ||
70 | switch (stringset) { | |
71 | case ETH_SS_STATS: | |
72 | for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) | |
73 | strcpy(data + (i * ETH_GSTRING_LEN), | |
74 | sw_rep_stats_desc[i].format); | |
75 | break; | |
76 | } | |
77 | } | |
78 | ||
370bad0f OG |
79 | static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) |
80 | { | |
81 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | |
1d447a39 SM |
82 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
83 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
370bad0f OG |
84 | struct rtnl_link_stats64 *vport_stats; |
85 | struct ifla_vf_stats vf_stats; | |
86 | int err; | |
87 | ||
88 | err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats); | |
89 | if (err) { | |
90 | pr_warn("vport %d error %d reading stats\n", rep->vport, err); | |
91 | return; | |
92 | } | |
93 | ||
94 | vport_stats = &priv->stats.vf_vport; | |
95 | /* flip tx/rx as we are reporting the counters for the switch vport */ | |
96 | vport_stats->rx_packets = vf_stats.tx_packets; | |
97 | vport_stats->rx_bytes = vf_stats.tx_bytes; | |
98 | vport_stats->tx_packets = vf_stats.rx_packets; | |
99 | vport_stats->tx_bytes = vf_stats.rx_bytes; | |
100 | } | |
101 | ||
/* Aggregate the per-ring software counters of all open channels into
 * priv->stats.sw. The summary is rebuilt from scratch on every call.
 */
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		/* One RQ per channel. */
		rq_stats = &c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;

		/* One SQ per (channel, traffic class). */
		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
		}
	}
}
/* Periodic stats refresh entry point: update both the software (ring)
 * counters and the hardware (e-switch vport) counters.
 */
static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
}
/* ethtool -S handler: refresh the SW counters (only while the device is
 * open, under the state lock) and copy them into @data in descriptor
 * order.
 */
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	if (!data)
		return;

	/* state_lock serializes the update against open/close. */
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);
}
152 | static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) | |
153 | { | |
154 | switch (sset) { | |
155 | case ETH_SS_STATS: | |
156 | return NUM_VPORT_REP_COUNTERS; | |
157 | default: | |
158 | return -EOPNOTSUPP; | |
159 | } | |
160 | } | |
161 | ||
/* ethtool callbacks for representor netdevs: driver info, link state and
 * the software counter set defined above.
 */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};
/* switchdev attr_get callback for representors.
 * Only SWITCHDEV_ATTR_ID_PORT_PARENT_ID is supported; the parent ID
 * reported is the rep's e-switch hardware ID.
 */
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* Representors only make sense while SRIOV is enabled. */
	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Register the SQ numbers of all this rep's channels with the e-switch
 * so their traffic is steered to the correct vport.
 * Returns 0 on success, negative errno (logged) on failure.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u16 *sqs;

	/* One slot per (channel, traffic class) send queue. */
	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
	if (!sqs)
		goto out;

	/* Collect every SQ number across all channels/TCs. */
	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
cb67b832 HHZ |
221 | void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) |
222 | { | |
223 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | |
1d447a39 SM |
224 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
225 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
cb67b832 HHZ |
226 | |
227 | mlx5_eswitch_sqs2vport_stop(esw, rep); | |
228 | } | |
229 | ||
/* Initialize the neighbour stats sampling interval to the smaller of the
 * IPv4 (ARP) and IPv6 (ND) tables' DELAY_PROBE_TIME, then propagate it
 * to the flow-counter sampling logic. When IPv6 is compiled out its
 * interval is ~0UL so the IPv4 value always wins the min.
 */
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}
247 | void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv) | |
248 | { | |
249 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
250 | struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; | |
251 | ||
252 | mlx5_fc_queue_stats_work(priv->mdev, | |
253 | &neigh_update->neigh_stats_work, | |
254 | neigh_update->min_interval); | |
255 | } | |
256 | ||
/* Delayed work: refresh the "recently used" value of every tracked
 * neighbour. Runs under RTNL, which serializes against neigh list
 * mutations done from RTNL context.
 */
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	/* Re-queue first so the work keeps running while entries exist. */
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}
/* Take a reference on a neigh hash entry. */
static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}
/* Drop a reference on a neigh hash entry; frees it on the last put. */
static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}
/* Bring the offloaded encap flows of @e in sync with the neighbour
 * state: tear down offloaded flows when the neighbour went down or its
 * MAC changed, and (re)offload them with the new destination MAC when
 * the neighbour is valid. Must be called under RTNL (asserted).
 */
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	/* Remove currently-offloaded flows if the neigh became invalid or
	 * the hardware address no longer matches the cached one.
	 */
	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
	    !ether_addr_equal(e->h_dest, ha))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		/* Update both the cached dest MAC and the prebuilt
		 * encap header before re-offloading.
		 */
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}
/* Work item queued from the netevent notifier: re-evaluate every encap
 * entry hanging off a neigh hash entry after a neighbour change.
 * Drops the nhe and neighbour references taken by the notifier.
 */
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	/* Only touch entries whose offload state or MAC is out of sync. */
	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

/* Netevent notifier: reacts to neighbour updates (queue a work item to
 * resync offloaded flows) and to per-device DELAY_PROBE_TIME changes
 * (shrink the flow-counter sampling interval). Runs in atomic context.
 */
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
		/* Only ARP and (if enabled) ND table entries are relevant. */
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		/* Work already pending: drop the extra references again. */
		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay prob time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		/* The interval only ever shrinks here (min with current). */
		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}
/* rhashtable layout for neigh hash entries: keyed by the embedded
 * struct mlx5e_neigh (device + family + destination IP).
 */
static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};
/* Set up the rep's neighbour-update machinery: hash table, tracking
 * list, stats work and the netevent notifier.
 * Returns 0 on success, negative errno on failure.
 */
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	/* Register last so events never see half-initialized state. */
	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}
/* Tear down the neighbour-update machinery in reverse order of init:
 * unregister the notifier first so no new work is queued, then drain
 * pending work before destroying the hash table.
 */
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}
496 | static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, | |
497 | struct mlx5e_neigh_hash_entry *nhe) | |
498 | { | |
499 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
500 | int err; | |
501 | ||
502 | err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht, | |
503 | &nhe->rhash_node, | |
504 | mlx5e_neigh_ht_params); | |
505 | if (err) | |
506 | return err; | |
507 | ||
508 | list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); | |
509 | ||
510 | return err; | |
511 | } | |
512 | ||
/* Unlink a neigh hash entry from both the list and the hash table.
 * Done under encap_lock so the atomic netevent path can no longer
 * find the entry afterwards.
 */
static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}
/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 *
 * Returns the neigh hash entry matching @m_neigh, or NULL if none.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}
232c0013 HHZ |
542 | static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, |
543 | struct mlx5e_encap_entry *e, | |
544 | struct mlx5e_neigh_hash_entry **nhe) | |
545 | { | |
546 | int err; | |
547 | ||
548 | *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL); | |
549 | if (!*nhe) | |
550 | return -ENOMEM; | |
551 | ||
552 | memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); | |
553 | INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); | |
554 | INIT_LIST_HEAD(&(*nhe)->encap_list); | |
555 | refcount_set(&(*nhe)->refcnt, 1); | |
556 | ||
557 | err = mlx5e_rep_neigh_entry_insert(priv, *nhe); | |
558 | if (err) | |
559 | goto out_free; | |
560 | return 0; | |
561 | ||
562 | out_free: | |
563 | kfree(*nhe); | |
564 | return err; | |
565 | } | |
566 | ||
/* Unregister a neigh hash entry and drop the creation reference. */
static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}
/* Attach an encap entry to the neigh hash entry tracking its neighbour,
 * creating that hash entry on first use.
 * Returns 0 on success, negative errno on allocation/insert failure.
 */
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err)
			return err;
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}
/* Detach an encap entry from its neigh hash entry; destroy the hash
 * entry once no encap entries reference it.
 * NOTE(review): the lookup result is dereferenced unchecked — this
 * assumes @e was previously attached so the entry must exist; confirm
 * no caller can detach twice.
 */
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
}
/* ndo_open for representors: open the netdev under the state lock and,
 * on success, bring the e-switch vport admin state up. Carrier is set
 * only if the vport state change succeeded.
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
					  MLX5_ESW_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
630 | static int mlx5e_rep_close(struct net_device *dev) | |
631 | { | |
632 | struct mlx5e_priv *priv = netdev_priv(dev); | |
1d447a39 SM |
633 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
634 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
20a1ea67 | 635 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; |
63bfd399 | 636 | int ret; |
20a1ea67 | 637 | |
63bfd399 | 638 | mutex_lock(&priv->state_lock); |
20a1ea67 | 639 | (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); |
63bfd399 EBE |
640 | ret = mlx5e_close_locked(dev); |
641 | mutex_unlock(&priv->state_lock); | |
642 | return ret; | |
20a1ea67 OG |
643 | } |
644 | ||
cb67b832 HHZ |
645 | static int mlx5e_rep_get_phys_port_name(struct net_device *dev, |
646 | char *buf, size_t len) | |
647 | { | |
648 | struct mlx5e_priv *priv = netdev_priv(dev); | |
1d447a39 SM |
649 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
650 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
cb67b832 HHZ |
651 | int ret; |
652 | ||
653 | ret = snprintf(buf, len, "%d", rep->vport - 1); | |
654 | if (ret >= len) | |
655 | return -EOPNOTSUPP; | |
656 | ||
657 | return 0; | |
658 | } | |
659 | ||
/* Handle a TC flower offload command on a representor.
 * Egress-device commands are forwarded to the uplink netdev's
 * ndo_setup_tc; otherwise replace/destroy/stats are dispatched to the
 * mlx5e TC flower handlers. Only the ingress clsact chain 0 is
 * supported.
 */
static int
mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
			      struct tc_cls_flower_offload *cls_flower)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	if (cls_flower->egress_dev) {
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

		/* Redirect the request to the uplink device. */
		dev = mlx5_eswitch_get_uplink_netdev(esw);
		return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
						     cls_flower);
	}

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
8c818c27 | 690 | static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, |
de4784ca | 691 | void *type_data) |
8c818c27 | 692 | { |
2572ac53 | 693 | switch (type) { |
d957b4e3 | 694 | case TC_SETUP_CLSFLOWER: |
de4784ca | 695 | return mlx5e_rep_setup_tc_cls_flower(dev, type_data); |
d957b4e3 OG |
696 | default: |
697 | return -EOPNOTSUPP; | |
698 | } | |
699 | } | |
700 | ||
/* Return true if @priv belongs to the uplink vport representor (the
 * e-switch is in offloads mode and the rep's vport is FDB_UPLINK_VPORT).
 */
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	/* Only vport group managers have representors at all. */
	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}
20fd0c19 | 718 | static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) |
370bad0f | 719 | { |
1d447a39 SM |
720 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
721 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
370bad0f OG |
722 | |
723 | if (rep && rep->vport != FDB_UPLINK_VPORT) | |
724 | return true; | |
725 | ||
726 | return false; | |
727 | } | |
728 | ||
729 | bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id) | |
730 | { | |
731 | struct mlx5e_priv *priv = netdev_priv(dev); | |
732 | ||
733 | switch (attr_id) { | |
734 | case IFLA_OFFLOAD_XSTATS_CPU_HIT: | |
735 | if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv)) | |
736 | return true; | |
737 | } | |
738 | ||
739 | return false; | |
740 | } | |
741 | ||
/* Fill @stats from the cached software counters of @dev. Always
 * returns 0.
 */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}
759 | int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, | |
760 | void *sp) | |
761 | { | |
762 | switch (attr_id) { | |
763 | case IFLA_OFFLOAD_XSTATS_CPU_HIT: | |
764 | return mlx5e_get_sw_stats64(dev, sp); | |
765 | } | |
766 | ||
767 | return -EINVAL; | |
768 | } | |
769 | ||
bc1f4470 | 770 | static void |
370bad0f OG |
771 | mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) |
772 | { | |
773 | struct mlx5e_priv *priv = netdev_priv(dev); | |
774 | ||
775 | memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); | |
370bad0f OG |
776 | } |
777 | ||
/* switchdev callbacks: only attribute retrieval (parent ID) is needed. */
static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};
/* net_device_ops for representor netdevs. */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
};
/* Fill in the channel parameters used by representor netdevs: minimal
 * ring sizes, a single traffic class, linked-list RQ and CQ moderation
 * derived from device capabilities.
 */
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;

	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	params->num_tc                = 1;
	params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
/* Configure a representor net_device: ops, ethtool/switchdev callbacks,
 * feature flags and a random MAC address.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_rep_ethtool_ops;

#ifdef CONFIG_NET_SWITCHDEV
	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif

	/* Reps don't do VLAN offload, do support TC offload, and must not
	 * move between network namespaces.
	 */
	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features      |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);
}
/* Profile init callback for representors: wire up priv back-pointers,
 * init the state lock and stats work, then build the rep params and
 * netdev configuration.
 */
static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->profile                      = profile;
	priv->ppriv                        = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);

	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_rep_params(mdev, &priv->channels.params);
	mlx5e_build_rep_netdev(netdev);
}
856 | static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) | |
857 | { | |
858 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | |
1d447a39 SM |
859 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
860 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
74491de9 | 861 | struct mlx5_flow_handle *flow_rule; |
cb67b832 | 862 | int err; |
cb67b832 | 863 | |
2c3b5bee SM |
864 | mlx5e_init_l2_addr(priv); |
865 | ||
cb67b832 | 866 | err = mlx5e_create_direct_rqts(priv); |
8f493ffd | 867 | if (err) |
cb67b832 | 868 | return err; |
cb67b832 HHZ |
869 | |
870 | err = mlx5e_create_direct_tirs(priv); | |
8f493ffd | 871 | if (err) |
cb67b832 | 872 | goto err_destroy_direct_rqts; |
cb67b832 HHZ |
873 | |
874 | flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, | |
875 | rep->vport, | |
876 | priv->direct_tir[0].tirn); | |
877 | if (IS_ERR(flow_rule)) { | |
878 | err = PTR_ERR(flow_rule); | |
879 | goto err_destroy_direct_tirs; | |
880 | } | |
881 | rep->vport_rx_rule = flow_rule; | |
882 | ||
d957b4e3 OG |
883 | err = mlx5e_tc_init(priv); |
884 | if (err) | |
885 | goto err_del_flow_rule; | |
886 | ||
cb67b832 HHZ |
887 | return 0; |
888 | ||
d957b4e3 | 889 | err_del_flow_rule: |
74491de9 | 890 | mlx5_del_flow_rules(rep->vport_rx_rule); |
cb67b832 HHZ |
891 | err_destroy_direct_tirs: |
892 | mlx5e_destroy_direct_tirs(priv); | |
893 | err_destroy_direct_rqts: | |
8f493ffd | 894 | mlx5e_destroy_direct_rqts(priv); |
cb67b832 HHZ |
895 | return err; |
896 | } | |
897 | ||
/* Profile .cleanup_rx callback: tear down the representor RX path in
 * exact reverse order of mlx5e_init_rep_rx.
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_tc_cleanup(priv);
	/* remove the vport RX steering rule before destroying its TIR */
	mlx5_del_flow_rules(rep->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}
908 | ||
909 | static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) | |
910 | { | |
911 | int err; | |
912 | ||
913 | err = mlx5e_create_tises(priv); | |
914 | if (err) { | |
915 | mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); | |
916 | return err; | |
917 | } | |
918 | return 0; | |
919 | } | |
920 | ||
/* Profile .max_nch callback: representors always use a single channel.
 * Use a block-scoped enum instead of a function-local #define — the
 * preprocessor has no block scope, so the old macro leaked past this
 * function.
 */
static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
	enum { MLX5E_PORT_REPRESENTOR_NCH = 1 };

	return MLX5E_PORT_REPRESENTOR_NCH;
}
926 | ||
/* netdev profile for e-switch vport representors, plugged into the
 * generic mlx5e netdev lifecycle (mlx5e_create_netdev / attach / detach).
 * Representors have no carrier handling and no MPWQE RX support.
 */
static struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_nic_tx,	/* TX teardown shared with the NIC profile */
	.update_stats = mlx5e_rep_update_stats,
	.max_nch = mlx5e_get_rep_max_num_channels,
	.update_carrier = NULL,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc = 1,
};
940 | ||
1d447a39 SM |
941 | /* e-Switch vport representors */ |
942 | ||
943 | static int | |
944 | mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) | |
945 | { | |
37b498ff HHZ |
946 | struct mlx5e_priv *priv = netdev_priv(rep->netdev); |
947 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
948 | ||
949 | int err; | |
950 | ||
951 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { | |
952 | err = mlx5e_add_sqs_fwd_rules(priv); | |
953 | if (err) | |
954 | return err; | |
955 | } | |
956 | ||
957 | err = mlx5e_rep_neigh_init(rpriv); | |
958 | if (err) | |
959 | goto err_remove_sqs; | |
1d447a39 | 960 | |
1d447a39 | 961 | return 0; |
37b498ff HHZ |
962 | |
963 | err_remove_sqs: | |
964 | mlx5e_remove_sqs_fwd_rules(priv); | |
965 | return err; | |
1d447a39 SM |
966 | } |
967 | ||
/* Unload callback of the uplink (PF) representor; mirrors
 * mlx5e_nic_rep_load in reverse and flushes offloaded TC state.
 */
static void
mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean (and re-init) existing uplink offloaded TC rules */
	mlx5e_tc_cleanup(priv);
	mlx5e_tc_init(priv);

	mlx5e_rep_neigh_cleanup(rpriv);
}
983 | ||
984 | static int | |
985 | mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) | |
986 | { | |
987 | struct mlx5e_rep_priv *rpriv; | |
26e59d80 MHY |
988 | struct net_device *netdev; |
989 | int err; | |
990 | ||
1d447a39 SM |
991 | rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); |
992 | if (!rpriv) | |
993 | return -ENOMEM; | |
994 | ||
995 | netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv); | |
26e59d80 MHY |
996 | if (!netdev) { |
997 | pr_warn("Failed to create representor netdev for vport %d\n", | |
998 | rep->vport); | |
1d447a39 | 999 | kfree(rpriv); |
cb67b832 HHZ |
1000 | return -EINVAL; |
1001 | } | |
26e59d80 | 1002 | |
726293f1 | 1003 | rep->netdev = netdev; |
1d447a39 | 1004 | rpriv->rep = rep; |
26e59d80 | 1005 | |
2c3b5bee | 1006 | err = mlx5e_attach_netdev(netdev_priv(netdev)); |
26e59d80 MHY |
1007 | if (err) { |
1008 | pr_warn("Failed to attach representor netdev for vport %d\n", | |
1009 | rep->vport); | |
1010 | goto err_destroy_netdev; | |
1011 | } | |
1012 | ||
37b498ff HHZ |
1013 | err = mlx5e_rep_neigh_init(rpriv); |
1014 | if (err) { | |
1015 | pr_warn("Failed to initialized neighbours handling for vport %d\n", | |
1016 | rep->vport); | |
1017 | goto err_detach_netdev; | |
1018 | } | |
1019 | ||
26e59d80 MHY |
1020 | err = register_netdev(netdev); |
1021 | if (err) { | |
1022 | pr_warn("Failed to register representor netdev for vport %d\n", | |
1023 | rep->vport); | |
37b498ff | 1024 | goto err_neigh_cleanup; |
26e59d80 MHY |
1025 | } |
1026 | ||
cb67b832 | 1027 | return 0; |
26e59d80 | 1028 | |
37b498ff HHZ |
1029 | err_neigh_cleanup: |
1030 | mlx5e_rep_neigh_cleanup(rpriv); | |
1031 | ||
26e59d80 | 1032 | err_detach_netdev: |
2c3b5bee | 1033 | mlx5e_detach_netdev(netdev_priv(netdev)); |
26e59d80 MHY |
1034 | |
1035 | err_destroy_netdev: | |
2c3b5bee | 1036 | mlx5e_destroy_netdev(netdev_priv(netdev)); |
1d447a39 | 1037 | kfree(rpriv); |
26e59d80 | 1038 | return err; |
cb67b832 HHZ |
1039 | } |
1040 | ||
/* Unload callback of a VF vport representor: unregister and free its
 * netdev and the rep-private state allocated in mlx5e_vport_rep_load.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	/* saved up front: priv lives inside the netdev, which
	 * mlx5e_destroy_netdev() below frees
	 */
	void *ppriv = priv->ppriv;

	unregister_netdev(rep->netdev);

	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}
1056 | ||
1057 | static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) | |
1058 | { | |
1059 | struct mlx5_core_dev *mdev = priv->mdev; | |
1060 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
1061 | int total_vfs = MLX5_TOTAL_VPORTS(mdev); | |
1062 | int vport; | |
1063 | u8 mac[ETH_ALEN]; | |
1064 | ||
1065 | mlx5_query_nic_vport_mac_address(mdev, 0, mac); | |
1066 | ||
1067 | for (vport = 1; vport < total_vfs; vport++) { | |
1068 | struct mlx5_eswitch_rep rep; | |
1069 | ||
1070 | rep.load = mlx5e_vport_rep_load; | |
1071 | rep.unload = mlx5e_vport_rep_unload; | |
1072 | rep.vport = vport; | |
1073 | ether_addr_copy(rep.hw_id, mac); | |
1074 | mlx5_eswitch_register_vport_rep(esw, vport, &rep); | |
1075 | } | |
1076 | } | |
1077 | ||
1078 | static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv) | |
1079 | { | |
1080 | struct mlx5_core_dev *mdev = priv->mdev; | |
1081 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
1082 | int total_vfs = MLX5_TOTAL_VPORTS(mdev); | |
1083 | int vport; | |
1084 | ||
1085 | for (vport = 1; vport < total_vfs; vport++) | |
1086 | mlx5_eswitch_unregister_vport_rep(esw, vport); | |
1087 | } | |
1088 | ||
1089 | void mlx5e_register_vport_reps(struct mlx5e_priv *priv) | |
1090 | { | |
1091 | struct mlx5_core_dev *mdev = priv->mdev; | |
1092 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
1093 | struct mlx5_eswitch_rep rep; | |
1094 | ||
1095 | mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); | |
1096 | rep.load = mlx5e_nic_rep_load; | |
1097 | rep.unload = mlx5e_nic_rep_unload; | |
1098 | rep.vport = FDB_UPLINK_VPORT; | |
1099 | rep.netdev = priv->netdev; | |
1100 | mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/ | |
1101 | ||
1102 | mlx5e_rep_register_vf_vports(priv); /* VFs vports */ | |
1103 | } | |
1104 | ||
1105 | void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) | |
1106 | { | |
1107 | struct mlx5_core_dev *mdev = priv->mdev; | |
1108 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
1109 | ||
1110 | mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ | |
1111 | mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ | |
cb67b832 | 1112 | } |
07c9f1e5 SM |
1113 | |
1114 | void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) | |
1115 | { | |
1116 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
1117 | struct mlx5e_rep_priv *rpriv; | |
1118 | ||
1119 | rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); | |
1120 | if (!rpriv) | |
1121 | return NULL; | |
1122 | ||
1123 | rpriv->rep = &esw->offloads.vport_reps[0]; | |
1124 | return rpriv; | |
1125 | } |