1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 #include "esw/chains.h"
41 #include "rdma.h"
42 #include "en.h"
43 #include "fs_core.h"
44 #include "lib/devcom.h"
45 #include "lib/eq.h"
46
47 /* There are two match-all miss flows, one for unicast dst mac and
48 * one for multicast.
49 */
50 #define MLX5_ESW_MISS_FLOWS (2)
51 #define UPLINK_REP_INDEX 0
52
53 /* Per vport tables */
54
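/* Per-vport FDB tables are cached in a hash keyed by (chain, prio, vport,
 * vhca_id) and reference counted: esw_vport_tbl_get() returns an existing
 * table or creates a new one, esw_vport_tbl_put() destroys the table once
 * its last rule is removed.
 */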
55 #define MLX5_ESW_VPORT_TABLE_SIZE 128
56
57 /* This struct is used as a key to the hash table; it must be packed so
58 * the hash result is consistent.
59 */
60 struct mlx5_vport_key {
61 u32 chain;
62 u16 prio;
63 u16 vport;
64 u16 vhca_id;
65 } __packed;
66
67 struct mlx5_vport_table {
68 struct hlist_node hlist;
69 struct mlx5_flow_table *fdb;
70 u32 num_rules;
71 struct mlx5_vport_key key;
72 };
73
74 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
75
76 static struct mlx5_flow_table *
77 esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
78 {
79 struct mlx5_flow_table_attr ft_attr = {};
80 struct mlx5_flow_table *fdb;
81
82 ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
83 ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
84 ft_attr.prio = FDB_PER_VPORT;
85 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
86 if (IS_ERR(fdb)) {
87 esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
88 PTR_ERR(fdb));
89 }
90
91 return fdb;
92 }
93
94 static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
95 struct mlx5_esw_flow_attr *attr,
96 struct mlx5_vport_key *key)
97 {
98 key->vport = attr->in_rep->vport;
99 key->chain = attr->chain;
100 key->prio = attr->prio;
101 key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
102 return jhash(key, sizeof(*key), 0);
103 }
104
105 /* caller must hold vports.lock */
106 static struct mlx5_vport_table *
107 esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
108 {
109 struct mlx5_vport_table *e;
110
111 hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
112 if (!memcmp(&e->key, skey, sizeof(*skey)))
113 return e;
114
115 return NULL;
116 }
117
118 static void
119 esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
120 {
121 struct mlx5_vport_table *e;
122 struct mlx5_vport_key key;
123 u32 hkey;
124
125 mutex_lock(&esw->fdb_table.offloads.vports.lock);
126 hkey = flow_attr_to_vport_key(esw, attr, &key);
127 e = esw_vport_tbl_lookup(esw, &key, hkey);
128 if (!e || --e->num_rules)
129 goto out;
130
131 hash_del(&e->hlist);
132 mlx5_destroy_flow_table(e->fdb);
133 kfree(e);
134 out:
135 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
136 }
137
138 static struct mlx5_flow_table *
139 esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
140 {
141 struct mlx5_core_dev *dev = esw->dev;
142 struct mlx5_flow_namespace *ns;
143 struct mlx5_flow_table *fdb;
144 struct mlx5_vport_table *e;
145 struct mlx5_vport_key skey;
146 u32 hkey;
147
148 mutex_lock(&esw->fdb_table.offloads.vports.lock);
149 hkey = flow_attr_to_vport_key(esw, attr, &skey);
150 e = esw_vport_tbl_lookup(esw, &skey, hkey);
151 if (e) {
152 e->num_rules++;
153 goto out;
154 }
155
156 e = kzalloc(sizeof(*e), GFP_KERNEL);
157 if (!e) {
158 fdb = ERR_PTR(-ENOMEM);
159 goto err_alloc;
160 }
161
162 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
163 if (!ns) {
164 esw_warn(dev, "Failed to get FDB namespace\n");
165 fdb = ERR_PTR(-ENOENT);
166 goto err_ns;
167 }
168
169 fdb = esw_vport_tbl_create(esw, ns);
170 if (IS_ERR(fdb))
171 goto err_ns;
172
173 e->fdb = fdb;
174 e->num_rules = 1;
175 e->key = skey;
176 hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
177 out:
178 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
179 return e->fdb;
180
181 err_ns:
182 kfree(e);
183 err_alloc:
184 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
185 return fdb;
186 }
187
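/* Take a reference on (creating if needed) the default prio 1 per-vport
 * table of every vport; on failure, release the references already taken.
 */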
188 int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
189 {
190 struct mlx5_esw_flow_attr attr = {};
191 struct mlx5_eswitch_rep rep = {};
192 struct mlx5_flow_table *fdb;
193 struct mlx5_vport *vport;
194 int i;
195
196 attr.prio = 1;
197 attr.in_rep = &rep;
198 mlx5_esw_for_all_vports(esw, i, vport) {
199 attr.in_rep->vport = vport->vport;
200 fdb = esw_vport_tbl_get(esw, &attr);
201 if (IS_ERR(fdb))
202 goto out;
203 }
204 return 0;
205
206 out:
207 mlx5_esw_vport_tbl_put(esw);
208 return PTR_ERR(fdb);
209 }
210
211 void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
212 {
213 struct mlx5_esw_flow_attr attr = {};
214 struct mlx5_eswitch_rep rep = {};
215 struct mlx5_vport *vport;
216 int i;
217
218 attr.prio = 1;
219 attr.in_rep = &rep;
220 mlx5_esw_for_all_vports(esw, i, vport) {
221 attr.in_rep->vport = vport->vport;
222 esw_vport_tbl_put(esw, &attr);
223 }
224 }
225
226 /* End: Per vport tables */
227
228 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
229 u16 vport_num)
230 {
231 int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
232
233 WARN_ON(idx > esw->total_vports - 1);
234 return &esw->offloads.vport_reps[idx];
235 }
236
237 static bool
238 esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
239 const struct mlx5_vport *vport)
240 {
241 return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
242 mlx5_eswitch_is_vf_vport(esw, vport->vport));
243 }
244
245 static void
246 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
247 struct mlx5_flow_spec *spec,
248 struct mlx5_esw_flow_attr *attr)
249 {
250 void *misc2;
251 void *misc;
252
253 /* Use metadata matching because a vport is not represented by a single
254 * VHCA in dual-port RoCE mode, and matching on the source vport may fail.
255 */
256 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
257 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
258 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
259 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
260 attr->in_rep->vport));
261
262 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
263 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
264 mlx5_eswitch_get_vport_metadata_mask());
265
266 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
267 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
268 if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
269 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
270 } else {
271 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
272 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
273
274 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
275 MLX5_SET(fte_match_set_misc, misc,
276 source_eswitch_owner_vhca_id,
277 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
278
279 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
280 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
281 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
282 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
283 source_eswitch_owner_vhca_id);
284
285 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
286 }
287
288 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
289 attr->in_rep->vport == MLX5_VPORT_UPLINK)
290 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
291 }
292
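/* Offload a rule into the FDB: translate the flow attributes into actions
 * and destinations, pick the target table (per-vport table for split rules,
 * chain/prio table or attr->fdb otherwise), set the source port match and
 * add the rule, going through a termination table when one is required.
 */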
293 struct mlx5_flow_handle *
294 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
295 struct mlx5_flow_spec *spec,
296 struct mlx5_esw_flow_attr *attr)
297 {
298 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
299 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
300 bool split = !!(attr->split_count);
301 struct mlx5_flow_handle *rule;
302 struct mlx5_flow_table *fdb;
303 int j, i = 0;
304
305 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
306 return ERR_PTR(-EOPNOTSUPP);
307
308 flow_act.action = attr->action;
309 /* if per-flow vlan pop/push is emulated, don't program it into the firmware */
310 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
311 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
312 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
313 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
314 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
315 flow_act.vlan[0].vid = attr->vlan_vid[0];
316 flow_act.vlan[0].prio = attr->vlan_prio[0];
317 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
318 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
319 flow_act.vlan[1].vid = attr->vlan_vid[1];
320 flow_act.vlan[1].prio = attr->vlan_prio[1];
321 }
322 }
323
324 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
325 struct mlx5_flow_table *ft;
326
327 if (attr->dest_ft) {
328 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
329 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
330 dest[i].ft = attr->dest_ft;
331 i++;
332 } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
333 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
334 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
335 dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
336 i++;
337 } else if (attr->dest_chain) {
338 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
339 ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
340 1, 0);
341 if (IS_ERR(ft)) {
342 rule = ERR_CAST(ft);
343 goto err_create_goto_table;
344 }
345
346 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
347 dest[i].ft = ft;
348 i++;
349 } else {
350 for (j = attr->split_count; j < attr->out_count; j++) {
351 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
352 dest[i].vport.num = attr->dests[j].rep->vport;
353 dest[i].vport.vhca_id =
354 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
355 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
356 dest[i].vport.flags |=
357 MLX5_FLOW_DEST_VPORT_VHCA_ID;
358 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
359 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
360 flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
361 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
362 dest[i].vport.pkt_reformat =
363 attr->dests[j].pkt_reformat;
364 }
365 i++;
366 }
367 }
368 }
369 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
370 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
371 dest[i].counter_id = mlx5_fc_id(attr->counter);
372 i++;
373 }
374
375 if (attr->outer_match_level != MLX5_MATCH_NONE)
376 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
377 if (attr->inner_match_level != MLX5_MATCH_NONE)
378 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
379
380 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
381 flow_act.modify_hdr = attr->modify_hdr;
382
383 if (split) {
384 fdb = esw_vport_tbl_get(esw, attr);
385 } else {
386 if (attr->chain || attr->prio)
387 fdb = mlx5_esw_chains_get_table(esw, attr->chain,
388 attr->prio, 0);
389 else
390 fdb = attr->fdb;
391
392 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
393 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
394 }
395 if (IS_ERR(fdb)) {
396 rule = ERR_CAST(fdb);
397 goto err_esw_get;
398 }
399
400 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
401 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
402 &flow_act, dest, i);
403 else
404 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
405 if (IS_ERR(rule))
406 goto err_add_rule;
407 else
408 atomic64_inc(&esw->offloads.num_flows);
409
410 return rule;
411
412 err_add_rule:
413 if (split)
414 esw_vport_tbl_put(esw, attr);
415 else if (attr->chain || attr->prio)
416 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
417 err_esw_get:
418 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
419 mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
420 err_create_goto_table:
421 return rule;
422 }
423
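/* Add the forward half of a split rule: the chain's FDB table forwards to
 * the pre-mirror vport destinations and then jumps to the per-vport table,
 * where the remaining part of the rule is applied.
 */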
424 struct mlx5_flow_handle *
425 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
426 struct mlx5_flow_spec *spec,
427 struct mlx5_esw_flow_attr *attr)
428 {
429 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
430 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
431 struct mlx5_flow_table *fast_fdb;
432 struct mlx5_flow_table *fwd_fdb;
433 struct mlx5_flow_handle *rule;
434 int i;
435
436 fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
437 if (IS_ERR(fast_fdb)) {
438 rule = ERR_CAST(fast_fdb);
439 goto err_get_fast;
440 }
441
442 fwd_fdb = esw_vport_tbl_get(esw, attr);
443 if (IS_ERR(fwd_fdb)) {
444 rule = ERR_CAST(fwd_fdb);
445 goto err_get_fwd;
446 }
447
448 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
449 for (i = 0; i < attr->split_count; i++) {
450 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
451 dest[i].vport.num = attr->dests[i].rep->vport;
452 dest[i].vport.vhca_id =
453 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
454 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
455 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
456 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
457 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
458 dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
459 }
460 }
461 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
462 dest[i].ft = fwd_fdb;
463 i++;
464
465 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
466
467 if (attr->outer_match_level != MLX5_MATCH_NONE)
468 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
469
470 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
471 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
472
473 if (IS_ERR(rule))
474 goto add_err;
475
476 atomic64_inc(&esw->offloads.num_flows);
477
478 return rule;
479 add_err:
480 esw_vport_tbl_put(esw, attr);
481 err_get_fwd:
482 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
483 err_get_fast:
484 return rule;
485 }
486
487 static void
488 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
489 struct mlx5_flow_handle *rule,
490 struct mlx5_esw_flow_attr *attr,
491 bool fwd_rule)
492 {
493 bool split = (attr->split_count > 0);
494 int i;
495
496 mlx5_del_flow_rules(rule);
497
498 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
499 /* unref the term table */
500 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
501 if (attr->dests[i].termtbl)
502 mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
503 }
504 }
505
506 atomic64_dec(&esw->offloads.num_flows);
507
508 if (fwd_rule) {
509 esw_vport_tbl_put(esw, attr);
510 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
511 } else {
512 if (split)
513 esw_vport_tbl_put(esw, attr);
514 else if (attr->chain || attr->prio)
515 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
516 0);
517 if (attr->dest_chain)
518 mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
519 }
520 }
521
522 void
523 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
524 struct mlx5_flow_handle *rule,
525 struct mlx5_esw_flow_attr *attr)
526 {
527 __mlx5_eswitch_del_rule(esw, rule, attr, false);
528 }
529
530 void
531 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
532 struct mlx5_flow_handle *rule,
533 struct mlx5_esw_flow_attr *attr)
534 {
535 __mlx5_eswitch_del_rule(esw, rule, attr, true);
536 }
537
538 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
539 {
540 struct mlx5_eswitch_rep *rep;
541 int i, err = 0;
542
543 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
544 mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
545 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
546 continue;
547
548 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
549 if (err)
550 goto out;
551 }
552
553 out:
554 return err;
555 }
556
557 static struct mlx5_eswitch_rep *
558 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
559 {
560 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
561
562 in_rep = attr->in_rep;
563 out_rep = attr->dests[0].rep;
564
565 if (push)
566 vport = in_rep;
567 else if (pop)
568 vport = out_rep;
569 else
570 vport = in_rep;
571
572 return vport;
573 }
574
575 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
576 bool push, bool pop, bool fwd)
577 {
578 struct mlx5_eswitch_rep *in_rep, *out_rep;
579
580 if ((push || pop) && !fwd)
581 goto out_notsupp;
582
583 in_rep = attr->in_rep;
584 out_rep = attr->dests[0].rep;
585
586 if (push && in_rep->vport == MLX5_VPORT_UPLINK)
587 goto out_notsupp;
588
589 if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
590 goto out_notsupp;
591
592 /* vport has vlan push configured, can't offload VF --> wire rules without it */
593 if (!push && !pop && fwd)
594 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
595 goto out_notsupp;
596
597 /* protects against (1) setting rules with different vlans to push and
598 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
599 */
600 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
601 goto out_notsupp;
602
603 return 0;
604
605 out_notsupp:
606 return -EOPNOTSUPP;
607 }
608
609 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
610 struct mlx5_esw_flow_attr *attr)
611 {
612 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
613 struct mlx5_eswitch_rep *vport = NULL;
614 bool push, pop, fwd;
615 int err = 0;
616
617 /* no-op if we're in the vlan push/pop non-emulation mode */
618 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
619 return 0;
620
621 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
622 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
623 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
624 !attr->dest_chain);
625
626 mutex_lock(&esw->state_lock);
627
628 err = esw_add_vlan_action_check(attr, push, pop, fwd);
629 if (err)
630 goto unlock;
631
632 attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
633
634 vport = esw_vlan_action_get_vport(attr, push, pop);
635
636 if (!push && !pop && fwd) {
637 /* tracks VF --> wire rules without vlan push action */
638 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
639 vport->vlan_refcount++;
640 attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
641 }
642
643 goto unlock;
644 }
645
646 if (!push && !pop)
647 goto unlock;
648
649 if (!(offloads->vlan_push_pop_refcount)) {
650 /* it's the 1st vlan rule, apply global vlan pop policy */
651 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
652 if (err)
653 goto out;
654 }
655 offloads->vlan_push_pop_refcount++;
656
657 if (push) {
658 if (vport->vlan_refcount)
659 goto skip_set_push;
660
661 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
662 SET_VLAN_INSERT | SET_VLAN_STRIP);
663 if (err)
664 goto out;
665 vport->vlan = attr->vlan_vid[0];
666 skip_set_push:
667 vport->vlan_refcount++;
668 }
669 out:
670 if (!err)
671 attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
672 unlock:
673 mutex_unlock(&esw->state_lock);
674 return err;
675 }
676
677 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
678 struct mlx5_esw_flow_attr *attr)
679 {
680 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
681 struct mlx5_eswitch_rep *vport = NULL;
682 bool push, pop, fwd;
683 int err = 0;
684
685 /* no-op if we're in the vlan push/pop non-emulation mode */
686 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
687 return 0;
688
689 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
690 return 0;
691
692 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
693 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
694 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
695
696 mutex_lock(&esw->state_lock);
697
698 vport = esw_vlan_action_get_vport(attr, push, pop);
699
700 if (!push && !pop && fwd) {
701 /* tracks VF --> wire rules without vlan push action */
702 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
703 vport->vlan_refcount--;
704
705 goto out;
706 }
707
708 if (push) {
709 vport->vlan_refcount--;
710 if (vport->vlan_refcount)
711 goto skip_unset_push;
712
713 vport->vlan = 0;
714 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
715 0, 0, SET_VLAN_STRIP);
716 if (err)
717 goto out;
718 }
719
720 skip_unset_push:
721 offloads->vlan_push_pop_refcount--;
722 if (offloads->vlan_push_pop_refcount)
723 goto out;
724
725 /* no more vlan rules, stop global vlan pop policy */
726 err = esw_set_global_vlan_pop(esw, 0);
727
728 out:
729 mutex_unlock(&esw->state_lock);
730 return err;
731 }
732
733 struct mlx5_flow_handle *
734 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
735 u32 sqn)
736 {
737 struct mlx5_flow_act flow_act = {0};
738 struct mlx5_flow_destination dest = {};
739 struct mlx5_flow_handle *flow_rule;
740 struct mlx5_flow_spec *spec;
741 void *misc;
742
743 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
744 if (!spec) {
745 flow_rule = ERR_PTR(-ENOMEM);
746 goto out;
747 }
748
749 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
750 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
751 /* source vport is the esw manager */
752 MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
753
754 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
755 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
756 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
757
758 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
759 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
760 dest.vport.num = vport;
761 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
762
763 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
764 spec, &flow_act, &dest, 1);
765 if (IS_ERR(flow_rule))
766 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
767 out:
768 kvfree(spec);
769 return flow_rule;
770 }
771 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
772
773 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
774 {
775 mlx5_del_flow_rules(rule);
776 }
777
778 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
779 {
780 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
781 MLX5_FDB_TO_VPORT_REG_C_1;
782 }
783
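/* Enable or disable copying of REG_C_0 (and REG_C_1 when loopback is
 * supported) from the FDB to the vport context (fdb_to_vport_reg_c_id), so
 * metadata written in the FDB stays visible to the receiving vport.
 */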
784 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
785 {
786 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
787 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
788 u8 curr, wanted;
789 int err;
790
791 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
792 !mlx5_eswitch_vport_match_metadata_enabled(esw))
793 return 0;
794
795 err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
796 out, sizeof(out));
797 if (err)
798 return err;
799
800 curr = MLX5_GET(query_esw_vport_context_out, out,
801 esw_vport_context.fdb_to_vport_reg_c_id);
802 wanted = MLX5_FDB_TO_VPORT_REG_C_0;
803 if (mlx5_eswitch_reg_c1_loopback_supported(esw))
804 wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
805
806 if (enable)
807 curr |= wanted;
808 else
809 curr &= ~wanted;
810
811 MLX5_SET(modify_esw_vport_context_in, in,
812 esw_vport_context.fdb_to_vport_reg_c_id, curr);
813
814 MLX5_SET(modify_esw_vport_context_in, in,
815 field_select.fdb_to_vport_reg_c_id, 1);
816
817 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
818 sizeof(in));
819 if (!err) {
820 if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
821 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
822 else
823 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
824 }
825
826 return err;
827 }
828
829 static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
830 struct mlx5_core_dev *peer_dev,
831 struct mlx5_flow_spec *spec,
832 struct mlx5_flow_destination *dest)
833 {
834 void *misc;
835
836 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
837 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
838 misc_parameters_2);
839 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
840 mlx5_eswitch_get_vport_metadata_mask());
841
842 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
843 } else {
844 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
845 misc_parameters);
846
847 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
848 MLX5_CAP_GEN(peer_dev, vhca_id));
849
850 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
851
852 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
853 misc_parameters);
854 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
855 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
856 source_eswitch_owner_vhca_id);
857 }
858
859 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
860 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
861 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
862 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
863 }
864
865 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
866 struct mlx5_eswitch *peer_esw,
867 struct mlx5_flow_spec *spec,
868 u16 vport)
869 {
870 void *misc;
871
872 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
873 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
874 misc_parameters_2);
875 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
876 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
877 vport));
878 } else {
879 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
880 misc_parameters);
881 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
882 }
883 }
884
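/* For every vport of the peer e-switch (PF, ECPF and VFs), add a miss rule
 * to the slow path FDB that matches traffic sourced from that peer vport
 * and forwards it to the peer's e-switch manager vport.
 */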
885 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
886 struct mlx5_core_dev *peer_dev)
887 {
888 struct mlx5_flow_destination dest = {};
889 struct mlx5_flow_act flow_act = {0};
890 struct mlx5_flow_handle **flows;
891 struct mlx5_flow_handle *flow;
892 struct mlx5_flow_spec *spec;
893 /* the total number of vports is the same for both e-switches */
894 int nvports = esw->total_vports;
895 void *misc;
896 int err, i;
897
898 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
899 if (!spec)
900 return -ENOMEM;
901
902 peer_miss_rules_setup(esw, peer_dev, spec, &dest);
903
904 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
905 if (!flows) {
906 err = -ENOMEM;
907 goto alloc_flows_err;
908 }
909
910 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
911 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
912 misc_parameters);
913
914 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
915 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
916 spec, MLX5_VPORT_PF);
917
918 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
919 spec, &flow_act, &dest, 1);
920 if (IS_ERR(flow)) {
921 err = PTR_ERR(flow);
922 goto add_pf_flow_err;
923 }
924 flows[MLX5_VPORT_PF] = flow;
925 }
926
927 if (mlx5_ecpf_vport_exists(esw->dev)) {
928 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
929 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
930 spec, &flow_act, &dest, 1);
931 if (IS_ERR(flow)) {
932 err = PTR_ERR(flow);
933 goto add_ecpf_flow_err;
934 }
935 flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
936 }
937
938 mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
939 esw_set_peer_miss_rule_source_port(esw,
940 peer_dev->priv.eswitch,
941 spec, i);
942
943 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
944 spec, &flow_act, &dest, 1);
945 if (IS_ERR(flow)) {
946 err = PTR_ERR(flow);
947 goto add_vf_flow_err;
948 }
949 flows[i] = flow;
950 }
951
952 esw->fdb_table.offloads.peer_miss_rules = flows;
953
954 kvfree(spec);
955 return 0;
956
957 add_vf_flow_err:
958 nvports = --i;
959 mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
960 mlx5_del_flow_rules(flows[i]);
961
962 if (mlx5_ecpf_vport_exists(esw->dev))
963 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
964 add_ecpf_flow_err:
965 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
966 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
967 add_pf_flow_err:
968 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
969 kvfree(flows);
970 alloc_flows_err:
971 kvfree(spec);
972 return err;
973 }
974
975 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
976 {
977 struct mlx5_flow_handle **flows;
978 int i;
979
980 flows = esw->fdb_table.offloads.peer_miss_rules;
981
982 mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
983 mlx5_core_max_vfs(esw->dev))
984 mlx5_del_flow_rules(flows[i]);
985
986 if (mlx5_ecpf_vport_exists(esw->dev))
987 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
988
989 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
990 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
991
992 kvfree(flows);
993 }
994
995 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
996 {
997 struct mlx5_flow_act flow_act = {0};
998 struct mlx5_flow_destination dest = {};
999 struct mlx5_flow_handle *flow_rule = NULL;
1000 struct mlx5_flow_spec *spec;
1001 void *headers_c;
1002 void *headers_v;
1003 int err = 0;
1004 u8 *dmac_c;
1005 u8 *dmac_v;
1006
1007 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1008 if (!spec) {
1009 err = -ENOMEM;
1010 goto out;
1011 }
1012
1013 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1014 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1015 outer_headers);
1016 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
1017 outer_headers.dmac_47_16);
1018 dmac_c[0] = 0x01;
1019
1020 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1021 dest.vport.num = esw->manager_vport;
1022 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1023
1024 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1025 spec, &flow_act, &dest, 1);
1026 if (IS_ERR(flow_rule)) {
1027 err = PTR_ERR(flow_rule);
1028 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
1029 goto out;
1030 }
1031
1032 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
1033
1034 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1035 outer_headers);
1036 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
1037 outer_headers.dmac_47_16);
1038 dmac_v[0] = 0x01;
1039 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1040 spec, &flow_act, &dest, 1);
1041 if (IS_ERR(flow_rule)) {
1042 err = PTR_ERR(flow_rule);
1043 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
1044 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1045 goto out;
1046 }
1047
1048 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
1049
1050 out:
1051 kvfree(spec);
1052 return err;
1053 }
1054
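/* Add a restore rule: match the given chain tag in REG_C_0, copy REG_C_1
 * into REG_B, set the flow tag and forward to the offloads table.
 */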
1055 struct mlx5_flow_handle *
1056 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
1057 {
1058 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
1059 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
1060 struct mlx5_flow_context *flow_context;
1061 struct mlx5_flow_handle *flow_rule;
1062 struct mlx5_flow_destination dest;
1063 struct mlx5_flow_spec *spec;
1064 void *misc;
1065
1066 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1067 return ERR_PTR(-EOPNOTSUPP);
1068
1069 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
1070 if (!spec)
1071 return ERR_PTR(-ENOMEM);
1072
1073 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1074 misc_parameters_2);
1075 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1076 ESW_CHAIN_TAG_METADATA_MASK);
1077 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1078 misc_parameters_2);
1079 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
1080 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1081 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1082 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1083 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
1084
1085 flow_context = &spec->flow_context;
1086 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1087 flow_context->flow_tag = tag;
1088 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1089 dest.ft = esw->offloads.ft_offloads;
1090
1091 flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1092 kfree(spec);
1093
1094 if (IS_ERR(flow_rule))
1095 esw_warn(esw->dev,
1096 "Failed to create restore rule for tag: %d, err(%d)\n",
1097 tag, (int)PTR_ERR(flow_rule));
1098
1099 return flow_rule;
1100 }
1101
1102 u32
1103 esw_get_max_restore_tag(struct mlx5_eswitch *esw)
1104 {
1105 return ESW_CHAIN_TAG_METADATA_MASK;
1106 }
1107
1108 #define MAX_PF_SQ 256
1109 #define MAX_SQ_NVPORTS 32
1110
1111 static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1112 u32 *flow_group_in)
1113 {
1114 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1115 flow_group_in,
1116 match_criteria);
1117
1118 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1119 MLX5_SET(create_flow_group_in, flow_group_in,
1120 match_criteria_enable,
1121 MLX5_MATCH_MISC_PARAMETERS_2);
1122
1123 MLX5_SET(fte_match_param, match_criteria,
1124 misc_parameters_2.metadata_reg_c_0,
1125 mlx5_eswitch_get_vport_metadata_mask());
1126 } else {
1127 MLX5_SET(create_flow_group_in, flow_group_in,
1128 match_criteria_enable,
1129 MLX5_MATCH_MISC_PARAMETERS);
1130
1131 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1132 misc_parameters.source_port);
1133 }
1134 }
1135
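/* Create the slow path FDB table and its flow groups: send-to-vport rules
 * (matched on source SQ number and source port), peer e-switch miss rules,
 * and the unicast/multicast miss rules that send unmatched traffic to the
 * e-switch manager vport.
 */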
1136 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1137 {
1138 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1139 struct mlx5_flow_table_attr ft_attr = {};
1140 struct mlx5_core_dev *dev = esw->dev;
1141 struct mlx5_flow_namespace *root_ns;
1142 struct mlx5_flow_table *fdb = NULL;
1143 u32 flags = 0, *flow_group_in;
1144 int table_size, ix, err = 0;
1145 struct mlx5_flow_group *g;
1146 void *match_criteria;
1147 u8 *dmac;
1148
1149 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1150
1151 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1152 if (!flow_group_in)
1153 return -ENOMEM;
1154
1155 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1156 if (!root_ns) {
1157 esw_warn(dev, "Failed to get FDB flow namespace\n");
1158 err = -EOPNOTSUPP;
1159 goto ns_err;
1160 }
1161 esw->fdb_table.offloads.ns = root_ns;
1162 err = mlx5_flow_namespace_set_mode(root_ns,
1163 esw->dev->priv.steering->mode);
1164 if (err) {
1165 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1166 goto ns_err;
1167 }
1168
1169 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1170 MLX5_ESW_MISS_FLOWS + esw->total_vports;
1171
1172 /* create the slow path fdb with encap set, so that further table instances
1173 * can be created at run time while VFs are probed, if the FW allows it.
1174 */
1175 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1176 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1177 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1178
1179 ft_attr.flags = flags;
1180 ft_attr.max_fte = table_size;
1181 ft_attr.prio = FDB_SLOW_PATH;
1182
1183 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1184 if (IS_ERR(fdb)) {
1185 err = PTR_ERR(fdb);
1186 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1187 goto slow_fdb_err;
1188 }
1189 esw->fdb_table.offloads.slow_fdb = fdb;
1190
1191 err = mlx5_esw_chains_create(esw);
1192 if (err) {
1193 esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
1194 goto fdb_chains_err;
1195 }
1196
1197 /* create send-to-vport group */
1198 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1199 MLX5_MATCH_MISC_PARAMETERS);
1200
1201 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1202
1203 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1204 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1205
1206 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
1207 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1208 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1209
1210 g = mlx5_create_flow_group(fdb, flow_group_in);
1211 if (IS_ERR(g)) {
1212 err = PTR_ERR(g);
1213 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1214 goto send_vport_err;
1215 }
1216 esw->fdb_table.offloads.send_to_vport_grp = g;
1217
1218 /* create peer esw miss group */
1219 memset(flow_group_in, 0, inlen);
1220
1221 esw_set_flow_group_source_port(esw, flow_group_in);
1222
1223 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1224 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1225 flow_group_in,
1226 match_criteria);
1227
1228 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1229 misc_parameters.source_eswitch_owner_vhca_id);
1230
1231 MLX5_SET(create_flow_group_in, flow_group_in,
1232 source_eswitch_owner_vhca_id_valid, 1);
1233 }
1234
1235 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1236 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1237 ix + esw->total_vports - 1);
1238 ix += esw->total_vports;
1239
1240 g = mlx5_create_flow_group(fdb, flow_group_in);
1241 if (IS_ERR(g)) {
1242 err = PTR_ERR(g);
1243 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1244 goto peer_miss_err;
1245 }
1246 esw->fdb_table.offloads.peer_miss_grp = g;
1247
1248 /* create miss group */
1249 memset(flow_group_in, 0, inlen);
1250 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1251 MLX5_MATCH_OUTER_HEADERS);
1252 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1253 match_criteria);
1254 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1255 outer_headers.dmac_47_16);
1256 dmac[0] = 0x01;
1257
1258 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1259 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1260 ix + MLX5_ESW_MISS_FLOWS);
1261
1262 g = mlx5_create_flow_group(fdb, flow_group_in);
1263 if (IS_ERR(g)) {
1264 err = PTR_ERR(g);
1265 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1266 goto miss_err;
1267 }
1268 esw->fdb_table.offloads.miss_grp = g;
1269
1270 err = esw_add_fdb_miss_rule(esw);
1271 if (err)
1272 goto miss_rule_err;
1273
1274 esw->nvports = nvports;
1275 kvfree(flow_group_in);
1276 return 0;
1277
1278 miss_rule_err:
1279 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1280 miss_err:
1281 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1282 peer_miss_err:
1283 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1284 send_vport_err:
1285 mlx5_esw_chains_destroy(esw);
1286 fdb_chains_err:
1287 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1288 slow_fdb_err:
1289 /* Holds true only as long as DMFS is the default */
1290 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1291 ns_err:
1292 kvfree(flow_group_in);
1293 return err;
1294 }
1295
1296 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1297 {
1298 if (!esw->fdb_table.offloads.slow_fdb)
1299 return;
1300
1301 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1302 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1303 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1304 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1305 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1306 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1307
1308 mlx5_esw_chains_destroy(esw);
1309 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1310 /* Holds true only as long as DMFS is the default */
1311 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1312 MLX5_FLOW_STEERING_MODE_DMFS);
1313 }
1314
1315 static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
1316 {
1317 struct mlx5_flow_table_attr ft_attr = {};
1318 struct mlx5_core_dev *dev = esw->dev;
1319 struct mlx5_flow_table *ft_offloads;
1320 struct mlx5_flow_namespace *ns;
1321 int err = 0;
1322
1323 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1324 if (!ns) {
1325 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1326 return -EOPNOTSUPP;
1327 }
1328
1329 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
1330 ft_attr.prio = 1;
1331
1332 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1333 if (IS_ERR(ft_offloads)) {
1334 err = PTR_ERR(ft_offloads);
1335 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1336 return err;
1337 }
1338
1339 esw->offloads.ft_offloads = ft_offloads;
1340 return 0;
1341 }
1342
1343 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1344 {
1345 struct mlx5_esw_offload *offloads = &esw->offloads;
1346
1347 mlx5_destroy_flow_table(offloads->ft_offloads);
1348 }
1349
1350 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
1351 {
1352 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1353 struct mlx5_flow_group *g;
1354 u32 *flow_group_in;
1355 int err = 0;
1356
1357 nvports = nvports + MLX5_ESW_MISS_FLOWS;
1358 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1359 if (!flow_group_in)
1360 return -ENOMEM;
1361
1362 /* create vport rx group */
1363 esw_set_flow_group_source_port(esw, flow_group_in);
1364
1365 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1366 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1367
1368 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1369
1370 if (IS_ERR(g)) {
1371 err = PTR_ERR(g);
1372 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1373 goto out;
1374 }
1375
1376 esw->offloads.vport_rx_group = g;
1377 out:
1378 kvfree(flow_group_in);
1379 return err;
1380 }
1381
1382 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1383 {
1384 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1385 }
1386
1387 struct mlx5_flow_handle *
1388 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
1389 struct mlx5_flow_destination *dest)
1390 {
1391 struct mlx5_flow_act flow_act = {0};
1392 struct mlx5_flow_handle *flow_rule;
1393 struct mlx5_flow_spec *spec;
1394 void *misc;
1395
1396 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1397 if (!spec) {
1398 flow_rule = ERR_PTR(-ENOMEM);
1399 goto out;
1400 }
1401
1402 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1403 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1404 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1405 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
1406
1407 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
1408 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1409 mlx5_eswitch_get_vport_metadata_mask());
1410
1411 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1412 } else {
1413 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1414 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1415
1416 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1417 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1418
1419 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1420 }
1421
1422 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1423 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1424 &flow_act, dest, 1);
1425 if (IS_ERR(flow_rule)) {
1426 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1427 goto out;
1428 }
1429
1430 out:
1431 kvfree(spec);
1432 return flow_rule;
1433 }
1434
1435
1436 static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
1437 {
1438 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1439 struct mlx5_core_dev *dev = esw->dev;
1440 int vport;
1441
1442 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1443 return -EOPNOTSUPP;
1444
1445 if (esw->mode == MLX5_ESWITCH_NONE)
1446 return -EOPNOTSUPP;
1447
1448 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1449 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1450 mlx5_mode = MLX5_INLINE_MODE_NONE;
1451 goto out;
1452 case MLX5_CAP_INLINE_MODE_L2:
1453 mlx5_mode = MLX5_INLINE_MODE_L2;
1454 goto out;
1455 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1456 goto query_vports;
1457 }
1458
1459 query_vports:
1460 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
1461 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
1462 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1463 if (prev_mlx5_mode != mlx5_mode)
1464 return -EINVAL;
1465 prev_mlx5_mode = mlx5_mode;
1466 }
1467
1468 out:
1469 *mode = mlx5_mode;
1470 return 0;
1471 }
1472
1473 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
1474 {
1475 struct mlx5_esw_offload *offloads = &esw->offloads;
1476
1477 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1478 return;
1479
1480 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
1481 mlx5_destroy_flow_group(offloads->restore_group);
1482 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
1483 }
1484
1485 static int esw_create_restore_table(struct mlx5_eswitch *esw)
1486 {
1487 u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1488 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1489 struct mlx5_flow_table_attr ft_attr = {};
1490 struct mlx5_core_dev *dev = esw->dev;
1491 struct mlx5_flow_namespace *ns;
1492 struct mlx5_modify_hdr *mod_hdr;
1493 void *match_criteria, *misc;
1494 struct mlx5_flow_table *ft;
1495 struct mlx5_flow_group *g;
1496 u32 *flow_group_in;
1497 int err = 0;
1498
1499 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1500 return 0;
1501
1502 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1503 if (!ns) {
1504 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1505 return -EOPNOTSUPP;
1506 }
1507
1508 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1509 if (!flow_group_in) {
1510 err = -ENOMEM;
1511 goto out_free;
1512 }
1513
1514 ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
1515 ft = mlx5_create_flow_table(ns, &ft_attr);
1516 if (IS_ERR(ft)) {
1517 err = PTR_ERR(ft);
1518 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
1519 err);
1520 goto out_free;
1521 }
1522
1523 memset(flow_group_in, 0, inlen);
1524 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1525 match_criteria);
1526 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
1527 misc_parameters_2);
1528
1529 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1530 ESW_CHAIN_TAG_METADATA_MASK);
1531 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1532 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1533 ft_attr.max_fte - 1);
1534 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1535 MLX5_MATCH_MISC_PARAMETERS_2);
1536 g = mlx5_create_flow_group(ft, flow_group_in);
1537 if (IS_ERR(g)) {
1538 err = PTR_ERR(g);
1539 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
1540 err);
1541 goto err_group;
1542 }
1543
1544 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
1545 MLX5_SET(copy_action_in, modact, src_field,
1546 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1547 MLX5_SET(copy_action_in, modact, dst_field,
1548 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1549 mod_hdr = mlx5_modify_header_alloc(esw->dev,
1550 MLX5_FLOW_NAMESPACE_KERNEL, 1,
1551 modact);
1552 if (IS_ERR(mod_hdr)) {
1553 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
1554 err);
1555 err = PTR_ERR(mod_hdr);
1556 goto err_mod_hdr;
1557 }
1558
1559 esw->offloads.ft_offloads_restore = ft;
1560 esw->offloads.restore_group = g;
1561 esw->offloads.restore_copy_hdr_id = mod_hdr;
1562
1563 kvfree(flow_group_in);
1564
1565 return 0;
1566
1567 err_mod_hdr:
1568 mlx5_destroy_flow_group(g);
1569 err_group:
1570 mlx5_destroy_flow_table(ft);
1571 out_free:
1572 kvfree(flow_group_in);
1573
1574 return err;
1575 }
1576
1577 static int esw_offloads_start(struct mlx5_eswitch *esw,
1578 struct netlink_ext_ack *extack)
1579 {
1580 int err, err1;
1581
1582 if (esw->mode != MLX5_ESWITCH_LEGACY &&
1583 !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1584 NL_SET_ERR_MSG_MOD(extack,
1585 "Can't set offloads mode, SRIOV legacy not enabled");
1586 return -EINVAL;
1587 }
1588
1589 mlx5_eswitch_disable_locked(esw, false);
1590 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
1591 esw->dev->priv.sriov.num_vfs);
1592 if (err) {
1593 NL_SET_ERR_MSG_MOD(extack,
1594 "Failed setting eswitch to offloads");
1595 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
1596 MLX5_ESWITCH_IGNORE_NUM_VFS);
1597 if (err1) {
1598 NL_SET_ERR_MSG_MOD(extack,
1599 "Failed setting eswitch back to legacy");
1600 }
1601 }
1602 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1603 if (mlx5_eswitch_inline_mode_get(esw,
1604 &esw->offloads.inline_mode)) {
1605 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1606 NL_SET_ERR_MSG_MOD(extack,
1607 "Inline mode is different between vports");
1608 }
1609 }
1610 return err;
1611 }
1612
1613 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1614 {
1615 kfree(esw->offloads.vport_reps);
1616 }
1617
1618 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1619 {
1620 int total_vports = esw->total_vports;
1621 struct mlx5_eswitch_rep *rep;
1622 int vport_index;
1623 u8 rep_type;
1624
1625 esw->offloads.vport_reps = kcalloc(total_vports,
1626 sizeof(struct mlx5_eswitch_rep),
1627 GFP_KERNEL);
1628 if (!esw->offloads.vport_reps)
1629 return -ENOMEM;
1630
1631 mlx5_esw_for_all_reps(esw, vport_index, rep) {
1632 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
1633 rep->vport_index = vport_index;
1634
1635 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1636 atomic_set(&rep->rep_data[rep_type].state,
1637 REP_UNREGISTERED);
1638 }
1639
1640 return 0;
1641 }
1642
1643 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1644 struct mlx5_eswitch_rep *rep, u8 rep_type)
1645 {
1646 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1647 REP_LOADED, REP_REGISTERED) == REP_LOADED)
1648 esw->offloads.rep_ops[rep_type]->unload(rep);
1649 }
1650
1651 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1652 {
1653 struct mlx5_eswitch_rep *rep;
1654 int i;
1655
1656 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
1657 __esw_offloads_unload_rep(esw, rep, rep_type);
1658
1659 if (mlx5_ecpf_vport_exists(esw->dev)) {
1660 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1661 __esw_offloads_unload_rep(esw, rep, rep_type);
1662 }
1663
1664 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1665 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1666 __esw_offloads_unload_rep(esw, rep, rep_type);
1667 }
1668
1669 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1670 __esw_offloads_unload_rep(esw, rep, rep_type);
1671 }
1672
1673 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
1674 {
1675 struct mlx5_eswitch_rep *rep;
1676 int rep_type;
1677 int err;
1678
1679 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1680 return 0;
1681
1682 rep = mlx5_eswitch_get_rep(esw, vport_num);
1683 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1684 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1685 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1686 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1687 if (err)
1688 goto err_reps;
1689 }
1690
1691 return 0;
1692
1693 err_reps:
1694 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
1695 for (--rep_type; rep_type >= 0; rep_type--)
1696 __esw_offloads_unload_rep(esw, rep, rep_type);
1697 return err;
1698 }
1699
1700 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
1701 {
1702 struct mlx5_eswitch_rep *rep;
1703 int rep_type;
1704
1705 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1706 return;
1707
1708 rep = mlx5_eswitch_get_rep(esw, vport_num);
1709 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
1710 __esw_offloads_unload_rep(esw, rep, rep_type);
1711 }
1712
1713 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
1714 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1715
1716 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1717 struct mlx5_eswitch *peer_esw)
1718 {
1719 int err;
1720
1721 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1722 if (err)
1723 return err;
1724
1725 return 0;
1726 }
1727
1728 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1729 {
1730 mlx5e_tc_clean_fdb_peer_flows(esw);
1731 esw_del_fdb_peer_miss_rules(esw);
1732 }
1733
1734 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1735 struct mlx5_eswitch *peer_esw,
1736 bool pair)
1737 {
1738 struct mlx5_flow_root_namespace *peer_ns;
1739 struct mlx5_flow_root_namespace *ns;
1740 int err;
1741
1742 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1743 ns = esw->dev->priv.steering->fdb_root_ns;
1744
1745 if (pair) {
1746 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1747 if (err)
1748 return err;
1749
1750 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
1751 if (err) {
1752 mlx5_flow_namespace_set_peer(ns, NULL);
1753 return err;
1754 }
1755 } else {
1756 mlx5_flow_namespace_set_peer(ns, NULL);
1757 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1758 }
1759
1760 return 0;
1761 }
1762
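/* devcom event handler for the merged e-switch case: on PAIR, set the two
 * FDB namespaces as peers and install peer miss rules in both directions;
 * on UNPAIR, tear the pairing down.
 */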
1763 static int mlx5_esw_offloads_devcom_event(int event,
1764 void *my_data,
1765 void *event_data)
1766 {
1767 struct mlx5_eswitch *esw = my_data;
1768 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1769 struct mlx5_eswitch *peer_esw = event_data;
1770 int err;
1771
1772 switch (event) {
1773 case ESW_OFFLOADS_DEVCOM_PAIR:
1774 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1775 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1776 break;
1777
1778 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
1779 if (err)
1780 goto err_out;
1781 err = mlx5_esw_offloads_pair(esw, peer_esw);
1782 if (err)
1783 goto err_peer;
1784
1785 err = mlx5_esw_offloads_pair(peer_esw, esw);
1786 if (err)
1787 goto err_pair;
1788
1789 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1790 break;
1791
1792 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1793 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1794 break;
1795
1796 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1797 mlx5_esw_offloads_unpair(peer_esw);
1798 mlx5_esw_offloads_unpair(esw);
1799 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1800 break;
1801 }
1802
1803 return 0;
1804
1805 err_pair:
1806 mlx5_esw_offloads_unpair(esw);
1807 err_peer:
1808 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1809 err_out:
1810 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1811 event, err);
1812 return err;
1813 }
1814
1815 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1816 {
1817 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1818
1819 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1820 mutex_init(&esw->offloads.peer_mutex);
1821
1822 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1823 return;
1824
1825 mlx5_devcom_register_component(devcom,
1826 MLX5_DEVCOM_ESW_OFFLOADS,
1827 mlx5_esw_offloads_devcom_event,
1828 esw);
1829
1830 mlx5_devcom_send_event(devcom,
1831 MLX5_DEVCOM_ESW_OFFLOADS,
1832 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1833 }
1834
1835 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1836 {
1837 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1838
1839 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1840 return;
1841
1842 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1843 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1844
1845 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1846 }
1847
1848 static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1849 struct mlx5_vport *vport)
1850 {
1851 struct mlx5_flow_act flow_act = {0};
1852 struct mlx5_flow_spec *spec;
1853 int err = 0;
1854
1855 /* For prio tag mode, there is only one FTE:
1856 * 1) Untagged packets - push the prio tag VLAN and modify metadata if
1857 * required, then allow.
1858 * Unmatched traffic is allowed by default.
1859 */
1860 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1861 if (!spec)
1862 return -ENOMEM;
1863
1864 /* Untagged packets - push prio tag VLAN, allow */
1865 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1866 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1867 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1868 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1869 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1870 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1871 flow_act.vlan[0].vid = 0;
1872 flow_act.vlan[0].prio = 0;
1873
1874 if (vport->ingress.offloads.modify_metadata_rule) {
1875 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1876 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1877 }
1878
1879 vport->ingress.allow_rule =
1880 mlx5_add_flow_rules(vport->ingress.acl, spec,
1881 &flow_act, NULL, 0);
1882 if (IS_ERR(vport->ingress.allow_rule)) {
1883 err = PTR_ERR(vport->ingress.allow_rule);
1884 esw_warn(esw->dev,
1885 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1886 vport->vport, err);
1887 vport->ingress.allow_rule = NULL;
1888 }
1889
1890 kvfree(spec);
1891 return err;
1892 }
1893
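/* Program an ingress ACL rule that writes the source-port metadata
 * (vhca_id + vport, as packed by mlx5_eswitch_get_vport_metadata_for_match())
 * into the upper bits of reg_c_0 for every packet entering from this vport,
 * so that offload rules can later match on the packet's origin.
 */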
1894 static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1895 struct mlx5_vport *vport)
1896 {
1897 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1898 struct mlx5_flow_act flow_act = {};
1899 int err = 0;
1900 u32 key;
1901
1902 key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
1903 key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
1904
1905 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1906 MLX5_SET(set_action_in, action, field,
1907 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1908 MLX5_SET(set_action_in, action, data, key);
1909 MLX5_SET(set_action_in, action, offset,
1910 ESW_SOURCE_PORT_METADATA_OFFSET);
1911 MLX5_SET(set_action_in, action, length,
1912 ESW_SOURCE_PORT_METADATA_BITS);
1913
1914 vport->ingress.offloads.modify_metadata =
1915 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1916 1, action);
1917 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
1918 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
1919 esw_warn(esw->dev,
1920 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1921 vport->vport, err);
1922 return err;
1923 }
1924
1925 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1926 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1927 vport->ingress.offloads.modify_metadata_rule =
1928 mlx5_add_flow_rules(vport->ingress.acl,
1929 NULL, &flow_act, NULL, 0);
1930 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
1931 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
1932 esw_warn(esw->dev,
1933 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1934 vport->vport, err);
1935 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
1936 vport->ingress.offloads.modify_metadata_rule = NULL;
1937 }
1938 return err;
1939 }
1940
1941 static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1942 struct mlx5_vport *vport)
1943 {
1944 if (vport->ingress.offloads.modify_metadata_rule) {
1945 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
1946 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
1947
1948 vport->ingress.offloads.modify_metadata_rule = NULL;
1949 }
1950 }
1951
1952 static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
1953 struct mlx5_vport *vport)
1954 {
1955 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1956 struct mlx5_flow_group *g;
1957 void *match_criteria;
1958 u32 *flow_group_in;
1959 u32 flow_index = 0;
1960 int ret = 0;
1961
1962 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1963 if (!flow_group_in)
1964 return -ENOMEM;
1965
1966 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
1967 /* This group holds the FTE that matches untagged packets when prio_tag
1968 * is enabled.
1969 */
1970 memset(flow_group_in, 0, inlen);
1971
1972 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1973 flow_group_in, match_criteria);
1974 MLX5_SET(create_flow_group_in, flow_group_in,
1975 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1976 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1977 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1978 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1979
1980 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1981 if (IS_ERR(g)) {
1982 ret = PTR_ERR(g);
1983 esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
1984 vport->vport, ret);
1985 goto prio_tag_err;
1986 }
1987 vport->ingress.offloads.metadata_prio_tag_grp = g;
1988 flow_index++;
1989 }
1990
1991 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1992 /* This group holds a match-all FTE that adds the metadata: for
1993 * tagged packets when prio-tag is enabled (as a fallthrough), or
1994 * for all traffic when prio-tag is disabled.
1995 */
1996 memset(flow_group_in, 0, inlen);
1997 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1998 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1999
2000 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
2001 if (IS_ERR(g)) {
2002 ret = PTR_ERR(g);
2003 esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
2004 vport->vport, ret);
2005 goto metadata_err;
2006 }
2007 vport->ingress.offloads.metadata_allmatch_grp = g;
2008 }
2009
2010 kvfree(flow_group_in);
2011 return 0;
2012
2013 metadata_err:
2014 if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
2015 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2016 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
2017 }
2018 prio_tag_err:
2019 kvfree(flow_group_in);
2020 return ret;
2021 }
2022
2023 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
2024 {
2025 if (vport->ingress.offloads.metadata_allmatch_grp) {
2026 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
2027 vport->ingress.offloads.metadata_allmatch_grp = NULL;
2028 }
2029
2030 if (vport->ingress.offloads.metadata_prio_tag_grp) {
2031 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2032 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
2033 }
2034 }
2035
2036 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
2037 struct mlx5_vport *vport)
2038 {
2039 int num_ftes = 0;
2040 int err;
2041
2042 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
2043 !esw_check_ingress_prio_tag_enabled(esw, vport))
2044 return 0;
2045
2046 esw_vport_cleanup_ingress_rules(esw, vport);
2047
2048 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2049 num_ftes++;
2050 if (esw_check_ingress_prio_tag_enabled(esw, vport))
2051 num_ftes++;
2052
2053 err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
2054 if (err) {
2055 esw_warn(esw->dev,
2056 "failed to enable ingress acl (%d) on vport[%d]\n",
2057 err, vport->vport);
2058 return err;
2059 }
2060
2061 err = esw_vport_create_ingress_acl_group(esw, vport);
2062 if (err)
2063 goto group_err;
2064
2065 esw_debug(esw->dev,
2066 "vport[%d] configure ingress rules\n", vport->vport);
2067
2068 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2069 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
2070 if (err)
2071 goto metadata_err;
2072 }
2073
2074 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
2075 err = esw_vport_ingress_prio_tag_config(esw, vport);
2076 if (err)
2077 goto prio_tag_err;
2078 }
2079 return 0;
2080
2081 prio_tag_err:
2082 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2083 metadata_err:
2084 esw_vport_destroy_ingress_acl_group(vport);
2085 group_err:
2086 esw_vport_destroy_ingress_acl_table(vport);
2087 return err;
2088 }
2089
2090 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
2091 struct mlx5_vport *vport)
2092 {
2093 int err;
2094
2095 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
2096 return 0;
2097
2098 esw_vport_cleanup_egress_rules(esw, vport);
2099
2100 err = esw_vport_enable_egress_acl(esw, vport);
2101 if (err)
2102 return err;
2103
2104 /* For prio tag mode, there is only one FTE:
2105 * 1) prio tag packets - pop the prio tag VLAN, then allow.
2106 * Unmatched traffic is allowed by default.
2107 */
2108 esw_debug(esw->dev,
2109 "vport[%d] configure prio tag egress rules\n", vport->vport);
2110
2111 /* prio tag vlan rule - pop it so VF receives untagged packets */
2112 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
2113 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
2114 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
2115 if (err)
2116 esw_vport_disable_egress_acl(esw, vport);
2117
2118 return err;
2119 }
2120
2121 static bool
2122 esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2123 {
2124 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2125 return false;
2126
2127 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2128 MLX5_FDB_TO_VPORT_REG_C_0))
2129 return false;
2130
2131 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2132 return false;
2133
2134 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2135 mlx5_ecpf_vport_exists(esw->dev))
2136 return false;
2137
2138 return true;
2139 }
2140
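/* On a multi-port (mp) device traffic can reach this eswitch from the peer
 * PF as well, so the source vport number alone no longer identifies the
 * originating function; in that case reg_c_0 source-port metadata matching
 * is not just preferred but mandatory.
 */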
2141 static bool
2142 esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
2143 {
2144 return mlx5_core_mp_enabled(esw->dev);
2145 }
2146
2147 static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
2148 {
2149 return esw_check_vport_match_metadata_mandatory(esw) &&
2150 esw_check_vport_match_metadata_supported(esw);
2151 }
2152
2153 int
2154 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2155 struct mlx5_vport *vport)
2156 {
2157 int err;
2158
2159 err = esw_vport_ingress_config(esw, vport);
2160 if (err)
2161 return err;
2162
2163 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
2164 err = esw_vport_egress_config(esw, vport);
2165 if (err) {
2166 esw_vport_cleanup_ingress_rules(esw, vport);
2167 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2168 esw_vport_destroy_ingress_acl_group(vport);
2169 esw_vport_destroy_ingress_acl_table(vport);
2170 }
2171 }
2172 return err;
2173 }
2174
2175 void
2176 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2177 struct mlx5_vport *vport)
2178 {
2179 esw_vport_disable_egress_acl(esw, vport);
2180 esw_vport_cleanup_ingress_rules(esw, vport);
2181 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2182 esw_vport_destroy_ingress_acl_group(vport);
2183 esw_vport_destroy_ingress_acl_table(vport);
2184 }
2185
2186 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2187 {
2188 struct mlx5_vport *vport;
2189 int err;
2190
2191 if (esw_use_vport_metadata(esw))
2192 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2193
2194 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2195 err = esw_vport_create_offloads_acl_tables(esw, vport);
2196 if (err)
2197 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2198 return err;
2199 }
2200
2201 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2202 {
2203 struct mlx5_vport *vport;
2204
2205 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2206 esw_vport_destroy_offloads_acl_tables(esw, vport);
2207 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2208 }
2209
2210 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
2211 {
2212 int num_vfs = esw->esw_funcs.num_vfs;
2213 int total_vports;
2214 int err;
2215
2216 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2217 total_vports = esw->total_vports;
2218 else
2219 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2220
2221 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
2222
2223 err = esw_create_uplink_offloads_acl_tables(esw);
2224 if (err)
2225 return err;
2226
2227 err = esw_create_offloads_table(esw, total_vports);
2228 if (err)
2229 goto create_offloads_err;
2230
2231 err = esw_create_restore_table(esw);
2232 if (err)
2233 goto create_restore_err;
2234
2235 err = esw_create_offloads_fdb_tables(esw, total_vports);
2236 if (err)
2237 goto create_fdb_err;
2238
2239 err = esw_create_vport_rx_group(esw, total_vports);
2240 if (err)
2241 goto create_fg_err;
2242
2243 mutex_init(&esw->fdb_table.offloads.vports.lock);
2244 hash_init(esw->fdb_table.offloads.vports.table);
2245
2246 return 0;
2247
2248 create_fg_err:
2249 esw_destroy_offloads_fdb_tables(esw);
2250 create_fdb_err:
2251 esw_destroy_restore_table(esw);
2252 create_restore_err:
2253 esw_destroy_offloads_table(esw);
2254 create_offloads_err:
2255 esw_destroy_uplink_offloads_acl_tables(esw);
2256
2257 return err;
2258 }
2259
2260 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2261 {
2262 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
2263 esw_destroy_vport_rx_group(esw);
2264 esw_destroy_offloads_fdb_tables(esw);
2265 esw_destroy_restore_table(esw);
2266 esw_destroy_offloads_table(esw);
2267 esw_destroy_uplink_offloads_acl_tables(esw);
2268 }
2269
2270 static void
2271 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
2272 {
2273 bool host_pf_disabled;
2274 u16 new_num_vfs;
2275
2276 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2277 host_params_context.host_num_of_vfs);
2278 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2279 host_params_context.host_pf_disabled);
2280
2281 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2282 return;
2283
2284 /* Number of VFs can only change from "0 to x" or "x to 0". */
2285 if (esw->esw_funcs.num_vfs > 0) {
2286 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
2287 } else {
2288 int err;
2289
2290 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
2291 MLX5_VPORT_UC_ADDR_CHANGE);
2292 if (err)
2293 return;
2294 }
2295 esw->esw_funcs.num_vfs = new_num_vfs;
2296 }
2297
2298 static void esw_functions_changed_event_handler(struct work_struct *work)
2299 {
2300 struct mlx5_host_work *host_work;
2301 struct mlx5_eswitch *esw;
2302 const u32 *out;
2303
2304 host_work = container_of(work, struct mlx5_host_work, work);
2305 esw = host_work->esw;
2306
2307 out = mlx5_esw_query_functions(esw->dev);
2308 if (IS_ERR(out))
2309 goto out;
2310
2311 esw_vfs_changed_event_handler(esw, out);
2312 kvfree(out);
2313 out:
2314 kfree(host_work);
2315 }
2316
2317 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
2318 {
2319 struct mlx5_esw_functions *esw_funcs;
2320 struct mlx5_host_work *host_work;
2321 struct mlx5_eswitch *esw;
2322
2323 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2324 if (!host_work)
2325 return NOTIFY_DONE;
2326
2327 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2328 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
2329
2330 host_work->esw = esw;
2331
2332 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
2333 queue_work(esw->work_queue, &host_work->work);
2334
2335 return NOTIFY_OK;
2336 }
2337
2338 int esw_offloads_enable(struct mlx5_eswitch *esw)
2339 {
2340 struct mlx5_vport *vport;
2341 int err, i;
2342
2343 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2344 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2345 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2346 else
2347 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2348
2349 mutex_init(&esw->offloads.termtbl_mutex);
2350 mlx5_rdma_enable_roce(esw->dev);
2351
2352 err = esw_set_passing_vport_metadata(esw, true);
2353 if (err)
2354 goto err_vport_metadata;
2355
2356 err = esw_offloads_steering_init(esw);
2357 if (err)
2358 goto err_steering_init;
2359
2360 /* Representor will control the vport link state */
2361 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
2362 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
2363
2364 /* Uplink vport rep must load first. */
2365 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
2366 if (err)
2367 goto err_uplink;
2368
2369 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
2370 if (err)
2371 goto err_vports;
2372
2373 esw_offloads_devcom_init(esw);
2374
2375 return 0;
2376
2377 err_vports:
2378 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2379 err_uplink:
2380 esw_set_passing_vport_metadata(esw, false);
2381 err_steering_init:
2382 esw_offloads_steering_cleanup(esw);
2383 err_vport_metadata:
2384 mlx5_rdma_disable_roce(esw->dev);
2385 mutex_destroy(&esw->offloads.termtbl_mutex);
2386 return err;
2387 }
2388
2389 static int esw_offloads_stop(struct mlx5_eswitch *esw,
2390 struct netlink_ext_ack *extack)
2391 {
2392 int err, err1;
2393
2394 mlx5_eswitch_disable_locked(esw, false);
2395 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
2396 MLX5_ESWITCH_IGNORE_NUM_VFS);
2397 if (err) {
2398 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
2399 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
2400 MLX5_ESWITCH_IGNORE_NUM_VFS);
2401 if (err1) {
2402 NL_SET_ERR_MSG_MOD(extack,
2403 "Failed setting eswitch back to offloads");
2404 }
2405 }
2406
2407 return err;
2408 }
2409
2410 void esw_offloads_disable(struct mlx5_eswitch *esw)
2411 {
2412 esw_offloads_devcom_cleanup(esw);
2413 mlx5_eswitch_disable_pf_vf_vports(esw);
2414 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2415 esw_set_passing_vport_metadata(esw, false);
2416 esw_offloads_steering_cleanup(esw);
2417 mlx5_rdma_disable_roce(esw->dev);
2418 mutex_destroy(&esw->offloads.termtbl_mutex);
2419 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2420 }
2421
2422 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
2423 {
2424 switch (mode) {
2425 case DEVLINK_ESWITCH_MODE_LEGACY:
2426 *mlx5_mode = MLX5_ESWITCH_LEGACY;
2427 break;
2428 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
2429 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
2430 break;
2431 default:
2432 return -EINVAL;
2433 }
2434
2435 return 0;
2436 }
2437
2438 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2439 {
2440 switch (mlx5_mode) {
2441 case MLX5_ESWITCH_LEGACY:
2442 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2443 break;
2444 case MLX5_ESWITCH_OFFLOADS:
2445 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2446 break;
2447 default:
2448 return -EINVAL;
2449 }
2450
2451 return 0;
2452 }
2453
2454 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2455 {
2456 switch (mode) {
2457 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2458 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2459 break;
2460 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2461 *mlx5_mode = MLX5_INLINE_MODE_L2;
2462 break;
2463 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2464 *mlx5_mode = MLX5_INLINE_MODE_IP;
2465 break;
2466 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2467 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2468 break;
2469 default:
2470 return -EINVAL;
2471 }
2472
2473 return 0;
2474 }
2475
2476 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2477 {
2478 switch (mlx5_mode) {
2479 case MLX5_INLINE_MODE_NONE:
2480 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2481 break;
2482 case MLX5_INLINE_MODE_L2:
2483 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2484 break;
2485 case MLX5_INLINE_MODE_IP:
2486 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2487 break;
2488 case MLX5_INLINE_MODE_TCP_UDP:
2489 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2490 break;
2491 default:
2492 return -EINVAL;
2493 }
2494
2495 return 0;
2496 }
2497
2498 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
2499 {
2500 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2501 return -EOPNOTSUPP;
2502
2503 if (!MLX5_ESWITCH_MANAGER(dev))
2504 return -EPERM;
2505
2506 return 0;
2507 }
2508
2509 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
2510 {
2511 /* devlink commands in NONE eswitch mode are currently supported only
2512 * on ECPF.
2513 */
2514 return (esw->mode == MLX5_ESWITCH_NONE &&
2515 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
2516 }
2517
2518 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2519 struct netlink_ext_ack *extack)
2520 {
2521 struct mlx5_core_dev *dev = devlink_priv(devlink);
2522 struct mlx5_eswitch *esw = dev->priv.eswitch;
2523 u16 cur_mlx5_mode, mlx5_mode = 0;
2524 int err;
2525
2526 err = mlx5_eswitch_check(dev);
2527 if (err)
2528 return err;
2529
2530 if (esw_mode_from_devlink(mode, &mlx5_mode))
2531 return -EINVAL;
2532
2533 mutex_lock(&esw->mode_lock);
2534 err = eswitch_devlink_esw_mode_check(esw);
2535 if (err)
2536 goto unlock;
2537
2538 cur_mlx5_mode = esw->mode;
2539
2540 if (cur_mlx5_mode == mlx5_mode)
2541 goto unlock;
2542
2543 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
2544 err = esw_offloads_start(esw, extack);
2545 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
2546 err = esw_offloads_stop(esw, extack);
2547 else
2548 err = -EINVAL;
2549
2550 unlock:
2551 mutex_unlock(&esw->mode_lock);
2552 return err;
2553 }
2554
2555 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2556 {
2557 struct mlx5_core_dev *dev = devlink_priv(devlink);
2558 struct mlx5_eswitch *esw = dev->priv.eswitch;
2559 int err;
2560
2561 err = mlx5_eswitch_check(dev);
2562 if (err)
2563 return err;
2564
2565 mutex_lock(&esw->mode_lock);
2566 err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
2567 if (err)
2568 goto unlock;
2569
2570 err = esw_mode_to_devlink(esw->mode, mode);
2571 unlock:
2572 mutex_unlock(&esw->mode_lock);
2573 return err;
2574 }
2575
2576 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2577 struct netlink_ext_ack *extack)
2578 {
2579 struct mlx5_core_dev *dev = devlink_priv(devlink);
2580 struct mlx5_eswitch *esw = dev->priv.eswitch;
2581 int err, vport, num_vport;
2582 u8 mlx5_mode;
2583
2584 err = mlx5_eswitch_check(dev);
2585 if (err)
2586 return err;
2587
2588 mutex_lock(&esw->mode_lock);
2589 err = eswitch_devlink_esw_mode_check(esw);
2590 if (err)
2591 goto out;
2592
2593 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2594 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2595 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2596 goto out;
2597 /* fall through */
2598 case MLX5_CAP_INLINE_MODE_L2:
2599 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
2600 err = -EOPNOTSUPP;
2601 goto out;
2602 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2603 break;
2604 }
2605
2606 if (atomic64_read(&esw->offloads.num_flows) > 0) {
2607 NL_SET_ERR_MSG_MOD(extack,
2608 "Can't set inline mode when flows are configured");
2609 err = -EOPNOTSUPP;
2610 goto out;
2611 }
2612
2613 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2614 if (err)
2615 goto out;
2616
2617 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
2618 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2619 if (err) {
2620 NL_SET_ERR_MSG_MOD(extack,
2621 "Failed to set min inline on vport");
2622 goto revert_inline_mode;
2623 }
2624 }
2625
2626 esw->offloads.inline_mode = mlx5_mode;
2627 mutex_unlock(&esw->mode_lock);
2628 return 0;
2629
2630 revert_inline_mode:
2631 num_vport = --vport;
2632 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
2633 mlx5_modify_nic_vport_min_inline(dev,
2634 vport,
2635 esw->offloads.inline_mode);
2636 out:
2637 mutex_unlock(&esw->mode_lock);
2638 return err;
2639 }
2640
2641 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2642 {
2643 struct mlx5_core_dev *dev = devlink_priv(devlink);
2644 struct mlx5_eswitch *esw = dev->priv.eswitch;
2645 int err;
2646
2647 err = mlx5_eswitch_check(dev);
2648 if (err)
2649 return err;
2650
2651 mutex_lock(&esw->mode_lock);
2652 err = eswitch_devlink_esw_mode_check(esw);
2653 if (err)
2654 goto unlock;
2655
2656 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2657 unlock:
2658 mutex_unlock(&esw->mode_lock);
2659 return err;
2660 }
2661
2662 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2663 enum devlink_eswitch_encap_mode encap,
2664 struct netlink_ext_ack *extack)
2665 {
2666 struct mlx5_core_dev *dev = devlink_priv(devlink);
2667 struct mlx5_eswitch *esw = dev->priv.eswitch;
2668 int err;
2669
2670 err = mlx5_eswitch_check(dev);
2671 if (err)
2672 return err;
2673
2674 mutex_lock(&esw->mode_lock);
2675 err = eswitch_devlink_esw_mode_check(esw);
2676 if (err)
2677 goto unlock;
2678
2679 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
2680 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
2681 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
2682 err = -EOPNOTSUPP;
2683 goto unlock;
2684 }
2685
2686 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
2687 err = -EOPNOTSUPP;
2688 goto unlock;
2689 }
2690
2691 if (esw->mode == MLX5_ESWITCH_LEGACY) {
2692 esw->offloads.encap = encap;
2693 goto unlock;
2694 }
2695
2696 if (esw->offloads.encap == encap)
2697 goto unlock;
2698
2699 if (atomic64_read(&esw->offloads.num_flows) > 0) {
2700 NL_SET_ERR_MSG_MOD(extack,
2701 "Can't set encapsulation when flows are configured");
2702 err = -EOPNOTSUPP;
2703 goto unlock;
2704 }
2705
2706 esw_destroy_offloads_fdb_tables(esw);
2707
2708 esw->offloads.encap = encap;
2709
2710 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2711
2712 if (err) {
2713 NL_SET_ERR_MSG_MOD(extack,
2714 "Failed re-creating fast FDB table");
2715 esw->offloads.encap = !encap;
2716 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
2717 }
2718
2719 unlock:
2720 mutex_unlock(&esw->mode_lock);
2721 return err;
2722 }
2723
2724 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2725 enum devlink_eswitch_encap_mode *encap)
2726 {
2727 struct mlx5_core_dev *dev = devlink_priv(devlink);
2728 struct mlx5_eswitch *esw = dev->priv.eswitch;
2729 int err;
2730
2731 err = mlx5_eswitch_check(dev);
2732 if (err)
2733 return err;
2734
2735 mutex_lock(&esw->mode_lock);
2736 err = eswitch_devlink_esw_mode_check(esw);
2737 if (err)
2738 goto unlock;
2739
2740 *encap = esw->offloads.encap;
2741 unlock:
2742 mutex_unlock(&esw->mode_lock);
2743 return 0;
2744 }
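/* For reference, the callbacks above implement the devlink eswitch API.
 * From user space they are normally driven with the iproute2 "devlink"
 * tool (option spelling may differ between iproute2 versions), e.g.:
 *
 *   devlink dev eswitch show pci/0000:03:00.0
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *
 * The same "set" command also carries the inline-mode and encapsulation
 * settings handled by the inline_mode/encap callbacks.
 */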
2745
2746 static bool
2747 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
2748 {
2749 /* Currently, only an ECPF-based device has a representor for the host PF. */
2750 if (vport_num == MLX5_VPORT_PF &&
2751 !mlx5_core_is_ecpf_esw_manager(esw->dev))
2752 return false;
2753
2754 if (vport_num == MLX5_VPORT_ECPF &&
2755 !mlx5_ecpf_vport_exists(esw->dev))
2756 return false;
2757
2758 return true;
2759 }
2760
2761 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
2762 const struct mlx5_eswitch_rep_ops *ops,
2763 u8 rep_type)
2764 {
2765 struct mlx5_eswitch_rep_data *rep_data;
2766 struct mlx5_eswitch_rep *rep;
2767 int i;
2768
2769 esw->offloads.rep_ops[rep_type] = ops;
2770 mlx5_esw_for_all_reps(esw, i, rep) {
2771 if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
2772 rep_data = &rep->rep_data[rep_type];
2773 atomic_set(&rep_data->state, REP_REGISTERED);
2774 }
2775 }
2776 }
2777 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
2778
2779 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
2780 {
2781 struct mlx5_eswitch_rep *rep;
2782 int i;
2783
2784 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
2785 __unload_reps_all_vport(esw, rep_type);
2786
2787 mlx5_esw_for_all_reps(esw, i, rep)
2788 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2789 }
2790 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
2791
2792 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
2793 {
2794 struct mlx5_eswitch_rep *rep;
2795
2796 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2797 return rep->rep_data[rep_type].priv;
2798 }
2799
2800 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2801 u16 vport,
2802 u8 rep_type)
2803 {
2804 struct mlx5_eswitch_rep *rep;
2805
2806 rep = mlx5_eswitch_get_rep(esw, vport);
2807
2808 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2809 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2810 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
2811 return NULL;
2812 }
2813 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
2814
2815 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2816 {
2817 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
2818 }
2819 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2820
2821 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2822 u16 vport)
2823 {
2824 return mlx5_eswitch_get_rep(esw, vport);
2825 }
2826 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
2827
2828 bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2829 {
2830 return vport_num >= MLX5_VPORT_FIRST_VF &&
2831 vport_num <= esw->dev->priv.sriov.max_vfs;
2832 }
2833
2834 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
2835 {
2836 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
2837 }
2838 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
2839
2840 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2841 {
2842 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2843 }
2844 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2845
2846 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
2847 u16 vport_num)
2848 {
2849 u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
2850 u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
2851 u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
2852 u32 val;
2853
2854 /* Make sure the vhca_id fits within ESW_VHCA_ID_BITS */
2855 WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
2856
2857 /* Trim vhca_id to ESW_VHCA_ID_BITS */
2858 vhca_id &= vhca_id_mask;
2859
2860 /* Make sure the PF and ECPF map to the end of the ESW_VPORT_BITS range so
2861 * they don't overlap with VF numbers, or with each other, after trimming.
2862 */
2863 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
2864 vport_num_mask - 1);
2865 WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
2866 vport_num_mask - 1);
2867 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
2868 (MLX5_VPORT_ECPF & vport_num_mask));
2869
2870 /* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
2871 * overlap with the PF and ECPF.
2872 */
2873 if (vport_num != MLX5_VPORT_UPLINK &&
2874 vport_num != MLX5_VPORT_ECPF)
2875 WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
2876
2877 /* We can now trim vport_num to ESW_VPORT_BITS */
2878 vport_num &= vport_num_mask;
2879
2880 val = (vhca_id << ESW_VPORT_BITS) | vport_num;
2881 return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
2882 }
2883 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
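/* Worked example (illustrative values only - the real field widths come from
 * ESW_VHCA_ID_BITS and ESW_VPORT_BITS): assuming a 4-bit vhca_id field and a
 * 12-bit vport field, vhca_id 0x3 and vport_num 5 pack as
 * val = (0x3 << 12) | 0x5 = 0x3005, and the value returned for matching is
 * 0x3005 << (32 - 16) = 0x30050000, i.e. the source-port metadata occupies
 * the top ESW_SOURCE_PORT_METADATA_BITS of reg_c_0.
 */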