1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 #include "esw/chains.h"
41 #include "rdma.h"
42 #include "en.h"
43 #include "fs_core.h"
44 #include "lib/devcom.h"
45 #include "lib/eq.h"
46
47 /* There are two match-all miss flows, one for unicast dst mac and
48 * one for multicast.
49 */
50 #define MLX5_ESW_MISS_FLOWS (2)
51 #define UPLINK_REP_INDEX 0
52
53 /* Per vport tables */
54
55 #define MLX5_ESW_VPORT_TABLE_SIZE 128
56
57 /* This struct is used as a key to the hash table; it must be packed
58 * so that the hash result is consistent.
59 */
60 struct mlx5_vport_key {
61 u32 chain;
62 u16 prio;
63 u16 vport;
64 u16 vhca_id;
65 } __packed;
66
67 struct mlx5_vport_table {
68 struct hlist_node hlist;
69 struct mlx5_flow_table *fdb;
70 u32 num_rules;
71 struct mlx5_vport_key key;
72 };
73
74 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
75
76 static struct mlx5_flow_table *
77 esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
78 {
79 struct mlx5_flow_table_attr ft_attr = {};
80 struct mlx5_flow_table *fdb;
81
82 ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
83 ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
84 ft_attr.prio = FDB_PER_VPORT;
85 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
86 if (IS_ERR(fdb)) {
87 esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
88 PTR_ERR(fdb));
89 }
90
91 return fdb;
92 }
93
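/* Fill *key from the flow attributes (in_rep vport, chain, prio and the
 * local vhca_id) and return the jhash of the packed key, which indexes
 * the per-vport table hash.
 */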
94 static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
95 struct mlx5_esw_flow_attr *attr,
96 struct mlx5_vport_key *key)
97 {
98 key->vport = attr->in_rep->vport;
99 key->chain = attr->chain;
100 key->prio = attr->prio;
101 key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
102 return jhash(key, sizeof(*key), 0);
103 }
104
105 /* caller must hold vports.lock */
106 static struct mlx5_vport_table *
107 esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
108 {
109 struct mlx5_vport_table *e;
110
111 hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
112 if (!memcmp(&e->key, skey, sizeof(*skey)))
113 return e;
114
115 return NULL;
116 }
117
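/* Drop one reference on the per-vport table matching @attr; once its last
 * rule is released the table is removed from the hash and destroyed.
 */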
118 static void
119 esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
120 {
121 struct mlx5_vport_table *e;
122 struct mlx5_vport_key key;
123 u32 hkey;
124
125 mutex_lock(&esw->fdb_table.offloads.vports.lock);
126 hkey = flow_attr_to_vport_key(esw, attr, &key);
127 e = esw_vport_tbl_lookup(esw, &key, hkey);
128 if (!e || --e->num_rules)
129 goto out;
130
131 hash_del(&e->hlist);
132 mlx5_destroy_flow_table(e->fdb);
133 kfree(e);
134 out:
135 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
136 }
137
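/* Look up the per-vport table matching @attr and take a reference,
 * creating the table in the FDB namespace on first use. Callers release
 * the reference with esw_vport_tbl_put().
 */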
138 static struct mlx5_flow_table *
139 esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
140 {
141 struct mlx5_core_dev *dev = esw->dev;
142 struct mlx5_flow_namespace *ns;
143 struct mlx5_flow_table *fdb;
144 struct mlx5_vport_table *e;
145 struct mlx5_vport_key skey;
146 u32 hkey;
147
148 mutex_lock(&esw->fdb_table.offloads.vports.lock);
149 hkey = flow_attr_to_vport_key(esw, attr, &skey);
150 e = esw_vport_tbl_lookup(esw, &skey, hkey);
151 if (e) {
152 e->num_rules++;
153 goto out;
154 }
155
156 e = kzalloc(sizeof(*e), GFP_KERNEL);
157 if (!e) {
158 fdb = ERR_PTR(-ENOMEM);
159 goto err_alloc;
160 }
161
162 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
163 if (!ns) {
164 esw_warn(dev, "Failed to get FDB namespace\n");
165 fdb = ERR_PTR(-ENOENT);
166 goto err_ns;
167 }
168
169 fdb = esw_vport_tbl_create(esw, ns);
170 if (IS_ERR(fdb))
171 goto err_ns;
172
173 e->fdb = fdb;
174 e->num_rules = 1;
175 e->key = skey;
176 hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
177 out:
178 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
179 return e->fdb;
180
181 err_ns:
182 kfree(e);
183 err_alloc:
184 mutex_unlock(&esw->fdb_table.offloads.vports.lock);
185 return fdb;
186 }
187
188 int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
189 {
190 struct mlx5_esw_flow_attr attr = {};
191 struct mlx5_eswitch_rep rep = {};
192 struct mlx5_flow_table *fdb;
193 struct mlx5_vport *vport;
194 int i;
195
196 attr.prio = 1;
197 attr.in_rep = &rep;
198 mlx5_esw_for_all_vports(esw, i, vport) {
199 attr.in_rep->vport = vport->vport;
200 fdb = esw_vport_tbl_get(esw, &attr);
201 if (IS_ERR(fdb))
202 goto out;
203 }
204 return 0;
205
206 out:
207 mlx5_esw_vport_tbl_put(esw);
208 return PTR_ERR(fdb);
209 }
210
211 void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
212 {
213 struct mlx5_esw_flow_attr attr = {};
214 struct mlx5_eswitch_rep rep = {};
215 struct mlx5_vport *vport;
216 int i;
217
218 attr.prio = 1;
219 attr.in_rep = &rep;
220 mlx5_esw_for_all_vports(esw, i, vport) {
221 attr.in_rep->vport = vport->vport;
222 esw_vport_tbl_put(esw, &attr);
223 }
224 }
225
226 /* End: Per vport tables */
227
228 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
229 u16 vport_num)
230 {
231 int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
232
233 WARN_ON(idx > esw->total_vports - 1);
234 return &esw->offloads.vport_reps[idx];
235 }
236
237 static bool
238 esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
239 const struct mlx5_vport *vport)
240 {
241 return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
242 mlx5_eswitch_is_vf_vport(esw, vport->vport));
243 }
244
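/* Set the source-port match for an offloaded rule: either on the vport
 * metadata carried in reg_c_0, or on the misc source_port (plus the owner
 * vhca_id when the e-switches are merged).
 */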
245 static void
246 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
247 struct mlx5_flow_spec *spec,
248 struct mlx5_esw_flow_attr *attr)
249 {
250 void *misc2;
251 void *misc;
252
253 /* Use metadata matching because a vport is not represented by a single
254 * VHCA in dual-port RoCE mode, and matching on the source vport may fail.
255 */
256 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
257 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
258 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
259 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
260 attr->in_rep->vport));
261
262 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
263 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
264 mlx5_eswitch_get_vport_metadata_mask());
265
266 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
267 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
268 if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
269 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
270 } else {
271 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
272 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
273
274 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
275 MLX5_SET(fte_match_set_misc, misc,
276 source_eswitch_owner_vhca_id,
277 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
278
279 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
280 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
281 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
282 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
283 source_eswitch_owner_vhca_id);
284
285 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
286 }
287
288 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
289 attr->in_rep->vport == MLX5_VPORT_UPLINK)
290 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
291 }
292
293 struct mlx5_flow_handle *
294 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
295 struct mlx5_flow_spec *spec,
296 struct mlx5_esw_flow_attr *attr)
297 {
298 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
299 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
300 bool split = !!(attr->split_count);
301 struct mlx5_flow_handle *rule;
302 struct mlx5_flow_table *fdb;
303 bool hairpin = false;
304 int j, i = 0;
305
306 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
307 return ERR_PTR(-EOPNOTSUPP);
308
309 flow_act.action = attr->action;
310 /* if per-flow vlan pop/push is emulated, don't program it into the firmware */
311 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
312 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
313 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
314 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
315 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
316 flow_act.vlan[0].vid = attr->vlan_vid[0];
317 flow_act.vlan[0].prio = attr->vlan_prio[0];
318 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
319 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
320 flow_act.vlan[1].vid = attr->vlan_vid[1];
321 flow_act.vlan[1].prio = attr->vlan_prio[1];
322 }
323 }
324
325 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
326 struct mlx5_flow_table *ft;
327
328 if (attr->dest_ft) {
329 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
330 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
331 dest[i].ft = attr->dest_ft;
332 i++;
333 } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
334 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
335 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
336 dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
337 i++;
338 } else if (attr->dest_chain) {
339 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
340 ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
341 1, 0);
342 if (IS_ERR(ft)) {
343 rule = ERR_CAST(ft);
344 goto err_create_goto_table;
345 }
346
347 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
348 dest[i].ft = ft;
349 i++;
350 } else {
351 for (j = attr->split_count; j < attr->out_count; j++) {
352 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
353 dest[i].vport.num = attr->dests[j].rep->vport;
354 dest[i].vport.vhca_id =
355 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
356 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
357 dest[i].vport.flags |=
358 MLX5_FLOW_DEST_VPORT_VHCA_ID;
359 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
360 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
361 flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
362 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
363 dest[i].vport.pkt_reformat =
364 attr->dests[j].pkt_reformat;
365 }
366 i++;
367 }
368 }
369 }
370 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
371 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
372 dest[i].counter_id = mlx5_fc_id(attr->counter);
373 i++;
374 }
375
376 if (attr->outer_match_level != MLX5_MATCH_NONE)
377 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
378 if (attr->inner_match_level != MLX5_MATCH_NONE)
379 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
380
381 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
382 flow_act.modify_hdr = attr->modify_hdr;
383
384 if (split) {
385 fdb = esw_vport_tbl_get(esw, attr);
386 } else {
387 if (attr->chain || attr->prio)
388 fdb = mlx5_esw_chains_get_table(esw, attr->chain,
389 attr->prio, 0);
390 else
391 fdb = attr->fdb;
392
393 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
394 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
395 }
396 if (IS_ERR(fdb)) {
397 rule = ERR_CAST(fdb);
398 goto err_esw_get;
399 }
400
401 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
402 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
403 &flow_act, dest, i);
404 hairpin = true;
405 } else {
406 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
407 }
408 if (IS_ERR(rule))
409 goto err_add_rule;
410 else
411 atomic64_inc(&esw->offloads.num_flows);
412
413 if (hairpin)
414 attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;
415
416 return rule;
417
418 err_add_rule:
419 if (split)
420 esw_vport_tbl_put(esw, attr);
421 else if (attr->chain || attr->prio)
422 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
423 err_esw_get:
424 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
425 mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
426 err_create_goto_table:
427 return rule;
428 }
429
430 struct mlx5_flow_handle *
431 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
432 struct mlx5_flow_spec *spec,
433 struct mlx5_esw_flow_attr *attr)
434 {
435 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
436 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
437 struct mlx5_flow_table *fast_fdb;
438 struct mlx5_flow_table *fwd_fdb;
439 struct mlx5_flow_handle *rule;
440 int i;
441
442 fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
443 if (IS_ERR(fast_fdb)) {
444 rule = ERR_CAST(fast_fdb);
445 goto err_get_fast;
446 }
447
448 fwd_fdb = esw_vport_tbl_get(esw, attr);
449 if (IS_ERR(fwd_fdb)) {
450 rule = ERR_CAST(fwd_fdb);
451 goto err_get_fwd;
452 }
453
454 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
455 for (i = 0; i < attr->split_count; i++) {
456 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
457 dest[i].vport.num = attr->dests[i].rep->vport;
458 dest[i].vport.vhca_id =
459 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
460 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
461 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
462 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
463 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
464 dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
465 }
466 }
467 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
468 dest[i].ft = fwd_fdb;
469 i++;
470
471 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
472
473 if (attr->outer_match_level != MLX5_MATCH_NONE)
474 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
475
476 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
477 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
478
479 if (IS_ERR(rule))
480 goto add_err;
481
482 atomic64_inc(&esw->offloads.num_flows);
483
484 return rule;
485 add_err:
486 esw_vport_tbl_put(esw, attr);
487 err_get_fwd:
488 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
489 err_get_fast:
490 return rule;
491 }
492
493 static void
494 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
495 struct mlx5_flow_handle *rule,
496 struct mlx5_esw_flow_attr *attr,
497 bool fwd_rule)
498 {
499 bool split = (attr->split_count > 0);
500 int i;
501
502 mlx5_del_flow_rules(rule);
503
504 if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
505 /* unref the term table */
506 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
507 if (attr->dests[i].termtbl)
508 mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
509 }
510 }
511
512 atomic64_dec(&esw->offloads.num_flows);
513
514 if (fwd_rule) {
515 esw_vport_tbl_put(esw, attr);
516 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
517 } else {
518 if (split)
519 esw_vport_tbl_put(esw, attr);
520 else if (attr->chain || attr->prio)
521 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
522 0);
523 if (attr->dest_chain)
524 mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
525 }
526 }
527
528 void
529 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
530 struct mlx5_flow_handle *rule,
531 struct mlx5_esw_flow_attr *attr)
532 {
533 __mlx5_eswitch_del_rule(esw, rule, attr, false);
534 }
535
536 void
537 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
538 struct mlx5_flow_handle *rule,
539 struct mlx5_esw_flow_attr *attr)
540 {
541 __mlx5_eswitch_del_rule(esw, rule, attr, true);
542 }
543
544 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
545 {
546 struct mlx5_eswitch_rep *rep;
547 int i, err = 0;
548
549 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
550 mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
551 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
552 continue;
553
554 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
555 if (err)
556 goto out;
557 }
558
559 out:
560 return err;
561 }
562
563 static struct mlx5_eswitch_rep *
564 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
565 {
566 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
567
568 in_rep = attr->in_rep;
569 out_rep = attr->dests[0].rep;
570
571 if (push)
572 vport = in_rep;
573 else if (pop)
574 vport = out_rep;
575 else
576 vport = in_rep;
577
578 return vport;
579 }
580
581 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
582 bool push, bool pop, bool fwd)
583 {
584 struct mlx5_eswitch_rep *in_rep, *out_rep;
585
586 if ((push || pop) && !fwd)
587 goto out_notsupp;
588
589 in_rep = attr->in_rep;
590 out_rep = attr->dests[0].rep;
591
592 if (push && in_rep->vport == MLX5_VPORT_UPLINK)
593 goto out_notsupp;
594
595 if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
596 goto out_notsupp;
597
598 /* vport has vlan push configured, can't offload VF --> wire rules without it */
599 if (!push && !pop && fwd)
600 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
601 goto out_notsupp;
602
603 /* protects against (1) setting rules with different vlans to push and
604 * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
605 */
606 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
607 goto out_notsupp;
608
609 return 0;
610
611 out_notsupp:
612 return -EOPNOTSUPP;
613 }
614
615 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
616 struct mlx5_esw_flow_attr *attr)
617 {
618 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
619 struct mlx5_eswitch_rep *vport = NULL;
620 bool push, pop, fwd;
621 int err = 0;
622
623 /* no-op if vlan push/pop is supported by the device, i.e. we're not in emulation mode */
624 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
625 return 0;
626
627 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
628 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
629 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
630 !attr->dest_chain);
631
632 mutex_lock(&esw->state_lock);
633
634 err = esw_add_vlan_action_check(attr, push, pop, fwd);
635 if (err)
636 goto unlock;
637
638 attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
639
640 vport = esw_vlan_action_get_vport(attr, push, pop);
641
642 if (!push && !pop && fwd) {
643 /* tracks VF --> wire rules without vlan push action */
644 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
645 vport->vlan_refcount++;
646 attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
647 }
648
649 goto unlock;
650 }
651
652 if (!push && !pop)
653 goto unlock;
654
655 if (!(offloads->vlan_push_pop_refcount)) {
656 /* it's the 1st vlan rule, apply global vlan pop policy */
657 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
658 if (err)
659 goto out;
660 }
661 offloads->vlan_push_pop_refcount++;
662
663 if (push) {
664 if (vport->vlan_refcount)
665 goto skip_set_push;
666
667 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
668 SET_VLAN_INSERT | SET_VLAN_STRIP);
669 if (err)
670 goto out;
671 vport->vlan = attr->vlan_vid[0];
672 skip_set_push:
673 vport->vlan_refcount++;
674 }
675 out:
676 if (!err)
677 attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
678 unlock:
679 mutex_unlock(&esw->state_lock);
680 return err;
681 }
682
683 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
684 struct mlx5_esw_flow_attr *attr)
685 {
686 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
687 struct mlx5_eswitch_rep *vport = NULL;
688 bool push, pop, fwd;
689 int err = 0;
690
692 /* no-op if vlan push/pop is supported by the device, i.e. we're not in emulation mode */
692 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
693 return 0;
694
695 if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
696 return 0;
697
698 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
699 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
700 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
701
702 mutex_lock(&esw->state_lock);
703
704 vport = esw_vlan_action_get_vport(attr, push, pop);
705
706 if (!push && !pop && fwd) {
707 /* tracks VF --> wire rules without vlan push action */
708 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
709 vport->vlan_refcount--;
710
711 goto out;
712 }
713
714 if (push) {
715 vport->vlan_refcount--;
716 if (vport->vlan_refcount)
717 goto skip_unset_push;
718
719 vport->vlan = 0;
720 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
721 0, 0, SET_VLAN_STRIP);
722 if (err)
723 goto out;
724 }
725
726 skip_unset_push:
727 offloads->vlan_push_pop_refcount--;
728 if (offloads->vlan_push_pop_refcount)
729 goto out;
730
731 /* no more vlan rules, stop global vlan pop policy */
732 err = esw_set_global_vlan_pop(esw, 0);
733
734 out:
735 mutex_unlock(&esw->state_lock);
736 return err;
737 }
738
739 struct mlx5_flow_handle *
740 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
741 u32 sqn)
742 {
743 struct mlx5_flow_act flow_act = {0};
744 struct mlx5_flow_destination dest = {};
745 struct mlx5_flow_handle *flow_rule;
746 struct mlx5_flow_spec *spec;
747 void *misc;
748
749 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
750 if (!spec) {
751 flow_rule = ERR_PTR(-ENOMEM);
752 goto out;
753 }
754
755 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
756 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
757 /* source vport is the esw manager */
758 MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
759
760 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
761 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
762 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
763
764 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
765 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
766 dest.vport.num = vport;
767 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
768
769 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
770 spec, &flow_act, &dest, 1);
771 if (IS_ERR(flow_rule))
772 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
773 out:
774 kvfree(spec);
775 return flow_rule;
776 }
777 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
778
779 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
780 {
781 mlx5_del_flow_rules(rule);
782 }
783
784 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
785 {
786 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
787 MLX5_FDB_TO_VPORT_REG_C_1;
788 }
789
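/* Enable or disable copying of reg_c_0 (and reg_c_1 when loopback is
 * supported) from the FDB to the vport context, so metadata written in the
 * FDB stays visible to the vport after the FDB lookup.
 */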
790 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
791 {
792 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
793 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
794 u8 curr, wanted;
795 int err;
796
797 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
798 !mlx5_eswitch_vport_match_metadata_enabled(esw))
799 return 0;
800
801 err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
802 out, sizeof(out));
803 if (err)
804 return err;
805
806 curr = MLX5_GET(query_esw_vport_context_out, out,
807 esw_vport_context.fdb_to_vport_reg_c_id);
808 wanted = MLX5_FDB_TO_VPORT_REG_C_0;
809 if (mlx5_eswitch_reg_c1_loopback_supported(esw))
810 wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
811
812 if (enable)
813 curr |= wanted;
814 else
815 curr &= ~wanted;
816
817 MLX5_SET(modify_esw_vport_context_in, in,
818 esw_vport_context.fdb_to_vport_reg_c_id, curr);
819
820 MLX5_SET(modify_esw_vport_context_in, in,
821 field_select.fdb_to_vport_reg_c_id, 1);
822
823 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
824 sizeof(in));
825 if (!err) {
826 if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
827 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
828 else
829 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
830 }
831
832 return err;
833 }
834
835 static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
836 struct mlx5_core_dev *peer_dev,
837 struct mlx5_flow_spec *spec,
838 struct mlx5_flow_destination *dest)
839 {
840 void *misc;
841
842 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
843 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
844 misc_parameters_2);
845 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
846 mlx5_eswitch_get_vport_metadata_mask());
847
848 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
849 } else {
850 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
851 misc_parameters);
852
853 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
854 MLX5_CAP_GEN(peer_dev, vhca_id));
855
856 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
857
858 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
859 misc_parameters);
860 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
861 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
862 source_eswitch_owner_vhca_id);
863 }
864
865 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
866 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
867 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
868 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
869 }
870
871 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
872 struct mlx5_eswitch *peer_esw,
873 struct mlx5_flow_spec *spec,
874 u16 vport)
875 {
876 void *misc;
877
878 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
879 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
880 misc_parameters_2);
881 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
882 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
883 vport));
884 } else {
885 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
886 misc_parameters);
887 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
888 }
889 }
890
891 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
892 struct mlx5_core_dev *peer_dev)
893 {
894 struct mlx5_flow_destination dest = {};
895 struct mlx5_flow_act flow_act = {0};
896 struct mlx5_flow_handle **flows;
897 struct mlx5_flow_handle *flow;
898 struct mlx5_flow_spec *spec;
899 /* total vports is the same for both e-switches */
900 int nvports = esw->total_vports;
901 void *misc;
902 int err, i;
903
904 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
905 if (!spec)
906 return -ENOMEM;
907
908 peer_miss_rules_setup(esw, peer_dev, spec, &dest);
909
910 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
911 if (!flows) {
912 err = -ENOMEM;
913 goto alloc_flows_err;
914 }
915
916 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
917 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
918 misc_parameters);
919
920 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
921 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
922 spec, MLX5_VPORT_PF);
923
924 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
925 spec, &flow_act, &dest, 1);
926 if (IS_ERR(flow)) {
927 err = PTR_ERR(flow);
928 goto add_pf_flow_err;
929 }
930 flows[MLX5_VPORT_PF] = flow;
931 }
932
933 if (mlx5_ecpf_vport_exists(esw->dev)) {
934 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
935 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
936 spec, &flow_act, &dest, 1);
937 if (IS_ERR(flow)) {
938 err = PTR_ERR(flow);
939 goto add_ecpf_flow_err;
940 }
941 flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
942 }
943
944 mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
945 esw_set_peer_miss_rule_source_port(esw,
946 peer_dev->priv.eswitch,
947 spec, i);
948
949 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
950 spec, &flow_act, &dest, 1);
951 if (IS_ERR(flow)) {
952 err = PTR_ERR(flow);
953 goto add_vf_flow_err;
954 }
955 flows[i] = flow;
956 }
957
958 esw->fdb_table.offloads.peer_miss_rules = flows;
959
960 kvfree(spec);
961 return 0;
962
963 add_vf_flow_err:
964 nvports = --i;
965 mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
966 mlx5_del_flow_rules(flows[i]);
967
968 if (mlx5_ecpf_vport_exists(esw->dev))
969 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
970 add_ecpf_flow_err:
971 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
972 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
973 add_pf_flow_err:
974 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
975 kvfree(flows);
976 alloc_flows_err:
977 kvfree(spec);
978 return err;
979 }
980
981 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
982 {
983 struct mlx5_flow_handle **flows;
984 int i;
985
986 flows = esw->fdb_table.offloads.peer_miss_rules;
987
988 mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
989 mlx5_core_max_vfs(esw->dev))
990 mlx5_del_flow_rules(flows[i]);
991
992 if (mlx5_ecpf_vport_exists(esw->dev))
993 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
994
995 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
996 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
997
998 kvfree(flows);
999 }
1000
1001 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
1002 {
1003 struct mlx5_flow_act flow_act = {0};
1004 struct mlx5_flow_destination dest = {};
1005 struct mlx5_flow_handle *flow_rule = NULL;
1006 struct mlx5_flow_spec *spec;
1007 void *headers_c;
1008 void *headers_v;
1009 int err = 0;
1010 u8 *dmac_c;
1011 u8 *dmac_v;
1012
1013 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1014 if (!spec) {
1015 err = -ENOMEM;
1016 goto out;
1017 }
1018
1019 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1020 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1021 outer_headers);
1022 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
1023 outer_headers.dmac_47_16);
1024 dmac_c[0] = 0x01;
1025
1026 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1027 dest.vport.num = esw->manager_vport;
1028 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1029
1030 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1031 spec, &flow_act, &dest, 1);
1032 if (IS_ERR(flow_rule)) {
1033 err = PTR_ERR(flow_rule);
1034 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
1035 goto out;
1036 }
1037
1038 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
1039
1040 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1041 outer_headers);
1042 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
1043 outer_headers.dmac_47_16);
1044 dmac_v[0] = 0x01;
1045 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1046 spec, &flow_act, &dest, 1);
1047 if (IS_ERR(flow_rule)) {
1048 err = PTR_ERR(flow_rule);
1049 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
1050 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1051 goto out;
1052 }
1053
1054 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
1055
1056 out:
1057 kvfree(spec);
1058 return err;
1059 }
1060
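/* Add a rule to the restore table that matches the chain tag @tag in
 * reg_c_0, copies reg_c_1 into reg_b, sets the flow tag to @tag and
 * forwards the packet to the offloads table.
 */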
1061 struct mlx5_flow_handle *
1062 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
1063 {
1064 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
1065 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
1066 struct mlx5_flow_context *flow_context;
1067 struct mlx5_flow_handle *flow_rule;
1068 struct mlx5_flow_destination dest;
1069 struct mlx5_flow_spec *spec;
1070 void *misc;
1071
1072 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1073 return ERR_PTR(-EOPNOTSUPP);
1074
1075 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
1076 if (!spec)
1077 return ERR_PTR(-ENOMEM);
1078
1079 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1080 misc_parameters_2);
1081 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1082 ESW_CHAIN_TAG_METADATA_MASK);
1083 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1084 misc_parameters_2);
1085 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
1086 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1087 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1088 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1089 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
1090
1091 flow_context = &spec->flow_context;
1092 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1093 flow_context->flow_tag = tag;
1094 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1095 dest.ft = esw->offloads.ft_offloads;
1096
1097 flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1098 kfree(spec);
1099
1100 if (IS_ERR(flow_rule))
1101 esw_warn(esw->dev,
1102 "Failed to create restore rule for tag: %d, err(%d)\n",
1103 tag, (int)PTR_ERR(flow_rule));
1104
1105 return flow_rule;
1106 }
1107
1108 u32
1109 esw_get_max_restore_tag(struct mlx5_eswitch *esw)
1110 {
1111 return ESW_CHAIN_TAG_METADATA_MASK;
1112 }
1113
1114 #define MAX_PF_SQ 256
1115 #define MAX_SQ_NVPORTS 32
1116
1117 static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1118 u32 *flow_group_in)
1119 {
1120 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1121 flow_group_in,
1122 match_criteria);
1123
1124 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1125 MLX5_SET(create_flow_group_in, flow_group_in,
1126 match_criteria_enable,
1127 MLX5_MATCH_MISC_PARAMETERS_2);
1128
1129 MLX5_SET(fte_match_param, match_criteria,
1130 misc_parameters_2.metadata_reg_c_0,
1131 mlx5_eswitch_get_vport_metadata_mask());
1132 } else {
1133 MLX5_SET(create_flow_group_in, flow_group_in,
1134 match_criteria_enable,
1135 MLX5_MATCH_MISC_PARAMETERS);
1136
1137 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1138 misc_parameters.source_port);
1139 }
1140 }
1141
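/* Create the slow-path offloads FDB: the table itself, the tc chains, and
 * three flow groups - send-to-vport (source_sqn/source_port match), peer
 * e-switch miss, and the unicast/multicast miss group whose rules forward
 * to the e-switch manager vport.
 */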
1142 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1143 {
1144 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1145 struct mlx5_flow_table_attr ft_attr = {};
1146 struct mlx5_core_dev *dev = esw->dev;
1147 struct mlx5_flow_namespace *root_ns;
1148 struct mlx5_flow_table *fdb = NULL;
1149 u32 flags = 0, *flow_group_in;
1150 int table_size, ix, err = 0;
1151 struct mlx5_flow_group *g;
1152 void *match_criteria;
1153 u8 *dmac;
1154
1155 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1156
1157 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1158 if (!flow_group_in)
1159 return -ENOMEM;
1160
1161 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1162 if (!root_ns) {
1163 esw_warn(dev, "Failed to get FDB flow namespace\n");
1164 err = -EOPNOTSUPP;
1165 goto ns_err;
1166 }
1167 esw->fdb_table.offloads.ns = root_ns;
1168 err = mlx5_flow_namespace_set_mode(root_ns,
1169 esw->dev->priv.steering->mode);
1170 if (err) {
1171 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1172 goto ns_err;
1173 }
1174
1175 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1176 MLX5_ESW_MISS_FLOWS + esw->total_vports;
1177
1178 /* create the slow path fdb with encap set, so further table instances
1179 * can be created at run time while VFs are probed if the FW allows that.
1180 */
1181 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1182 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1183 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1184
1185 ft_attr.flags = flags;
1186 ft_attr.max_fte = table_size;
1187 ft_attr.prio = FDB_SLOW_PATH;
1188
1189 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1190 if (IS_ERR(fdb)) {
1191 err = PTR_ERR(fdb);
1192 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1193 goto slow_fdb_err;
1194 }
1195 esw->fdb_table.offloads.slow_fdb = fdb;
1196
1197 err = mlx5_esw_chains_create(esw);
1198 if (err) {
1199 esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
1200 goto fdb_chains_err;
1201 }
1202
1203 /* create send-to-vport group */
1204 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1205 MLX5_MATCH_MISC_PARAMETERS);
1206
1207 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1208
1209 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1210 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1211
1212 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
1213 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1214 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1215
1216 g = mlx5_create_flow_group(fdb, flow_group_in);
1217 if (IS_ERR(g)) {
1218 err = PTR_ERR(g);
1219 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1220 goto send_vport_err;
1221 }
1222 esw->fdb_table.offloads.send_to_vport_grp = g;
1223
1224 /* create peer esw miss group */
1225 memset(flow_group_in, 0, inlen);
1226
1227 esw_set_flow_group_source_port(esw, flow_group_in);
1228
1229 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1230 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1231 flow_group_in,
1232 match_criteria);
1233
1234 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1235 misc_parameters.source_eswitch_owner_vhca_id);
1236
1237 MLX5_SET(create_flow_group_in, flow_group_in,
1238 source_eswitch_owner_vhca_id_valid, 1);
1239 }
1240
1241 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1242 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1243 ix + esw->total_vports - 1);
1244 ix += esw->total_vports;
1245
1246 g = mlx5_create_flow_group(fdb, flow_group_in);
1247 if (IS_ERR(g)) {
1248 err = PTR_ERR(g);
1249 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1250 goto peer_miss_err;
1251 }
1252 esw->fdb_table.offloads.peer_miss_grp = g;
1253
1254 /* create miss group */
1255 memset(flow_group_in, 0, inlen);
1256 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1257 MLX5_MATCH_OUTER_HEADERS);
1258 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1259 match_criteria);
1260 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1261 outer_headers.dmac_47_16);
1262 dmac[0] = 0x01;
1263
1264 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1265 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1266 ix + MLX5_ESW_MISS_FLOWS);
1267
1268 g = mlx5_create_flow_group(fdb, flow_group_in);
1269 if (IS_ERR(g)) {
1270 err = PTR_ERR(g);
1271 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1272 goto miss_err;
1273 }
1274 esw->fdb_table.offloads.miss_grp = g;
1275
1276 err = esw_add_fdb_miss_rule(esw);
1277 if (err)
1278 goto miss_rule_err;
1279
1280 esw->nvports = nvports;
1281 kvfree(flow_group_in);
1282 return 0;
1283
1284 miss_rule_err:
1285 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1286 miss_err:
1287 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1288 peer_miss_err:
1289 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1290 send_vport_err:
1291 mlx5_esw_chains_destroy(esw);
1292 fdb_chains_err:
1293 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1294 slow_fdb_err:
1295 /* Holds true only as long as DMFS is the default */
1296 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1297 ns_err:
1298 kvfree(flow_group_in);
1299 return err;
1300 }
1301
1302 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1303 {
1304 if (!esw->fdb_table.offloads.slow_fdb)
1305 return;
1306
1307 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1308 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1309 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1310 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1311 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1312 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1313
1314 mlx5_esw_chains_destroy(esw);
1315 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1316 /* Holds true only as long as DMFS is the default */
1317 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1318 MLX5_FLOW_STEERING_MODE_DMFS);
1319 }
1320
1321 static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
1322 {
1323 struct mlx5_flow_table_attr ft_attr = {};
1324 struct mlx5_core_dev *dev = esw->dev;
1325 struct mlx5_flow_table *ft_offloads;
1326 struct mlx5_flow_namespace *ns;
1327 int err = 0;
1328
1329 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1330 if (!ns) {
1331 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1332 return -EOPNOTSUPP;
1333 }
1334
1335 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
1336 ft_attr.prio = 1;
1337
1338 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1339 if (IS_ERR(ft_offloads)) {
1340 err = PTR_ERR(ft_offloads);
1341 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1342 return err;
1343 }
1344
1345 esw->offloads.ft_offloads = ft_offloads;
1346 return 0;
1347 }
1348
1349 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1350 {
1351 struct mlx5_esw_offload *offloads = &esw->offloads;
1352
1353 mlx5_destroy_flow_table(offloads->ft_offloads);
1354 }
1355
1356 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
1357 {
1358 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1359 struct mlx5_flow_group *g;
1360 u32 *flow_group_in;
1361 int err = 0;
1362
1363 nvports = nvports + MLX5_ESW_MISS_FLOWS;
1364 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1365 if (!flow_group_in)
1366 return -ENOMEM;
1367
1368 /* create vport rx group */
1369 esw_set_flow_group_source_port(esw, flow_group_in);
1370
1371 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1372 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1373
1374 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1375
1376 if (IS_ERR(g)) {
1377 err = PTR_ERR(g);
1378 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1379 goto out;
1380 }
1381
1382 esw->offloads.vport_rx_group = g;
1383 out:
1384 kvfree(flow_group_in);
1385 return err;
1386 }
1387
1388 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1389 {
1390 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1391 }
1392
1393 struct mlx5_flow_handle *
1394 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
1395 struct mlx5_flow_destination *dest)
1396 {
1397 struct mlx5_flow_act flow_act = {0};
1398 struct mlx5_flow_handle *flow_rule;
1399 struct mlx5_flow_spec *spec;
1400 void *misc;
1401
1402 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1403 if (!spec) {
1404 flow_rule = ERR_PTR(-ENOMEM);
1405 goto out;
1406 }
1407
1408 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1409 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1410 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1411 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
1412
1413 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
1414 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1415 mlx5_eswitch_get_vport_metadata_mask());
1416
1417 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1418 } else {
1419 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1420 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1421
1422 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1423 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1424
1425 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1426 }
1427
1428 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1429 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1430 &flow_act, dest, 1);
1431 if (IS_ERR(flow_rule)) {
1432 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1433 goto out;
1434 }
1435
1436 out:
1437 kvfree(spec);
1438 return flow_rule;
1439 }
1440
1441
1442 static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
1443 {
1444 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1445 struct mlx5_core_dev *dev = esw->dev;
1446 int vport;
1447
1448 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1449 return -EOPNOTSUPP;
1450
1451 if (esw->mode == MLX5_ESWITCH_NONE)
1452 return -EOPNOTSUPP;
1453
1454 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1455 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1456 mlx5_mode = MLX5_INLINE_MODE_NONE;
1457 goto out;
1458 case MLX5_CAP_INLINE_MODE_L2:
1459 mlx5_mode = MLX5_INLINE_MODE_L2;
1460 goto out;
1461 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1462 goto query_vports;
1463 }
1464
1465 query_vports:
1466 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
1467 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
1468 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1469 if (prev_mlx5_mode != mlx5_mode)
1470 return -EINVAL;
1471 prev_mlx5_mode = mlx5_mode;
1472 }
1473
1474 out:
1475 *mode = mlx5_mode;
1476 return 0;
1477 }
1478
1479 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
1480 {
1481 struct mlx5_esw_offload *offloads = &esw->offloads;
1482
1483 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1484 return;
1485
1486 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
1487 mlx5_destroy_flow_group(offloads->restore_group);
1488 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
1489 }
1490
1491 static int esw_create_restore_table(struct mlx5_eswitch *esw)
1492 {
1493 u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1494 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1495 struct mlx5_flow_table_attr ft_attr = {};
1496 struct mlx5_core_dev *dev = esw->dev;
1497 struct mlx5_flow_namespace *ns;
1498 struct mlx5_modify_hdr *mod_hdr;
1499 void *match_criteria, *misc;
1500 struct mlx5_flow_table *ft;
1501 struct mlx5_flow_group *g;
1502 u32 *flow_group_in;
1503 int err = 0;
1504
1505 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1506 return 0;
1507
1508 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1509 if (!ns) {
1510 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1511 return -EOPNOTSUPP;
1512 }
1513
1514 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1515 if (!flow_group_in) {
1516 err = -ENOMEM;
1517 goto out_free;
1518 }
1519
1520 ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
1521 ft = mlx5_create_flow_table(ns, &ft_attr);
1522 if (IS_ERR(ft)) {
1523 err = PTR_ERR(ft);
1524 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
1525 err);
1526 goto out_free;
1527 }
1528
1529 memset(flow_group_in, 0, inlen);
1530 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1531 match_criteria);
1532 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
1533 misc_parameters_2);
1534
1535 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1536 ESW_CHAIN_TAG_METADATA_MASK);
1537 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1538 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1539 ft_attr.max_fte - 1);
1540 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1541 MLX5_MATCH_MISC_PARAMETERS_2);
1542 g = mlx5_create_flow_group(ft, flow_group_in);
1543 if (IS_ERR(g)) {
1544 err = PTR_ERR(g);
1545 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
1546 err);
1547 goto err_group;
1548 }
1549
1550 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
1551 MLX5_SET(copy_action_in, modact, src_field,
1552 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1553 MLX5_SET(copy_action_in, modact, dst_field,
1554 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1555 mod_hdr = mlx5_modify_header_alloc(esw->dev,
1556 MLX5_FLOW_NAMESPACE_KERNEL, 1,
1557 modact);
1558 if (IS_ERR(mod_hdr)) {
1559 err = PTR_ERR(mod_hdr);
1560 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
1561 err);
1562 goto err_mod_hdr;
1563 }
1564
1565 esw->offloads.ft_offloads_restore = ft;
1566 esw->offloads.restore_group = g;
1567 esw->offloads.restore_copy_hdr_id = mod_hdr;
1568
1569 kvfree(flow_group_in);
1570
1571 return 0;
1572
1573 err_mod_hdr:
1574 mlx5_destroy_flow_group(g);
1575 err_group:
1576 mlx5_destroy_flow_table(ft);
1577 out_free:
1578 kvfree(flow_group_in);
1579
1580 return err;
1581 }
1582
1583 static int esw_offloads_start(struct mlx5_eswitch *esw,
1584 struct netlink_ext_ack *extack)
1585 {
1586 int err, err1;
1587
1588 if (esw->mode != MLX5_ESWITCH_LEGACY &&
1589 !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1590 NL_SET_ERR_MSG_MOD(extack,
1591 "Can't set offloads mode, SRIOV legacy not enabled");
1592 return -EINVAL;
1593 }
1594
1595 mlx5_eswitch_disable_locked(esw, false);
1596 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
1597 esw->dev->priv.sriov.num_vfs);
1598 if (err) {
1599 NL_SET_ERR_MSG_MOD(extack,
1600 "Failed setting eswitch to offloads");
1601 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
1602 MLX5_ESWITCH_IGNORE_NUM_VFS);
1603 if (err1) {
1604 NL_SET_ERR_MSG_MOD(extack,
1605 "Failed setting eswitch back to legacy");
1606 }
1607 }
1608 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1609 if (mlx5_eswitch_inline_mode_get(esw,
1610 &esw->offloads.inline_mode)) {
1611 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1612 NL_SET_ERR_MSG_MOD(extack,
1613 "Inline mode is different between vports");
1614 }
1615 }
1616 return err;
1617 }
1618
1619 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1620 {
1621 kfree(esw->offloads.vport_reps);
1622 }
1623
1624 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1625 {
1626 int total_vports = esw->total_vports;
1627 struct mlx5_eswitch_rep *rep;
1628 int vport_index;
1629 u8 rep_type;
1630
1631 esw->offloads.vport_reps = kcalloc(total_vports,
1632 sizeof(struct mlx5_eswitch_rep),
1633 GFP_KERNEL);
1634 if (!esw->offloads.vport_reps)
1635 return -ENOMEM;
1636
1637 mlx5_esw_for_all_reps(esw, vport_index, rep) {
1638 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
1639 rep->vport_index = vport_index;
1640
1641 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1642 atomic_set(&rep->rep_data[rep_type].state,
1643 REP_UNREGISTERED);
1644 }
1645
1646 return 0;
1647 }
1648
1649 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1650 struct mlx5_eswitch_rep *rep, u8 rep_type)
1651 {
1652 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1653 REP_LOADED, REP_REGISTERED) == REP_LOADED)
1654 esw->offloads.rep_ops[rep_type]->unload(rep);
1655 }
1656
1657 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1658 {
1659 struct mlx5_eswitch_rep *rep;
1660 int i;
1661
1662 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
1663 __esw_offloads_unload_rep(esw, rep, rep_type);
1664
1665 if (mlx5_ecpf_vport_exists(esw->dev)) {
1666 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1667 __esw_offloads_unload_rep(esw, rep, rep_type);
1668 }
1669
1670 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1671 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1672 __esw_offloads_unload_rep(esw, rep, rep_type);
1673 }
1674
1675 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1676 __esw_offloads_unload_rep(esw, rep, rep_type);
1677 }
1678
1679 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
1680 {
1681 struct mlx5_eswitch_rep *rep;
1682 int rep_type;
1683 int err;
1684
1685 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1686 return 0;
1687
1688 rep = mlx5_eswitch_get_rep(esw, vport_num);
1689 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1690 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1691 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1692 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1693 if (err)
1694 goto err_reps;
1695 }
1696
1697 return 0;
1698
1699 err_reps:
1700 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
1701 for (--rep_type; rep_type >= 0; rep_type--)
1702 __esw_offloads_unload_rep(esw, rep, rep_type);
1703 return err;
1704 }
1705
1706 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
1707 {
1708 struct mlx5_eswitch_rep *rep;
1709 int rep_type;
1710
1711 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1712 return;
1713
1714 rep = mlx5_eswitch_get_rep(esw, vport_num);
1715 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
1716 __esw_offloads_unload_rep(esw, rep, rep_type);
1717 }
1718
1719 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
1720 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1721
1722 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1723 struct mlx5_eswitch *peer_esw)
1724 {
1725 int err;
1726
1727 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1728 if (err)
1729 return err;
1730
1731 return 0;
1732 }
1733
1734 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1735 {
1736 mlx5e_tc_clean_fdb_peer_flows(esw);
1737 esw_del_fdb_peer_miss_rules(esw);
1738 }
1739
1740 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1741 struct mlx5_eswitch *peer_esw,
1742 bool pair)
1743 {
1744 struct mlx5_flow_root_namespace *peer_ns;
1745 struct mlx5_flow_root_namespace *ns;
1746 int err;
1747
1748 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1749 ns = esw->dev->priv.steering->fdb_root_ns;
1750
1751 if (pair) {
1752 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1753 if (err)
1754 return err;
1755
1756 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
1757 if (err) {
1758 mlx5_flow_namespace_set_peer(ns, NULL);
1759 return err;
1760 }
1761 } else {
1762 mlx5_flow_namespace_set_peer(ns, NULL);
1763 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1764 }
1765
1766 return 0;
1767 }
1768
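/* devcom event handler: on PAIR, cross-set the FDB peer namespaces and
 * install peer miss rules in both e-switches; on UNPAIR, tear both down.
 */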
1769 static int mlx5_esw_offloads_devcom_event(int event,
1770 void *my_data,
1771 void *event_data)
1772 {
1773 struct mlx5_eswitch *esw = my_data;
1774 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1775 struct mlx5_eswitch *peer_esw = event_data;
1776 int err;
1777
1778 switch (event) {
1779 case ESW_OFFLOADS_DEVCOM_PAIR:
1780 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1781 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1782 break;
1783
1784 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
1785 if (err)
1786 goto err_out;
1787 err = mlx5_esw_offloads_pair(esw, peer_esw);
1788 if (err)
1789 goto err_peer;
1790
1791 err = mlx5_esw_offloads_pair(peer_esw, esw);
1792 if (err)
1793 goto err_pair;
1794
1795 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1796 break;
1797
1798 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1799 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1800 break;
1801
1802 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1803 mlx5_esw_offloads_unpair(peer_esw);
1804 mlx5_esw_offloads_unpair(esw);
1805 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1806 break;
1807 }
1808
1809 return 0;
1810
1811 err_pair:
1812 mlx5_esw_offloads_unpair(esw);
1813 err_peer:
1814 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
1815 err_out:
1816 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1817 event, err);
1818 return err;
1819 }
1820
1821 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1822 {
1823 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1824
1825 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1826 mutex_init(&esw->offloads.peer_mutex);
1827
1828 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1829 return;
1830
1831 mlx5_devcom_register_component(devcom,
1832 MLX5_DEVCOM_ESW_OFFLOADS,
1833 mlx5_esw_offloads_devcom_event,
1834 esw);
1835
1836 mlx5_devcom_send_event(devcom,
1837 MLX5_DEVCOM_ESW_OFFLOADS,
1838 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1839 }
1840
1841 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1842 {
1843 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1844
1845 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1846 return;
1847
1848 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1849 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1850
1851 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1852 }
1853
1854 static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1855 struct mlx5_vport *vport)
1856 {
1857 struct mlx5_flow_act flow_act = {0};
1858 struct mlx5_flow_spec *spec;
1859 int err = 0;
1860
1861 /* For prio tag mode, there is only 1 FTE:
1862 * 1) Untagged packets - push prio tag VLAN and modify metadata if
1863 * required, allow
1864 * Unmatched traffic is allowed by default
1865 */
1866 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1867 if (!spec)
1868 return -ENOMEM;
1869
1870 /* Untagged packets - push prio tag VLAN, allow */
1871 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1872 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1873 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1874 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1875 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1876 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1877 flow_act.vlan[0].vid = 0;
1878 flow_act.vlan[0].prio = 0;
1879
1880 if (vport->ingress.offloads.modify_metadata_rule) {
1881 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1882 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1883 }
1884
1885 vport->ingress.allow_rule =
1886 mlx5_add_flow_rules(vport->ingress.acl, spec,
1887 &flow_act, NULL, 0);
1888 if (IS_ERR(vport->ingress.allow_rule)) {
1889 err = PTR_ERR(vport->ingress.allow_rule);
1890 esw_warn(esw->dev,
1891 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1892 vport->vport, err);
1893 vport->ingress.allow_rule = NULL;
1894 }
1895
1896 kvfree(spec);
1897 return err;
1898 }
1899
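/* Allocate a modify-header action that writes this vport's source-port
 * metadata into REG_C_0 and install a match-all ingress ACL rule that
 * applies it to every packet arriving from the vport.
 */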
1900 static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1901 struct mlx5_vport *vport)
1902 {
1903 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1904 struct mlx5_flow_act flow_act = {};
1905 int err = 0;
1906 u32 key;
1907
1908 key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
1909 key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
1910
1911 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1912 MLX5_SET(set_action_in, action, field,
1913 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1914 MLX5_SET(set_action_in, action, data, key);
1915 MLX5_SET(set_action_in, action, offset,
1916 ESW_SOURCE_PORT_METADATA_OFFSET);
1917 MLX5_SET(set_action_in, action, length,
1918 ESW_SOURCE_PORT_METADATA_BITS);
1919
1920 vport->ingress.offloads.modify_metadata =
1921 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1922 1, action);
1923 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
1924 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
1925 esw_warn(esw->dev,
1926 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1927 vport->vport, err);
1928 return err;
1929 }
1930
1931 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1932 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1933 vport->ingress.offloads.modify_metadata_rule =
1934 mlx5_add_flow_rules(vport->ingress.acl,
1935 NULL, &flow_act, NULL, 0);
1936 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
1937 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
1938 esw_warn(esw->dev,
1939 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1940 vport->vport, err);
1941 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
1942 vport->ingress.offloads.modify_metadata_rule = NULL;
1943 }
1944 return err;
1945 }
1946
1947 static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1948 struct mlx5_vport *vport)
1949 {
1950 if (vport->ingress.offloads.modify_metadata_rule) {
1951 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
1952 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
1953
1954 vport->ingress.offloads.modify_metadata_rule = NULL;
1955 }
1956 }
1957
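/* Create the ingress ACL flow groups: a single-FTE group matching untagged
 * packets when prio-tag is required, and a match-all group for the metadata
 * rule when vport-metadata matching is enabled.
 */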
1958 static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
1959 struct mlx5_vport *vport)
1960 {
1961 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1962 struct mlx5_flow_group *g;
1963 void *match_criteria;
1964 u32 *flow_group_in;
1965 u32 flow_index = 0;
1966 int ret = 0;
1967
1968 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1969 if (!flow_group_in)
1970 return -ENOMEM;
1971
1972 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
1973 /* This group holds the FTE that matches untagged packets when
1974 * prio_tag is enabled.
1975 */
1976 memset(flow_group_in, 0, inlen);
1977
1978 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1979 flow_group_in, match_criteria);
1980 MLX5_SET(create_flow_group_in, flow_group_in,
1981 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1982 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1983 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1984 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1985
1986 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1987 if (IS_ERR(g)) {
1988 ret = PTR_ERR(g);
1989 esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
1990 vport->vport, ret);
1991 goto prio_tag_err;
1992 }
1993 vport->ingress.offloads.metadata_prio_tag_grp = g;
1994 flow_index++;
1995 }
1996
1997 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1998 /* This group holds a match-all FTE used to add metadata: for tagged
1999 * packets as a fall-through when prio-tag is enabled, or for all
2000 * traffic when prio-tag is disabled.
2001 */
2002 memset(flow_group_in, 0, inlen);
2003 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
2004 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
2005
2006 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
2007 if (IS_ERR(g)) {
2008 ret = PTR_ERR(g);
2009 esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
2010 vport->vport, ret);
2011 goto metadata_err;
2012 }
2013 vport->ingress.offloads.metadata_allmatch_grp = g;
2014 }
2015
2016 kvfree(flow_group_in);
2017 return 0;
2018
2019 metadata_err:
2020 if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
2021 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2022 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
2023 }
2024 prio_tag_err:
2025 kvfree(flow_group_in);
2026 return ret;
2027 }
2028
2029 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
2030 {
2031 if (vport->ingress.offloads.metadata_allmatch_grp) {
2032 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
2033 vport->ingress.offloads.metadata_allmatch_grp = NULL;
2034 }
2035
2036 if (vport->ingress.offloads.metadata_prio_tag_grp) {
2037 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2038 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
2039 }
2040 }
2041
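/* Configure the vport ingress ACL for offloads mode: size the table by the
 * number of FTEs needed (metadata and/or prio-tag), create the groups, then
 * the rules; on failure everything is unrolled in reverse order.
 */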
2042 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
2043 struct mlx5_vport *vport)
2044 {
2045 int num_ftes = 0;
2046 int err;
2047
2048 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
2049 !esw_check_ingress_prio_tag_enabled(esw, vport))
2050 return 0;
2051
2052 esw_vport_cleanup_ingress_rules(esw, vport);
2053
2054 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2055 num_ftes++;
2056 if (esw_check_ingress_prio_tag_enabled(esw, vport))
2057 num_ftes++;
2058
2059 err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
2060 if (err) {
2061 esw_warn(esw->dev,
2062 "failed to enable ingress acl (%d) on vport[%d]\n",
2063 err, vport->vport);
2064 return err;
2065 }
2066
2067 err = esw_vport_create_ingress_acl_group(esw, vport);
2068 if (err)
2069 goto group_err;
2070
2071 esw_debug(esw->dev,
2072 "vport[%d] configure ingress rules\n", vport->vport);
2073
2074 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2075 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
2076 if (err)
2077 goto metadata_err;
2078 }
2079
2080 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
2081 err = esw_vport_ingress_prio_tag_config(esw, vport);
2082 if (err)
2083 goto prio_tag_err;
2084 }
2085 return 0;
2086
2087 prio_tag_err:
2088 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2089 metadata_err:
2090 esw_vport_destroy_ingress_acl_group(vport);
2091 group_err:
2092 esw_vport_destroy_ingress_acl_table(vport);
2093 return err;
2094 }
2095
2096 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
2097 struct mlx5_vport *vport)
2098 {
2099 int err;
2100
2101 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
2102 return 0;
2103
2104 esw_vport_cleanup_egress_rules(esw, vport);
2105
2106 err = esw_vport_enable_egress_acl(esw, vport);
2107 if (err)
2108 return err;
2109
2110 /* For prio tag mode, there is only 1 FTE:
2111 * 1) prio tag packets - pop the prio tag VLAN, allow
2112 * Unmatched traffic is allowed by default
2113 */
2114 esw_debug(esw->dev,
2115 "vport[%d] configure prio tag egress rules\n", vport->vport);
2116
2117 /* prio tag vlan rule - pop it so VF receives untagged packets */
2118 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
2119 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
2120 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
2121 if (err)
2122 esw_vport_disable_egress_acl(esw, vport);
2123
2124 return err;
2125 }
2126
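/* Matching on vport metadata requires uplink ingress ACL support, the
 * ability to propagate REG_C_0 from the FDB to the vport, and the
 * flow_source capability; it is not used when an embedded CPU PF (ECPF)
 * is present.
 */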
2127 static bool
2128 esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2129 {
2130 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2131 return false;
2132
2133 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2134 MLX5_FDB_TO_VPORT_REG_C_0))
2135 return false;
2136
2137 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2138 return false;
2139
2140 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2141 mlx5_ecpf_vport_exists(esw->dev))
2142 return false;
2143
2144 return true;
2145 }
2146
2147 static bool
2148 esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
2149 {
2150 return mlx5_core_mp_enabled(esw->dev);
2151 }
2152
2153 static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
2154 {
2155 return esw_check_vport_match_metadata_mandatory(esw) &&
2156 esw_check_vport_match_metadata_supported(esw);
2157 }
2158
2159 int
2160 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2161 struct mlx5_vport *vport)
2162 {
2163 int err;
2164
2165 err = esw_vport_ingress_config(esw, vport);
2166 if (err)
2167 return err;
2168
2169 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
2170 err = esw_vport_egress_config(esw, vport);
2171 if (err) {
2172 esw_vport_cleanup_ingress_rules(esw, vport);
2173 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2174 esw_vport_destroy_ingress_acl_group(vport);
2175 esw_vport_destroy_ingress_acl_table(vport);
2176 }
2177 }
2178 return err;
2179 }
2180
2181 void
2182 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2183 struct mlx5_vport *vport)
2184 {
2185 esw_vport_disable_egress_acl(esw, vport);
2186 esw_vport_cleanup_ingress_rules(esw, vport);
2187 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2188 esw_vport_destroy_ingress_acl_group(vport);
2189 esw_vport_destroy_ingress_acl_table(vport);
2190 }
2191
2192 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2193 {
2194 struct mlx5_vport *vport;
2195 int err;
2196
2197 if (esw_use_vport_metadata(esw))
2198 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2199
2200 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2201 err = esw_vport_create_offloads_acl_tables(esw, vport);
2202 if (err)
2203 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2204 return err;
2205 }
2206
2207 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2208 {
2209 struct mlx5_vport *vport;
2210
2211 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2212 esw_vport_destroy_offloads_acl_tables(esw, vport);
2213 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2214 }
2215
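/* Create all offloads steering objects in dependency order: the uplink ACL
 * tables, the offloads table, the restore table, the FDB tables and the
 * vport RX group; the per-vport FDB hash table is initialized last. Errors
 * unwind in reverse order.
 */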
2216 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
2217 {
2218 int num_vfs = esw->esw_funcs.num_vfs;
2219 int total_vports;
2220 int err;
2221
2222 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2223 total_vports = esw->total_vports;
2224 else
2225 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2226
2227 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
2228
2229 err = esw_create_uplink_offloads_acl_tables(esw);
2230 if (err)
2231 return err;
2232
2233 err = esw_create_offloads_table(esw, total_vports);
2234 if (err)
2235 goto create_offloads_err;
2236
2237 err = esw_create_restore_table(esw);
2238 if (err)
2239 goto create_restore_err;
2240
2241 err = esw_create_offloads_fdb_tables(esw, total_vports);
2242 if (err)
2243 goto create_fdb_err;
2244
2245 err = esw_create_vport_rx_group(esw, total_vports);
2246 if (err)
2247 goto create_fg_err;
2248
2249 mutex_init(&esw->fdb_table.offloads.vports.lock);
2250 hash_init(esw->fdb_table.offloads.vports.table);
2251
2252 return 0;
2253
2254 create_fg_err:
2255 esw_destroy_offloads_fdb_tables(esw);
2256 create_fdb_err:
2257 esw_destroy_restore_table(esw);
2258 create_restore_err:
2259 esw_destroy_offloads_table(esw);
2260 create_offloads_err:
2261 esw_destroy_uplink_offloads_acl_tables(esw);
2262
2263 return err;
2264 }
2265
2266 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2267 {
2268 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
2269 esw_destroy_vport_rx_group(esw);
2270 esw_destroy_offloads_fdb_tables(esw);
2271 esw_destroy_restore_table(esw);
2272 esw_destroy_offloads_table(esw);
2273 esw_destroy_uplink_offloads_acl_tables(esw);
2274 }
2275
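/* React to a change in the number of host VFs reported by firmware (ignored
 * if the host PF is disabled or the count is unchanged). The count only ever
 * moves between 0 and N, so either unload all currently loaded VF vports or
 * load the newly reported ones.
 */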
2276 static void
2277 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
2278 {
2279 bool host_pf_disabled;
2280 u16 new_num_vfs;
2281
2282 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2283 host_params_context.host_num_of_vfs);
2284 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2285 host_params_context.host_pf_disabled);
2286
2287 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2288 return;
2289
2290 /* Number of VFs can only change from "0 to x" or "x to 0". */
2291 if (esw->esw_funcs.num_vfs > 0) {
2292 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
2293 } else {
2294 int err;
2295
2296 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
2297 MLX5_VPORT_UC_ADDR_CHANGE);
2298 if (err)
2299 return;
2300 }
2301 esw->esw_funcs.num_vfs = new_num_vfs;
2302 }
2303
2304 static void esw_functions_changed_event_handler(struct work_struct *work)
2305 {
2306 struct mlx5_host_work *host_work;
2307 struct mlx5_eswitch *esw;
2308 const u32 *out;
2309
2310 host_work = container_of(work, struct mlx5_host_work, work);
2311 esw = host_work->esw;
2312
2313 out = mlx5_esw_query_functions(esw->dev);
2314 if (IS_ERR(out))
2315 goto out;
2316
2317 esw_vfs_changed_event_handler(esw, out);
2318 kvfree(out);
2319 out:
2320 kfree(host_work);
2321 }
2322
2323 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
2324 {
2325 struct mlx5_esw_functions *esw_funcs;
2326 struct mlx5_host_work *host_work;
2327 struct mlx5_eswitch *esw;
2328
2329 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2330 if (!host_work)
2331 return NOTIFY_DONE;
2332
2333 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2334 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
2335
2336 host_work->esw = esw;
2337
2338 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
2339 queue_work(esw->work_queue, &host_work->work);
2340
2341 return NOTIFY_OK;
2342 }
2343
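/* Bring the e-switch into offloads (switchdev) mode: pick the default encap
 * mode from FW caps, enable RoCE and vport-metadata passing, create the
 * steering objects, load the uplink representor first, then enable the
 * PF/VF vports, and finally start devcom pairing.
 */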
2344 int esw_offloads_enable(struct mlx5_eswitch *esw)
2345 {
2346 struct mlx5_vport *vport;
2347 int err, i;
2348
2349 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2350 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2351 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2352 else
2353 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2354
2355 mutex_init(&esw->offloads.termtbl_mutex);
2356 mlx5_rdma_enable_roce(esw->dev);
2357
2358 err = esw_set_passing_vport_metadata(esw, true);
2359 if (err)
2360 goto err_vport_metadata;
2361
2362 err = esw_offloads_steering_init(esw);
2363 if (err)
2364 goto err_steering_init;
2365
2366 /* Representor will control the vport link state */
2367 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
2368 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
2369
2370 /* Uplink vport rep must load first. */
2371 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
2372 if (err)
2373 goto err_uplink;
2374
2375 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
2376 if (err)
2377 goto err_vports;
2378
2379 esw_offloads_devcom_init(esw);
2380
2381 return 0;
2382
2383 err_vports:
2384 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2385 err_uplink:
2386 esw_set_passing_vport_metadata(esw, false);
2387 err_steering_init:
2388 esw_offloads_steering_cleanup(esw);
2389 err_vport_metadata:
2390 mlx5_rdma_disable_roce(esw->dev);
2391 mutex_destroy(&esw->offloads.termtbl_mutex);
2392 return err;
2393 }
2394
2395 static int esw_offloads_stop(struct mlx5_eswitch *esw,
2396 struct netlink_ext_ack *extack)
2397 {
2398 int err, err1;
2399
2400 mlx5_eswitch_disable_locked(esw, false);
2401 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
2402 MLX5_ESWITCH_IGNORE_NUM_VFS);
2403 if (err) {
2404 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
2405 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
2406 MLX5_ESWITCH_IGNORE_NUM_VFS);
2407 if (err1) {
2408 NL_SET_ERR_MSG_MOD(extack,
2409 "Failed setting eswitch back to offloads");
2410 }
2411 }
2412
2413 return err;
2414 }
2415
2416 void esw_offloads_disable(struct mlx5_eswitch *esw)
2417 {
2418 esw_offloads_devcom_cleanup(esw);
2419 mlx5_eswitch_disable_pf_vf_vports(esw);
2420 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2421 esw_set_passing_vport_metadata(esw, false);
2422 esw_offloads_steering_cleanup(esw);
2423 mlx5_rdma_disable_roce(esw->dev);
2424 mutex_destroy(&esw->offloads.termtbl_mutex);
2425 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2426 }
2427
2428 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
2429 {
2430 switch (mode) {
2431 case DEVLINK_ESWITCH_MODE_LEGACY:
2432 *mlx5_mode = MLX5_ESWITCH_LEGACY;
2433 break;
2434 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
2435 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
2436 break;
2437 default:
2438 return -EINVAL;
2439 }
2440
2441 return 0;
2442 }
2443
2444 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2445 {
2446 switch (mlx5_mode) {
2447 case MLX5_ESWITCH_LEGACY:
2448 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2449 break;
2450 case MLX5_ESWITCH_OFFLOADS:
2451 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2452 break;
2453 default:
2454 return -EINVAL;
2455 }
2456
2457 return 0;
2458 }
2459
2460 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2461 {
2462 switch (mode) {
2463 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2464 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2465 break;
2466 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2467 *mlx5_mode = MLX5_INLINE_MODE_L2;
2468 break;
2469 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2470 *mlx5_mode = MLX5_INLINE_MODE_IP;
2471 break;
2472 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2473 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2474 break;
2475 default:
2476 return -EINVAL;
2477 }
2478
2479 return 0;
2480 }
2481
2482 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2483 {
2484 switch (mlx5_mode) {
2485 case MLX5_INLINE_MODE_NONE:
2486 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2487 break;
2488 case MLX5_INLINE_MODE_L2:
2489 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2490 break;
2491 case MLX5_INLINE_MODE_IP:
2492 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2493 break;
2494 case MLX5_INLINE_MODE_TCP_UDP:
2495 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2496 break;
2497 default:
2498 return -EINVAL;
2499 }
2500
2501 return 0;
2502 }
2503
2504 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
2505 {
2506 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2507 return -EOPNOTSUPP;
2508
2509 if (!MLX5_ESWITCH_MANAGER(dev))
2510 return -EPERM;
2511
2512 return 0;
2513 }
2514
2515 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
2516 {
2517 /* devlink commands in NONE eswitch mode are currently supported only
2518 * on ECPF.
2519 */
2520 return (esw->mode == MLX5_ESWITCH_NONE &&
2521 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
2522 }
2523
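/* Devlink callback for switching between legacy and switchdev modes, e.g.
 * with iproute2: devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 * (the PCI address is just an example).
 */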
2524 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2525 struct netlink_ext_ack *extack)
2526 {
2527 struct mlx5_core_dev *dev = devlink_priv(devlink);
2528 struct mlx5_eswitch *esw = dev->priv.eswitch;
2529 u16 cur_mlx5_mode, mlx5_mode = 0;
2530 int err;
2531
2532 err = mlx5_eswitch_check(dev);
2533 if (err)
2534 return err;
2535
2536 if (esw_mode_from_devlink(mode, &mlx5_mode))
2537 return -EINVAL;
2538
2539 mutex_lock(&esw->mode_lock);
2540 err = eswitch_devlink_esw_mode_check(esw);
2541 if (err)
2542 goto unlock;
2543
2544 cur_mlx5_mode = esw->mode;
2545
2546 if (cur_mlx5_mode == mlx5_mode)
2547 goto unlock;
2548
2549 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
2550 err = esw_offloads_start(esw, extack);
2551 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
2552 err = esw_offloads_stop(esw, extack);
2553 else
2554 err = -EINVAL;
2555
2556 unlock:
2557 mutex_unlock(&esw->mode_lock);
2558 return err;
2559 }
2560
2561 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2562 {
2563 struct mlx5_core_dev *dev = devlink_priv(devlink);
2564 struct mlx5_eswitch *esw = dev->priv.eswitch;
2565 int err;
2566
2567 err = mlx5_eswitch_check(dev);
2568 if (err)
2569 return err;
2570
2571 mutex_lock(&esw->mode_lock);
2572 err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
2573 if (err)
2574 goto unlock;
2575
2576 err = esw_mode_to_devlink(esw->mode, mode);
2577 unlock:
2578 mutex_unlock(&esw->mode_lock);
2579 return err;
2580 }
2581
2582 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2583 struct netlink_ext_ack *extack)
2584 {
2585 struct mlx5_core_dev *dev = devlink_priv(devlink);
2586 struct mlx5_eswitch *esw = dev->priv.eswitch;
2587 int err, vport, num_vport;
2588 u8 mlx5_mode;
2589
2590 err = mlx5_eswitch_check(dev);
2591 if (err)
2592 return err;
2593
2594 mutex_lock(&esw->mode_lock);
2595 err = eswitch_devlink_esw_mode_check(esw);
2596 if (err)
2597 goto out;
2598
2599 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2600 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2601 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2602 goto out;
2603 /* fall through */
2604 case MLX5_CAP_INLINE_MODE_L2:
2605 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
2606 err = -EOPNOTSUPP;
2607 goto out;
2608 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2609 break;
2610 }
2611
2612 if (atomic64_read(&esw->offloads.num_flows) > 0) {
2613 NL_SET_ERR_MSG_MOD(extack,
2614 "Can't set inline mode when flows are configured");
2615 err = -EOPNOTSUPP;
2616 goto out;
2617 }
2618
2619 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2620 if (err)
2621 goto out;
2622
2623 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
2624 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2625 if (err) {
2626 NL_SET_ERR_MSG_MOD(extack,
2627 "Failed to set min inline on vport");
2628 goto revert_inline_mode;
2629 }
2630 }
2631
2632 esw->offloads.inline_mode = mlx5_mode;
2633 mutex_unlock(&esw->mode_lock);
2634 return 0;
2635
2636 revert_inline_mode:
2637 num_vport = --vport;
2638 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
2639 mlx5_modify_nic_vport_min_inline(dev,
2640 vport,
2641 esw->offloads.inline_mode);
2642 out:
2643 mutex_unlock(&esw->mode_lock);
2644 return err;
2645 }
2646
2647 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2648 {
2649 struct mlx5_core_dev *dev = devlink_priv(devlink);
2650 struct mlx5_eswitch *esw = dev->priv.eswitch;
2651 int err;
2652
2653 err = mlx5_eswitch_check(dev);
2654 if (err)
2655 return err;
2656
2657 mutex_lock(&esw->mode_lock);
2658 err = eswitch_devlink_esw_mode_check(esw);
2659 if (err)
2660 goto unlock;
2661
2662 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2663 unlock:
2664 mutex_unlock(&esw->mode_lock);
2665 return err;
2666 }
2667
2668 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2669 enum devlink_eswitch_encap_mode encap,
2670 struct netlink_ext_ack *extack)
2671 {
2672 struct mlx5_core_dev *dev = devlink_priv(devlink);
2673 struct mlx5_eswitch *esw = dev->priv.eswitch;
2674 int err;
2675
2676 err = mlx5_eswitch_check(dev);
2677 if (err)
2678 return err;
2679
2680 mutex_lock(&esw->mode_lock);
2681 err = eswitch_devlink_esw_mode_check(esw);
2682 if (err)
2683 goto unlock;
2684
2685 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
2686 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
2687 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
2688 err = -EOPNOTSUPP;
2689 goto unlock;
2690 }
2691
2692 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
2693 err = -EOPNOTSUPP;
2694 goto unlock;
2695 }
2696
2697 if (esw->mode == MLX5_ESWITCH_LEGACY) {
2698 esw->offloads.encap = encap;
2699 goto unlock;
2700 }
2701
2702 if (esw->offloads.encap == encap)
2703 goto unlock;
2704
2705 if (atomic64_read(&esw->offloads.num_flows) > 0) {
2706 NL_SET_ERR_MSG_MOD(extack,
2707 "Can't set encapsulation when flows are configured");
2708 err = -EOPNOTSUPP;
2709 goto unlock;
2710 }
2711
2712 esw_destroy_offloads_fdb_tables(esw);
2713
2714 esw->offloads.encap = encap;
2715
2716 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2717
2718 if (err) {
2719 NL_SET_ERR_MSG_MOD(extack,
2720 "Failed re-creating fast FDB table");
2721 esw->offloads.encap = !encap;
2722 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
2723 }
2724
2725 unlock:
2726 mutex_unlock(&esw->mode_lock);
2727 return err;
2728 }
2729
2730 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2731 enum devlink_eswitch_encap_mode *encap)
2732 {
2733 struct mlx5_core_dev *dev = devlink_priv(devlink);
2734 struct mlx5_eswitch *esw = dev->priv.eswitch;
2735 int err;
2736
2737 err = mlx5_eswitch_check(dev);
2738 if (err)
2739 return err;
2740
2741 mutex_lock(&esw->mode_lock);
2742 err = eswitch_devlink_esw_mode_check(esw);
2743 if (err)
2744 goto unlock;
2745
2746 *encap = esw->offloads.encap;
2747 unlock:
2748 mutex_unlock(&esw->mode_lock);
2749 return 0;
2750 }
2751
2752 static bool
2753 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
2754 {
2755 /* Currently, only an ECPF-based device has a representor for the host PF. */
2756 if (vport_num == MLX5_VPORT_PF &&
2757 !mlx5_core_is_ecpf_esw_manager(esw->dev))
2758 return false;
2759
2760 if (vport_num == MLX5_VPORT_ECPF &&
2761 !mlx5_ecpf_vport_exists(esw->dev))
2762 return false;
2763
2764 return true;
2765 }
2766
2767 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
2768 const struct mlx5_eswitch_rep_ops *ops,
2769 u8 rep_type)
2770 {
2771 struct mlx5_eswitch_rep_data *rep_data;
2772 struct mlx5_eswitch_rep *rep;
2773 int i;
2774
2775 esw->offloads.rep_ops[rep_type] = ops;
2776 mlx5_esw_for_all_reps(esw, i, rep) {
2777 if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
2778 rep_data = &rep->rep_data[rep_type];
2779 atomic_set(&rep_data->state, REP_REGISTERED);
2780 }
2781 }
2782 }
2783 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
2784
2785 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
2786 {
2787 struct mlx5_eswitch_rep *rep;
2788 int i;
2789
2790 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
2791 __unload_reps_all_vport(esw, rep_type);
2792
2793 mlx5_esw_for_all_reps(esw, i, rep)
2794 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2795 }
2796 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
2797
2798 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
2799 {
2800 struct mlx5_eswitch_rep *rep;
2801
2802 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2803 return rep->rep_data[rep_type].priv;
2804 }
2805
2806 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2807 u16 vport,
2808 u8 rep_type)
2809 {
2810 struct mlx5_eswitch_rep *rep;
2811
2812 rep = mlx5_eswitch_get_rep(esw, vport);
2813
2814 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2815 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2816 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
2817 return NULL;
2818 }
2819 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
2820
2821 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2822 {
2823 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
2824 }
2825 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2826
2827 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2828 u16 vport)
2829 {
2830 return mlx5_eswitch_get_rep(esw, vport);
2831 }
2832 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
2833
2834 bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2835 {
2836 return vport_num >= MLX5_VPORT_FIRST_VF &&
2837 vport_num <= esw->dev->priv.sriov.max_vfs;
2838 }
2839
2840 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
2841 {
2842 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
2843 }
2844 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
2845
2846 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2847 {
2848 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2849 }
2850 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2851
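/* Encode the source port for REG_C_0 matching. The value occupies the top
 * ESW_SOURCE_PORT_METADATA_BITS of the returned 32-bit word:
 *   vhca_id (ESW_VHCA_ID_BITS) | vport_num (ESW_VPORT_BITS)
 * Both fields are trimmed to their widths; the WARN_ON_ONCEs below catch
 * values that would overlap after trimming.
 */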
2852 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
2853 u16 vport_num)
2854 {
2855 u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
2856 u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
2857 u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
2858 u32 val;
2859
2860 /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
2861 WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
2862
2863 /* Trim vhca_id to ESW_VHCA_ID_BITS */
2864 vhca_id &= vhca_id_mask;
2865
2866 /* Make sure the uplink and ecpf map to the end of the ESW_VPORT_BITS
2867 * range so they don't overlap with VF numbers, or with each other, after trimming.
2868 */
2869 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
2870 vport_num_mask - 1);
2871 WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
2872 vport_num_mask - 1);
2873 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
2874 (MLX5_VPORT_ECPF & vport_num_mask));
2875
2876 /* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
2877 * overlap with the uplink and ecpf.
2878 */
2879 if (vport_num != MLX5_VPORT_UPLINK &&
2880 vport_num != MLX5_VPORT_ECPF)
2881 WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
2882
2883 /* We can now trim vport_num to ESW_VPORT_BITS */
2884 vport_num &= vport_num_mask;
2885
2886 val = (vhca_id << ESW_VPORT_BITS) | vport_num;
2887 return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
2888 }
2889 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);