/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128
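/* A per-vport FDB table is a small auto-grouped flow table keyed by
 * (chain, prio, vport, vhca_id) and shared, via refcounting, by every
 * offloaded rule that needs a second forwarding stage for the same vport
 * (see mlx5_eswitch_add_fwd_rule() below, where the first-stage rule in
 * the chains FDB forwards into this table).
 */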
/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent (padding bytes would otherwise feed jhash()
 * and memcmp() with uninitialized data).
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;
err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
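/* The wrappers below take and release one per-vport table reference for
 * every vport in bulk. They use a scratch rep/attr so that callers which
 * do not have a real flow attr on hand (e.g. at eswitch enable time) can
 * still pre-create the tables.
 */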
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}
/* End: Per vport tables */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}
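/* Stamp the source vport match on a rule. Depending on device support this
 * is done either by matching metadata register C0, written by the vport's
 * ingress ACL (see esw_vport_add_ingress_acl_modify_metadata() below), or
 * by matching the source_port field directly, qualified with the owning
 * vhca_id when two e-switches are merged.
 */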
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
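/* Build the destination list of an offloaded rule: a forward destination
 * (an explicit table, the slow-path tc-end table, a goto-chain table, or up
 * to MLX5_MAX_FLOW_FWD_VPORTS vports), plus an optional flow counter as the
 * last entry.
 */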
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	bool hairpin = false;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
		hairpin = true;
	} else {
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	}
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	if (hairpin)
		attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
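/* "Split" rules are installed in two stages: the rule added below goes into
 * the fast (chains) FDB, performs the per-destination mirroring, and then
 * forwards into the per-vport table, where the rule added by
 * mlx5_eswitch_add_offloaded_rule() (with split set) completes the action
 * list.
 */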
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
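/* Older devices cannot express per-flow vlan push/pop in the FDB, so the
 * actions are emulated with the global per-vport vlan strip/insert
 * settings; the refcounts below track how many offloaded rules currently
 * depend on each emulated setting.
 */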
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}
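/* fdb_to_vport_reg_c_id is a bitmask selecting which REG_C metadata
 * registers the FDB may pass on to the NIC RX domain: REG_C_0 carries the
 * source-vport match metadata and REG_C_1 the chain restore tag (see
 * esw_create_restore_table() below).
 */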
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
						    sizeof(in));
	if (err)
		return err;

	if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
		esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	else
		esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;

	return 0;
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
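/* When two e-switches are merged (dual-port devices), packets arriving
 * from a peer vport must not fall into the regular miss path. One rule per
 * peer vport (PF, ECPF when present, and every VF) forwards such traffic
 * to the peer's e-switch manager vport instead.
 */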
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
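/* Restore rules re-attach packets that miss in hardware to their software
 * tc chain: each rule matches one chain tag in REG_C_0, copies REG_C_1 into
 * REG_B (via the modify header built in esw_create_restore_table() below)
 * so the value is visible to software on receive, sets the same tag as the
 * flow tag, and forwards to the offloads table.
 */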
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));
	kfree(spec);

	return flow_rule;
}
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
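/* The slow path FDB is sized and grouped as follows: a send-to-vport group
 * (matching source sqn/port) of nvports * MAX_SQ_NVPORTS + MAX_PF_SQ
 * entries, a peer miss group with one entry per vport, and a miss group
 * holding the two unicast/multicast miss rules.
 */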
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
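/* Rep lifecycle: reps move between REP_UNREGISTERED, REP_REGISTERED and
 * REP_LOADED. atomic_cmpxchg() on the state makes load/unload idempotent,
 * so a rep is only (un)loaded by the caller that actually wins the state
 * transition.
 */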
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
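/* Pairing is driven by devcom events: on PAIR each side points its FDB root
 * namespace at the peer and installs peer miss rules in both directions; on
 * UNPAIR the rules and the namespace peering are torn down in reverse
 * order.
 */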
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}
static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}
static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no matches for add metadata for
		 * tagged packets, if prio-tag is enabled (as a fallthrough),
		 * or all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}
static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default.
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}
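
/* The helpers below decide whether matching on source-port metadata
 * (written to reg_c_0) is used instead of matching on the source vport
 * number: it requires the device caps checked here, and is mandatory when
 * multiport (mlx5_core_mp_enabled) is set, where the vhca_id embedded in
 * the metadata disambiguates vports belonging to different functions.
 */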
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}

	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
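
/* Steering bring-up order below: uplink ACL tables, offloads table,
 * restore table, FDB tables, vport RX group, then the per-vport FDB
 * hashtable and its lock; the error labels and
 * esw_offloads_steering_cleanup() tear the same objects down in reverse.
 */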
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
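
/* ESW_FUNCTIONS_CHANGED handling is split in two stages: the notifier
 * callback below runs in atomic context, so it only allocates (GFP_ATOMIC)
 * and queues a work item; the work handler then queries firmware via
 * mlx5_esw_query_functions() and loads/unloads VF vports from process
 * context.
 */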
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
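
/* Offloads mode bring-up order in esw_offloads_enable(): pick the encap
 * default from FDB caps, init the termination-table mutex and RoCE, enable
 * metadata passing, init steering, load the uplink representor (which must
 * load first), enable PF/VF vports, and finally start devcom pairing for
 * multi-PF setups. esw_offloads_disable() undoes these in near-reverse
 * order.
 */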
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_set_passing_vport_metadata(esw, false);
err_steering_init:
	esw_offloads_steering_cleanup(esw);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
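
/* Example only (hypothetical PCI address): these translations back the
 * standard devlink UAPI, reached from userspace with, e.g.:
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:03:00.0
 */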
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
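
/* Example only (hypothetical device): the devlink inline-mode knob maps
 * onto these values, e.g.:
 *
 *   devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 */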
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	return 0;
}
static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	cur_mlx5_mode = esw->mode;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
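
/* Example only (hypothetical device; the exact keyword depends on the
 * iproute2 version in use):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 encap enable
 */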
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
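
/* Usage sketch for a representor consumer (names are illustrative; the
 * real ethernet rep ops live in en_rep.c):
 *
 *   static const struct mlx5_eswitch_rep_ops my_rep_ops = {
 *           .load          = my_rep_load,          // hypothetical callbacks
 *           .unload        = my_rep_unload,
 *           .get_proto_dev = my_rep_get_netdev,
 *   };
 *
 *   mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
 *   ...
 *   mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */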
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32
mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					  u16 vport_num)
{
	u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 val;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	/* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
	 * don't overlap with VF numbers, and themselves, after trimming.
	 */
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
		     (MLX5_VPORT_ECPF & vport_num_mask));

	/* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
	 * overlap with pf and ecpf.
	 */
	if (vport_num != MLX5_VPORT_UPLINK &&
	    vport_num != MLX5_VPORT_ECPF)
		WARN_ON_ONCE(vport_num >= vport_num_mask - 1);

	/* We can now trim vport_num to ESW_VPORT_BITS */
	vport_num &= vport_num_mask;

	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
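
/* Worked example (assuming the 8-bit vhca_id / 8-bit vport split of
 * reg_c_0, i.e. ESW_VHCA_ID_BITS == ESW_VPORT_BITS == 8 and
 * ESW_SOURCE_PORT_METADATA_BITS == 16): for vhca_id 0x2 and VF vport 0x5,
 * val = (0x2 << 8) | 0x5 = 0x205, and the returned match value is
 * 0x205 << 16 = 0x02050000. The source port thus occupies the upper 16
 * bits of reg_c_0, leaving the lower bits free for the chain restore tag.
 */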