/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

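/* The per vport tables are reference counted: esw_vport_tbl_get() returns an
 * existing table for a (vport, chain, prio, vhca_id) key and bumps num_rules,
 * while esw_vport_tbl_put() drops the count and destroys the table once the
 * last rule is gone. Lookups use a jhash over the packed mlx5_vport_key.
 */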
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb))
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}

	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

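/* Set the source port match of a rule. With metadata matching enabled the
 * source vport is stamped into metadata_reg_c_0 at ingress, so rules match on
 * that register instead of the misc source_port field; otherwise fall back to
 * matching on source_port (and the owning VHCA id on merged eswitches).
 */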
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

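/* Build the destination list for an offloaded rule: forward destinations
 * first (a flow table for chain jumps or the slow path, or one vport entry
 * per output rep, each optionally carrying a packet reformat), followed by a
 * flow counter when counting is requested.
 */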
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

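/* A "fwd" rule splits mirroring from the remaining actions: the rule in the
 * fast FDB forwards to the first split_count mirror vports plus a per vport
 * forward table, where a second rule applies the rest of the actions.
 */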
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

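/* Emulated vlan push/pop: when the FW cannot apply per-flow vlan actions they
 * are emulated with per vport settings - a global vlan strip policy plus vlan
 * insertion on the pushing vport - guarded by the refcounts handled below.
 */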
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

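/* Rules matching on (source SQ, eswitch manager port) in the slow path FDB
 * steer traffic sent by a representor netdev back to the vport it represents.
 */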
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
						    sizeof(in));
	if (err)
		return err;

	if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
		esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	else
		esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;

	return 0;
}

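/* Peer miss rules forward traffic whose source port belongs to the peer
 * eswitch of a merged eswitch device to the peer's manager vport.
 */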
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev,  "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

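/* A restore rule matches a chain tag in metadata_reg_c_0, copies reg_c_1 into
 * reg_b through the restore modify header, sets the matching flow tag and
 * forwards to the offloads table, so software can resume processing of a
 * partially offloaded packet from the right chain.
 */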
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

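/* The slow path FDB is sized for MAX_SQ_NVPORTS send-to-vport rules per vport
 * plus MAX_PF_SQ rules for the PF, one peer miss rule per vport and the two
 * match-all miss flows, and is split into flow groups in that order.
 */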
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

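/* Devcom events pair the eswitches of the two ports of a merged eswitch
 * device: pairing installs the peer miss rules in both directions and marks
 * the FDB root namespaces as peers.
 */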
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

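/* Ingress/egress ACL setup for offloads mode: the ingress ACL stamps the
 * source port metadata into reg_c_0 and handles prio tag vlan insertion,
 * while the egress ACL pops the prio tag so VFs receive untagged packets.
 */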
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no matches for add metadata for
		 * tagged packets, if prio-tag is enabled (as a fallthrough),
		 * or all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default.
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}
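/* Background for the rule above (an illustrative sketch, not the exact spec
 * built by mlx5_esw_create_vport_egress_acl_vlan()): the prio-tag match is
 * an outer-header VLAN match on VID 0, roughly
 *
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.cvlan_tag);
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_value,
 *			 outer_headers.cvlan_tag);
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.first_vid);
 *	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
 *
 * A priority tag is an 802.1Q header with VID 0 that carries only the
 * PCP/DEI bits; popping it means the VF always receives untagged frames.
 */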
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}
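/* For context, a minimal sketch (assuming a caller-provided struct
 * mlx5_flow_spec *spec; the in-tree helper for this is
 * mlx5_eswitch_set_rule_source_port()) of how a rule identifies the source
 * vport by matching reg_c_0 metadata instead of the source_port field:
 *
 *	void *misc2;
 *
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 *			     misc_parameters_2);
 *	MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);
 *	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 */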
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}
	return err;
}
static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1)
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
	}

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}
static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	cur_mlx5_mode = esw->mode;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
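/* Usage example from userspace via the devlink tool (the PCI address is of
 * course system specific):
 *
 *	# devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *	# devlink dev eswitch show pci/0000:06:00.0
 *
 * The set command lands in mlx5_devlink_eswitch_mode_set() above; the show
 * command is served by mlx5_devlink_eswitch_mode_get() below.
 */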
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}
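/* Usage example via the devlink tool; the devlink <-> mlx5 value mapping is
 * done by esw_inline_mode_from_devlink()/esw_inline_mode_to_devlink() above:
 *
 *	# devlink dev eswitch set pci/0000:06:00.0 inline-mode transport
 */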
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
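/* Usage example via the devlink tool (recent iproute2 spells the option
 * "encap-mode"; older releases used "encap"):
 *
 *	# devlink dev eswitch set pci/0000:06:00.0 encap-mode basic
 *
 * As enforced above, this only succeeds while no offloaded flows are
 * installed.
 */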
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently only ECPF based devices have a representor for the host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
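/* Usage sketch (illustrative; the real callers are the mlx5e and mlx5_ib
 * representor drivers, and the ops names below are placeholders):
 *
 *	static const struct mlx5_eswitch_rep_ops rep_ops = {
 *		.load = my_rep_load,
 *		.unload = my_rep_unload,
 *		.get_proto_dev = my_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */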
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 val;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	/* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
	 * don't overlap with VF numbers, and themselves, after trimming.
	 */
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
		     (MLX5_VPORT_ECPF & vport_num_mask));

	/* Make sure the VF vport_num fits ESW_VPORT_BITS and doesn't
	 * overlap with pf and ecpf.
	 */
	if (vport_num != MLX5_VPORT_UPLINK &&
	    vport_num != MLX5_VPORT_ECPF)
		WARN_ON_ONCE(vport_num >= vport_num_mask - 1);

	/* We can now trim vport_num to ESW_VPORT_BITS */
	vport_num &= vport_num_mask;

	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
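/* Worked example (illustrative, assuming ESW_VHCA_ID_BITS == 4 and
 * ESW_VPORT_BITS == 12, i.e. the metadata occupies the upper 16 bits of
 * reg_c_0; check eswitch.h for the authoritative values): for vhca_id 0x25
 * and VF vport 3,
 *
 *	vhca_id &= 0xf;			// -> 0x5
 *	val = (0x5 << 12) | 0x3;	// -> 0x5003
 *	return 0x5003 << 16;		// -> 0x50030000
 *
 * Only the top ESW_SOURCE_PORT_METADATA_BITS of reg_c_0 are significant, so
 * rules matching on this value must mask with the same width.
 */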