/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table *hairpin_ft;
	struct mlx5_fc *counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
};

#define MLX5E_TC_MAX_SPLITS 1
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
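/* Illustrative sketch (not driver code): for the encap_flow_item helper
 * below, embedded as mlx5e_tc_flow::encaps[], a list_head taken from an
 * encap entry's flow list ('lh' here is a hypothetical local) maps back
 * to its flow in exactly those two steps:
 *
 *	efi = container_of(lh, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */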
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	unsigned long		flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow    *peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
	struct completion	init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */

	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
 * Upper TUNNEL_INFO_BITS for general tunnel info.
 * Lower ENC_OPTS_BITS bits for enc_opts.
 */
#define TUNNEL_INFO_BITS 6
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
#define ENC_OPTS_BITS 2
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
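/* Worked example (illustrative values): with TUNNEL_INFO_BITS = 6 and
 * ENC_OPTS_BITS = 2, a tunnel mapping id of 3 and an enc_opts id of 1
 * compose as (3 << ENC_OPTS_BITS) | 1 = 0xd, while TUNNEL_ID_MASK is
 * GENMASK(7, 0) = 0xff.
 */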
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 3,
		.mlen = 1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
	data = cpu_to_be32(data) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
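/* Usage sketch (hypothetical values): encode tunnel metadata into
 * reg_c_1 through a modify-header SET action, mirroring the call made
 * from mlx5e_get_flow_tunnel_id() further below:
 *
 *	err = mlx5e_tc_match_to_reg_set(priv->mdev, mod_hdr_acts,
 *					TUNNEL_TO_REG, value);
 *	if (err)
 *		return err;
 */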
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag) \
	__flow_flag_test_and_set(flow, \
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
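/* Usage sketch: the barriers in the helpers above order publication
 * against observation. A producer stores the flow fields, then sets
 * the flag:
 *
 *	flow->rule[0] = rule;
 *	flow_flag_set(flow, OFFLOADED);
 *
 * and a consumer tests the flag before reading those fields:
 *
 *	if (flow_flag_test(flow, OFFLOADED))
 *		rule = flow->rule[0];
 */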
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
	struct mlx5e_mod_hdr_entry *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions = parse_attr->mod_hdr_acts.num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_acts.actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err < 0)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
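/* e.g. peer_vhca_id = 0x0005 and prio = 2 hash to 0x00050002: the vhca
 * id occupies the upper 16 bits of the key, the priority the lower ones.
 */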
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
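	/* e.g. mlx5e_port_max_linkspeed() reporting 100000 (100Gb/s)
	 * yields 100000 / 50000 = 2 hairpin channels, while any link at
	 * or below 50Gb/s is clamped to a single channel.
	 */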
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
		return;
	}

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
*
1198 mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch
*esw
,
1199 struct mlx5e_tc_flow
*flow
,
1200 struct mlx5_flow_spec
*spec
)
1202 struct mlx5_esw_flow_attr slow_attr
;
1203 struct mlx5_flow_handle
*rule
;
1205 memcpy(&slow_attr
, flow
->esw_attr
, sizeof(slow_attr
));
1206 slow_attr
.action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
1207 slow_attr
.split_count
= 0;
1208 slow_attr
.flags
|= MLX5_ESW_ATTR_FLAG_SLOW_PATH
;
1210 rule
= mlx5e_tc_offload_fdb_rules(esw
, flow
, spec
, &slow_attr
);
1212 flow_flag_set(flow
, SLOW
);
1218 mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch
*esw
,
1219 struct mlx5e_tc_flow
*flow
)
1221 struct mlx5_esw_flow_attr slow_attr
;
1223 memcpy(&slow_attr
, flow
->esw_attr
, sizeof(slow_attr
));
1224 slow_attr
.action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
1225 slow_attr
.split_count
= 0;
1226 slow_attr
.flags
|= MLX5_ESW_ATTR_FLAG_SLOW_PATH
;
1227 mlx5e_tc_unoffload_fdb_rules(esw
, flow
, &slow_attr
);
1228 flow_flag_clear(flow
, SLOW
);
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_esw_chains_get_chain_range(esw);
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_esw_chains_get_prio_range(esw);
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	int out_index;

	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}
/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->encap_header);
	kfree(e);
}
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != U16_MAX ||
			    opt->type != U8_MAX ||
			    memchr_inv(opt->opt_data, 0xFF,
				       opt->length * 4)) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
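/* Usage sketch: copy one dissector key of a flower rule into a local
 * struct, as done for the tunnel match key just below:
 *
 *	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
 *		       &tunnel_key.enc_control);
 */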
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  enc_opts_match.key, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->esw_attr->chain;
	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->esw_attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -EINVAL;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EINVAL;
	}

	return 0;
}
*priv
,
2092 struct mlx5e_tc_flow
*flow
,
2093 struct mlx5_flow_spec
*spec
,
2094 struct flow_cls_offload
*f
,
2095 struct net_device
*filter_dev
,
2096 u8
*inner_match_level
, u8
*outer_match_level
)
2098 struct netlink_ext_ack
*extack
= f
->common
.extack
;
2099 void *headers_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
2101 void *headers_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
2103 void *misc_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
2105 void *misc_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
2107 struct flow_rule
*rule
= flow_cls_offload_flow_rule(f
);
2108 struct flow_dissector
*dissector
= rule
->match
.dissector
;
2114 match_level
= outer_match_level
;
2116 if (dissector
->used_keys
&
2117 ~(BIT(FLOW_DISSECTOR_KEY_META
) |
2118 BIT(FLOW_DISSECTOR_KEY_CONTROL
) |
2119 BIT(FLOW_DISSECTOR_KEY_BASIC
) |
2120 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS
) |
2121 BIT(FLOW_DISSECTOR_KEY_VLAN
) |
2122 BIT(FLOW_DISSECTOR_KEY_CVLAN
) |
2123 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS
) |
2124 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS
) |
2125 BIT(FLOW_DISSECTOR_KEY_PORTS
) |
2126 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID
) |
2127 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
) |
2128 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
) |
2129 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS
) |
2130 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL
) |
2131 BIT(FLOW_DISSECTOR_KEY_TCP
) |
2132 BIT(FLOW_DISSECTOR_KEY_IP
) |
2133 BIT(FLOW_DISSECTOR_KEY_CT
) |
2134 BIT(FLOW_DISSECTOR_KEY_ENC_IP
) |
2135 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS
))) {
2136 NL_SET_ERR_MSG_MOD(extack
, "Unsupported key");
2137 netdev_warn(priv
->netdev
, "Unsupported key used: 0x%x\n",
2138 dissector
->used_keys
);
2142 if (mlx5e_get_tc_tun(filter_dev
)) {
2143 bool match_inner
= false;
2145 err
= parse_tunnel_attr(priv
, flow
, spec
, f
, filter_dev
,
2146 outer_match_level
, &match_inner
);
2151 /* header pointers should point to the inner headers
2152 * if the packet was decapsulated already.
2153 * outer headers are set by parse_tunnel_attr.
2155 match_level
= inner_match_level
;
2156 headers_c
= get_match_inner_headers_criteria(spec
);
2157 headers_v
= get_match_inner_headers_value(spec
);
2161 err
= mlx5e_flower_parse_meta(filter_dev
, f
);
2165 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_BASIC
)) {
2166 struct flow_match_basic match
;
2168 flow_rule_match_basic(rule
, &match
);
2169 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ethertype
,
2170 ntohs(match
.mask
->n_proto
));
2171 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ethertype
,
2172 ntohs(match
.key
->n_proto
));
2174 if (match
.mask
->n_proto
)
2175 *match_level
= MLX5_MATCH_L2
;
2177 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_VLAN
) ||
2178 is_vlan_dev(filter_dev
)) {
2179 struct flow_dissector_key_vlan filter_dev_mask
;
2180 struct flow_dissector_key_vlan filter_dev_key
;
2181 struct flow_match_vlan match
;
2183 if (is_vlan_dev(filter_dev
)) {
2184 match
.key
= &filter_dev_key
;
2185 match
.key
->vlan_id
= vlan_dev_vlan_id(filter_dev
);
2186 match
.key
->vlan_tpid
= vlan_dev_vlan_proto(filter_dev
);
2187 match
.key
->vlan_priority
= 0;
2188 match
.mask
= &filter_dev_mask
;
2189 memset(match
.mask
, 0xff, sizeof(*match
.mask
));
2190 match
.mask
->vlan_priority
= 0;
2192 flow_rule_match_vlan(rule
, &match
);
2194 if (match
.mask
->vlan_id
||
2195 match
.mask
->vlan_priority
||
2196 match
.mask
->vlan_tpid
) {
2197 if (match
.key
->vlan_tpid
== htons(ETH_P_8021AD
)) {
2198 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2200 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2203 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2205 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2209 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, first_vid
,
2210 match
.mask
->vlan_id
);
2211 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, first_vid
,
2212 match
.key
->vlan_id
);
2214 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, first_prio
,
2215 match
.mask
->vlan_priority
);
2216 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, first_prio
,
2217 match
.key
->vlan_priority
);
2219 *match_level
= MLX5_MATCH_L2
;
2221 } else if (*match_level
!= MLX5_MATCH_NONE
) {
2222 /* cvlan_tag enabled in match criteria and
2223 * disabled in match value means both S & C tags
2224 * don't exist (untagged of both)
2226 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, cvlan_tag
, 1);
2227 *match_level
= MLX5_MATCH_L2
;
2230 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_CVLAN
)) {
2231 struct flow_match_vlan match
;
2233 flow_rule_match_cvlan(rule
, &match
);
2234 if (match
.mask
->vlan_id
||
2235 match
.mask
->vlan_priority
||
2236 match
.mask
->vlan_tpid
) {
2237 if (match
.key
->vlan_tpid
== htons(ETH_P_8021AD
)) {
2238 MLX5_SET(fte_match_set_misc
, misc_c
,
2239 outer_second_svlan_tag
, 1);
2240 MLX5_SET(fte_match_set_misc
, misc_v
,
2241 outer_second_svlan_tag
, 1);
2243 MLX5_SET(fte_match_set_misc
, misc_c
,
2244 outer_second_cvlan_tag
, 1);
2245 MLX5_SET(fte_match_set_misc
, misc_v
,
2246 outer_second_cvlan_tag
, 1);
2249 MLX5_SET(fte_match_set_misc
, misc_c
, outer_second_vid
,
2250 match
.mask
->vlan_id
);
2251 MLX5_SET(fte_match_set_misc
, misc_v
, outer_second_vid
,
2252 match
.key
->vlan_id
);
2253 MLX5_SET(fte_match_set_misc
, misc_c
, outer_second_prio
,
2254 match
.mask
->vlan_priority
);
2255 MLX5_SET(fte_match_set_misc
, misc_v
, outer_second_prio
,
2256 match
.key
->vlan_priority
);
2258 *match_level
= MLX5_MATCH_L2
;
2262 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_ETH_ADDRS
)) {
2263 struct flow_match_eth_addrs match
;
2265 flow_rule_match_eth_addrs(rule
, &match
);
2266 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2269 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2273 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2276 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2280 if (!is_zero_ether_addr(match
.mask
->src
) ||
2281 !is_zero_ether_addr(match
.mask
->dst
))
2282 *match_level
= MLX5_MATCH_L2
;
2285 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_CONTROL
)) {
2286 struct flow_match_control match
;
2288 flow_rule_match_control(rule
, &match
);
2289 addr_type
= match
.key
->addr_type
;
2291 /* the HW doesn't support frag first/later */
2292 if (match
.mask
->flags
& FLOW_DIS_FIRST_FRAG
)
2295 if (match
.mask
->flags
& FLOW_DIS_IS_FRAGMENT
) {
2296 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, frag
, 1);
2297 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, frag
,
2298 match
.key
->flags
& FLOW_DIS_IS_FRAGMENT
);
2300 /* the HW doesn't need L3 inline to match on frag=no */
2301 if (!(match
.key
->flags
& FLOW_DIS_IS_FRAGMENT
))
2302 *match_level
= MLX5_MATCH_L2
;
2303 /* *** L2 attributes parsing up to here *** */
2305 *match_level
= MLX5_MATCH_L3
;
2309 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_BASIC
)) {
2310 struct flow_match_basic match
;
2312 flow_rule_match_basic(rule
, &match
);
2313 ip_proto
= match
.key
->ip_proto
;
2315 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ip_protocol
,
2316 match
.mask
->ip_proto
);
2317 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ip_protocol
,
2318 match
.key
->ip_proto
);
2320 if (match
.mask
->ip_proto
)
2321 *match_level
= MLX5_MATCH_L3
;
2324 if (addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
) {
2325 struct flow_match_ipv4_addrs match
;
2327 flow_rule_match_ipv4_addrs(rule
, &match
);
2328 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2329 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
2330 &match
.mask
->src
, sizeof(match
.mask
->src
));
2331 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2332 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
2333 &match
.key
->src
, sizeof(match
.key
->src
));
2334 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2335 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
2336 &match
.mask
->dst
, sizeof(match
.mask
->dst
));
2337 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2338 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
2339 &match
.key
->dst
, sizeof(match
.key
->dst
));
2341 if (match
.mask
->src
|| match
.mask
->dst
)
2342 *match_level
= MLX5_MATCH_L3
;
2345 if (addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
) {
2346 struct flow_match_ipv6_addrs match
;
2348 flow_rule_match_ipv6_addrs(rule
, &match
);
2349 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2350 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
2351 &match
.mask
->src
, sizeof(match
.mask
->src
));
2352 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2353 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
2354 &match
.key
->src
, sizeof(match
.key
->src
));
2356 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
2357 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
2358 &match
.mask
->dst
, sizeof(match
.mask
->dst
));
2359 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
2360 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
2361 &match
.key
->dst
, sizeof(match
.key
->dst
));
2363 if (ipv6_addr_type(&match
.mask
->src
) != IPV6_ADDR_ANY
||
2364 ipv6_addr_type(&match
.mask
->dst
) != IPV6_ADDR_ANY
)
2365 *match_level
= MLX5_MATCH_L3
;
2368 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_IP
)) {
2369 struct flow_match_ip match
;
2371 flow_rule_match_ip(rule
, &match
);
2372 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ip_ecn
,
2373 match
.mask
->tos
& 0x3);
2374 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ip_ecn
,
2375 match
.key
->tos
& 0x3);
2377 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ip_dscp
,
2378 match
.mask
->tos
>> 2);
2379 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ip_dscp
,
2380 match
.key
->tos
>> 2);
2382 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ttl_hoplimit
,
2384 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ttl_hoplimit
,
2387 if (match
.mask
->ttl
&&
2388 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv
->mdev
,
2389 ft_field_support
.outer_ipv4_ttl
)) {
2390 NL_SET_ERR_MSG_MOD(extack
,
2391 "Matching on TTL is not supported");
2395 if (match
.mask
->tos
|| match
.mask
->ttl
)
2396 *match_level
= MLX5_MATCH_L3
;
2399 /* *** L3 attributes parsing up to here *** */
2401 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_PORTS
)) {
2402 struct flow_match_ports match
;
2404 flow_rule_match_ports(rule
, &match
);
2407 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2408 tcp_sport
, ntohs(match
.mask
->src
));
2409 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2410 tcp_sport
, ntohs(match
.key
->src
));
2412 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2413 tcp_dport
, ntohs(match
.mask
->dst
));
2414 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2415 tcp_dport
, ntohs(match
.key
->dst
));
2419 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2420 udp_sport
, ntohs(match
.mask
->src
));
2421 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2422 udp_sport
, ntohs(match
.key
->src
));
2424 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
2425 udp_dport
, ntohs(match
.mask
->dst
));
2426 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
2427 udp_dport
, ntohs(match
.key
->dst
));
2430 NL_SET_ERR_MSG_MOD(extack
,
2431 "Only UDP and TCP transports are supported for L4 matching");
2432 netdev_err(priv
->netdev
,
2433 "Only UDP and TCP transport are supported\n");
2437 if (match
.mask
->src
|| match
.mask
->dst
)
2438 *match_level
= MLX5_MATCH_L4
;
2441 if (flow_rule_match_key(rule
, FLOW_DISSECTOR_KEY_TCP
)) {
2442 struct flow_match_tcp match
;
2444 flow_rule_match_tcp(rule
, &match
);
2445 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, tcp_flags
,
2446 ntohs(match
.mask
->flags
));
2447 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, tcp_flags
,
2448 ntohs(match
.key
->flags
));
2450 if (match
.mask
->flags
)
2451 *match_level
= MLX5_MATCH_L4
;
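
/* Wrapper around __parse_cls_flower() that also enforces the eswitch
 * min-inline restriction and records the resulting inner/outer match
 * levels on the flow attributes.
 */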
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
{
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;
	int err;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
				 &inner_match_level, &outer_match_level);
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
					outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (is_eswitch_flow) {
		flow->esw_attr->inner_match_level = inner_match_level;
		flow->esw_attr->outer_match_level = outer_match_level;
	} else {
		flow->nic_attr->match_level = non_tunnel_match_level;
	}

	return err;
}
struct pedit_headers {
	struct ethhdr	eth;
	struct vlan_hdr	vlan;
	struct iphdr	ip4;
	struct ipv6hdr	ip6;
	struct tcphdr	tcp;
	struct udphdr	udp;
};

struct pedit_headers_action {
	struct pedit_headers	vals;
	struct pedit_headers	masks;
	u32			pedits;
};

static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers_action *hdrs)
{
	u32 *curr_pmask, *curr_pval;

	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		return -EOPNOTSUPP;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;
}
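
/* Table-driven mapping from pedit-touched header bytes to firmware
 * modify-header fields, together with the byte offset of the corresponding
 * match field inside fte_match_set_lyr_2_4, which is used to skip rewrites
 * whose value is already guaranteed by the match.
 */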
struct mlx5_fields {
	u8  field;
	u8  field_bsize;
	u32 field_mask;
	u32 offset;
	u32 match_offset;
};

#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}

/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, u8 bsize)
{
	bool same = false;

	switch (bsize) {
	case 8:
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case 16:
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case 32:
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}

	return same;
}
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
	/* in linux iphdr tcp_flags is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, first, last, next_z;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	struct mlx5_fields *f;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	int err;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}
		if (skip)
			continue;

		if (f->field_bsize == 32) {
			mask_be32 = (__be32)mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (f->field_bsize == 16) {
			mask_be32 = (__be32)mask;
			mask_be16 = *(__be16 *)&mask_be32;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last  = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return err;
		}

		action = mod_acts->actions +
			 (mod_acts->num_actions * action_size);
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			int start;

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit((unsigned long *)&f->field_mask,
					       f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}
static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
						  int namespace)
{
	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
}
int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
			  int namespace,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
	int action_size, new_num_actions, max_hw_actions;
	size_t new_sz, old_sz;
	void *ret;

	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
		return 0;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
								namespace);
	new_num_actions = min(max_hw_actions,
			      mod_hdr_acts->actions ?
			      mod_hdr_acts->max_actions * 2 : 1);
	if (mod_hdr_acts->max_actions == new_num_actions)
		return -ENOSPC;

	new_sz = action_size * new_num_actions;
	old_sz = mod_hdr_acts->max_actions * action_size;
	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	memset(ret + old_sz, 0, new_sz - old_sz);
	mod_hdr_acts->actions = ret;
	mod_hdr_acts->max_actions = new_num_actions;

	return 0;
}
void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
	kfree(mod_hdr_acts->actions);
	mod_hdr_acts->actions = NULL;
	mod_hdr_acts->num_actions = 0;
	mod_hdr_acts->max_actions = 0;
}
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct flow_action_entry *act, int namespace,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
	int err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 htype;

	htype = act->mangle.htype;
	err = -EOPNOTSUPP; /* can't be all optimistic */

	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
		goto out_err;
	}

	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "The pedit offload action is not supported");
		goto out_err;
	}

	mask = act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;

	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
	if (err)
		goto out_err;

	hdrs[cmd].pedits++;

	return 0;
out_err:
	return err;
}
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	int err;
	u8 cmd;

	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
				   action_flags, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
	return err;
}
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/*  The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};

static int is_action_keys_supported(const struct flow_action_entry *act,
				    bool ct_flow, bool *modify_ip_header,
				    struct netlink_ext_ack *extack)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			*modify_ip_header = true;
		}

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return -EOPNOTSUPP;
		}
	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of transport header ports with action ct");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions, bool ct_flow,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	bool modify_ip_header;
	void *headers_v;
	u16 ethertype;
	u8 ip_proto;
	int i, err;

	headers_v = get_match_headers_value(actions, spec);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		err = is_action_keys_supported(act, ct_flow,
					       &modify_ip_header, extack);
		if (err)
			return err;
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	bool ct_flow;
	u32 actions;

	ct_flow = flow_flag_test(flow, CT);
	if (mlx5e_is_eswitch_flow(flow)) {
		actions = flow->esw_attr->action;

		if (flow->esw_attr->split_count && ct_flow) {
			/* All registers used by ct are cleared when using
			 * split rules.
			 */
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't offload mirroring with action ct");
			return false;
		}
	} else {
		actions = flow->nic_attr->action;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action, actions,
						     ct_flow, extack);

	return true;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}
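
/* Implement a VLAN id rewrite as a pedit of the VLAN TCI. This requires the
 * flow to match on the VLAN tag, and the priority bits must stay unchanged.
 */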
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
				   const struct flow_action_entry *act,
				   struct mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;
	int err;

	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = get_match_headers_value(*action, &parse_attr->spec);

	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN rewrite action must have VLAN protocol match");
		return -EOPNOTSUPP;
	}

	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action, struct netlink_ext_ack *extack)
{
	const struct flow_action_entry prio_tag_act = {
		.vlan.vid = 0,
		.vlan.prio =
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_value(*action,
							 &parse_attr->spec),
				 first_prio) &
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_criteria(*action,
							    &parse_attr->spec),
				 first_prio),
	};

	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
				       &prio_tag_act, parse_attr, hdrs, action,
				       extack);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
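
/* Encap entries are shared between flows; they are kept in a hash table
 * keyed on the tunnel key and tunnel type.
 */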
struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;
	struct mlx5e_tc_tunnel *tc_tunnel;
};

static inline int cmp_encap_info(struct encap_key *a,
				 struct encap_key *b)
{
	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}

static inline int hash_encap_info(struct encap_key *key)
{
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);
}
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_rep(priv->netdev) &&
		mlx5e_eswitch_rep(peer_netdev) &&
		same_hw_devs(priv, peer_priv));
}
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}

static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_encap_entry *e;
	struct encap_key e_key;

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		if (!cmp_encap_info(&e_key, key) &&
		    mlx5e_encap_take(e))
			return e;
	}

	return NULL;
}
static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
{
	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;

	return kmemdup(tun_info, tun_size, GFP_KERNEL);
}
static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
				      struct mlx5e_tc_flow *flow,
				      int out_index,
				      struct mlx5e_encap_entry *e,
				      struct netlink_ext_ack *extack)
{
	int i;

	for (i = 0; i < out_index; i++) {
		if (flow->encaps[i].e != e)
			continue;
		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
		netdev_err(priv->netdev, "can't duplicate encap action\n");
		return true;
	}

	return false;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&key);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	e = mlx5e_encap_get(priv, &key, hash_key);

	/* must verify if encap is valid or not */
	if (e) {
		/* Check that entry was not already attached to this flow */
		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
			err = -EOPNOTSUPP;
			goto out_err;
		}

		mutex_unlock(&esw->offloads.encap_tbl_lock);
		wait_for_completion(&e->res_ready);

		/* Protect against concurrent neigh update. */
		mutex_lock(&esw->offloads.encap_tbl_lock);
		if (e->compl_result < 0) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		err = -ENOMEM;
		goto out_err;
	}

	refcount_set(&e->refcnt, 1);
	init_completion(&e->res_ready);

	tun_info = dup_tun_info(tun_info);
	if (!tun_info) {
		err = -ENOMEM;
		goto out_err_init;
	}
	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err_init;

	INIT_LIST_HEAD(&e->flows);
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	/* Protect against concurrent neigh update. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	complete_all(&e->res_ready);
	if (err) {
		e->compl_result = err;
		goto out_err;
	}
	e->compl_result = 1;

attach_flow:
	flow->encaps[out_index].e = e;
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		*encap_valid = false;
	}
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	return err;

out_err:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	if (e)
		mlx5e_encap_put(priv, e);
	return err;

out_err_init:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	kfree(tun_info);
	kfree(e);
	return err;
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		return -EINVAL;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}
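
/* Translate a VLAN upper device into the corresponding vlan push action and
 * recurse through stacked VLAN devices down to the real output device.
 */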
static int add_vlan_push_action(struct mlx5e_priv *priv,
				struct mlx5_esw_flow_attr *attr,
				struct net_device **out_dev,
				u32 *action)
{
	struct net_device *vlan_dev = *out_dev;
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
		.vlan.prio = 0,
	};
	int err;

	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
	if (err)
		return err;

	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
					dev_get_iflink(vlan_dev));
	if (is_vlan_dev(*out_dev))
		err = add_vlan_push_action(priv, attr, out_dev, action);

	return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
			       struct mlx5_esw_flow_attr *attr,
			       u32 *action)
{
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_POP,
	};
	int nest_level, err = 0;

	nest_level = attr->parse_attr->filter_dev->lower_level -
						priv->netdev->lower_level;
	while (nest_level--) {
		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
		if (err)
			return err;
	}

	return err;
}
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev)
{
	if (is_merged_eswitch_dev(priv, out_dev))
		return true;

	return mlx5e_eswitch_rep(out_dev) &&
	       same_hw_devs(priv, netdev_priv(out_dev));
}
static bool is_duplicated_output_device(struct net_device *dev,
					struct net_device *out_dev,
					int *ifindexes, int if_count,
					struct netlink_ext_ack *extack)
{
	int i;

	for (i = 0; i < if_count; i++) {
		if (ifindexes[i] == out_dev->ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't duplicate output to same device");
			netdev_err(dev, "can't duplicate output to same device: %s\n",
				   out_dev->name);
			return true;
		}
	}

	return false;
}
static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
				    struct mlx5e_tc_flow *flow,
				    const struct flow_action_entry *act,
				    u32 actions,
				    struct netlink_ext_ack *extack)
{
	u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool ft_flow = mlx5e_is_ft_flow(flow);
	u32 dest_chain = act->chain_index;

	if (ft_flow) {
		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
		return -EOPNOTSUPP;
	}

	if (!mlx5_esw_chains_backwards_supported(esw) &&
	    dest_chain <= attr->chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Goto lower numbered chain isn't supported");
		return -EOPNOTSUPP;
	}
	if (dest_chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested destination chain is out of supported range");
		return -EOPNOTSUPP;
	}

	if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
		       MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Goto chain is not allowed if action has reformat or decap");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct net_device *out_dev,
				    struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rep_priv;

	/* Forwarding non encapsulated traffic between
	 * uplink ports is allowed only if
	 * termination_table_raw_traffic cap is set.
	 *
	 * Input vport was stored esw_attr->in_rep.
	 * In LAG case, *priv* is the private data of
	 * uplink which may be not the input vport.
	 */
	rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);

	if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
	      mlx5e_eswitch_uplink_rep(out_dev)))
		return 0;

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
					termination_table_raw_traffic)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "devices are both uplink, can't offload forwarding");
		pr_err("devices %s %s are both uplink, can't offload forwarding\n",
		       priv->netdev->name, out_dev->name);
		return -EOPNOTSUPP;
	} else if (out_dev != rep_priv->netdev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "devices are not the same uplink, can't offload forwarding");
		pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
		       priv->netdev->name, out_dev->name);
		return -EOPNOTSUPP;
	}
	return 0;
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
	bool ft_flow = mlx5e_is_ft_flow(flow);
	const struct flow_action_entry *act;
	bool encap = false, decap = false;
	u32 action = attr->action;
	int err, i, if_count = 0;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (ft_flow && out_dev == priv->netdev) {
				/* Ignore forward to self rules generated
				 * by adding both mlx5 devs to the flow table
				 * block on a normal nft offload setup.
				 */
				return -EOPNOTSUPP;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "can't support more than %d output ports, can't offload forwarding\n",
					    attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
				if (!parse_attr->tun_info[attr->out_count])
					return -ENOMEM;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper;

				if (is_duplicated_output_device(priv->netdev,
								out_dev,
								ifindexes,
								if_count,
								extack))
					return -EOPNOTSUPP;

				ifindexes[if_count] = out_dev->ifindex;
				if_count++;

				rcu_read_lock();
				uplink_upper =
					netdev_master_upper_dev_get_rcu(uplink_dev);
				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;
				rcu_read_unlock();

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,
								   &out_dev,
								   &action);
					if (err)
						return err;
				}

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,
								  &action);
					if (err)
						return err;
				}

				err = verify_uplink_forwarding(priv, flow, out_dev, extack);
				if (err)
					return err;

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");
					netdev_warn(priv->netdev,
						    "devices %s %s not on same switch HW, can't offload forwarding\n",
						    priv->netdev->name,
						    out_dev->name);
					return -EOPNOTSUPP;
				}

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to  install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "devices %s %s not on same switch HW, can't offload forwarding\n",
					    priv->netdev->name,
					    out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,
							      &action, extack);
			} else {
				err = parse_tc_vlan_action(priv, act, attr, &action);
			}
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			decap = true;
			break;
		case FLOW_ACTION_GOTO:
			err = mlx5_validate_goto_chain(esw, flow, act, action,
						       extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = act->chain_index;
			break;
		case FLOW_ACTION_CT:
			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
			if (err)
				return err;

			flow_flag_set(flow, CT);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with rewrite vlan prio
		 * tag rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
						       &action, extack);
		if (err)
			return err;
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (decap) {
			/* It can be supported if we'll create a mapping for
			 * the tunnel device only (without tunnel), and set
			 * this tunnel id with this decap flow.
			 *
			 * On restore (miss), we'll just set this saved tunnel
			 * device.
			 */

			NL_SET_ERR_MSG(extack,
				       "Decap with goto isn't supported");
			netdev_warn(priv->netdev,
				    "Decap with goto isn't supported");
			return -EOPNOTSUPP;
		}

		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (!(attr->action &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->mod_hdr);
	INIT_LIST_HEAD(&flow->hairpin);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = f->common.prio;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size  = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
                                      struct mlx5e_tc_flow *flow,
                                      unsigned long flow_flags)
{
        struct mlx5e_priv *priv = flow->priv, *peer_priv;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
        struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5e_rep_priv *peer_urpriv;
        struct mlx5e_tc_flow *peer_flow;
        struct mlx5_core_dev *in_mdev;
        int err = 0;

        peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
        if (!peer_esw)
                return -ENODEV;

        peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
        peer_priv = netdev_priv(peer_urpriv->netdev);

        /* in_mdev is chosen based on where the packet originated: packets
         * redirected to the uplink keep the mdev of the original flow,
         * while packets redirected from the uplink use the peer mdev.
         */
        if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
                in_mdev = peer_priv->mdev;
        else
                in_mdev = priv->mdev;

        parse_attr = flow->esw_attr->parse_attr;
        peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
                                         parse_attr->filter_dev,
                                         flow->esw_attr->in_rep, in_mdev);
        if (IS_ERR(peer_flow)) {
                err = PTR_ERR(peer_flow);
                goto out;
        }

        flow->peer_flow = peer_flow;
        flow_flag_set(flow, DUP);
        mutex_lock(&esw->offloads.peer_mutex);
        list_add_tail(&flow->peer, &esw->offloads.peer_flows);
        mutex_unlock(&esw->offloads.peer_mutex);

out:
        mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
        return err;
}

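/* A duplicated flow keeps a back-pointer to its peer (flow->peer_flow,
 * with the DUP flag set) and is linked on the eswitch's peer_flows list,
 * which is what lets mlx5e_tc_clean_fdb_peer_flows() tear down every peer
 * rule when the paired eswitch goes away.
 */
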
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
                   struct flow_cls_offload *f,
                   unsigned long flow_flags,
                   struct net_device *filter_dev,
                   struct mlx5e_tc_flow **__flow)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *in_rep = rpriv->rep;
        struct mlx5_core_dev *in_mdev = priv->mdev;
        struct mlx5e_tc_flow *flow;
        int err;

        flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
                                    in_mdev);
        if (IS_ERR(flow))
                return PTR_ERR(flow);

        if (is_peer_flow_needed(flow)) {
                err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
                if (err) {
                        mlx5e_tc_del_fdb_flow(priv, flow);
                        goto out;
                }
        }

        *__flow = flow;

        return 0;

out:
        return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
                   struct flow_cls_offload *f,
                   unsigned long flow_flags,
                   struct net_device *filter_dev,
                   struct mlx5e_tc_flow **__flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5e_tc_flow *flow;
        int attr_size, err;

        /* multi-chain not supported for NIC rules */
        if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
                return -EOPNOTSUPP;

        flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
        attr_size  = sizeof(struct mlx5_nic_flow_attr);
        err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
                               &parse_attr, &flow);
        if (err)
                goto out;

        parse_attr->filter_dev = filter_dev;
        err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
                               f, filter_dev);
        if (err)
                goto err_free;

        err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
        if (err)
                goto err_free;

        err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
        if (err)
                goto err_free;

        flow_flag_set(flow, OFFLOADED);
        kvfree(parse_attr);
        *__flow = flow;

        return 0;

err_free:
        mlx5e_flow_put(priv, flow);
        kvfree(parse_attr);
out:
        return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
                  struct flow_cls_offload *f,
                  unsigned long flags,
                  struct net_device *filter_dev,
                  struct mlx5e_tc_flow **flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned long flow_flags;
        int err;

        get_flags(flags, &flow_flags);

        if (!tc_can_offload_extack(priv->netdev, f->common.extack))
                return -EOPNOTSUPP;

        if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
                err = mlx5e_add_fdb_flow(priv, f, flow_flags,
                                         filter_dev, flow);
        else
                err = mlx5e_add_nic_flow(priv, f, flow_flags,
                                         filter_dev, flow);

        return err;
}

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                           struct flow_cls_offload *f, unsigned long flags)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct rhashtable *tc_ht = get_tc_ht(priv, flags);
        struct mlx5e_tc_flow *flow;
        int err = 0;

        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
        if (flow) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "flow cookie already exists, ignoring");
                netdev_warn_once(priv->netdev,
                                 "flow cookie %lx already exists, ignoring\n",
                                 f->cookie);
                err = -EEXIST;
                goto rcu_unlock;
        }
rcu_unlock:
        rcu_read_unlock();
        if (err)
                goto out;

        trace_mlx5e_configure_flower(f);
        err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
        if (err)
                goto out;

        err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
        if (err)
                goto err_free;

        return 0;

err_free:
        mlx5e_flow_put(priv, flow);
out:
        return err;
}

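/* For reference, a rule that reaches mlx5e_configure_flower() typically
 * originates from a flower classifier such as (illustrative command and
 * interface name only):
 *
 *   tc filter add dev enp1s0f0 ingress protocol ip \
 *           flower dst_ip 10.0.0.1 action drop
 *
 * f->cookie identifies the filter instance; a duplicate cookie is
 * rejected above with -EEXIST.
 */
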
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
        bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
        bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

        return flow_flag_test(flow, INGRESS) == dir_ingress &&
                flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
                        struct flow_cls_offload *f, unsigned long flags)
{
        struct rhashtable *tc_ht = get_tc_ht(priv, flags);
        struct mlx5e_tc_flow *flow;
        int err;

        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
        if (!flow || !same_flow_direction(flow, flags)) {
                err = -EINVAL;
                goto errout;
        }

        /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
         * set.
         */
        if (flow_flag_test_and_set(flow, DELETED)) {
                err = -EINVAL;
                goto errout;
        }
        rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
        rcu_read_unlock();

        trace_mlx5e_delete_flower(f);
        mlx5e_flow_put(priv, flow);

        return 0;

errout:
        rcu_read_unlock();
        return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
                       struct flow_cls_offload *f, unsigned long flags)
{
        struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
        struct rhashtable *tc_ht = get_tc_ht(priv, flags);
        struct mlx5_eswitch *peer_esw;
        struct mlx5e_tc_flow *flow;
        struct mlx5_fc *counter;
        u64 lastuse = 0;
        u64 packets = 0;
        u64 bytes = 0;
        int err = 0;

        rcu_read_lock();
        flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
                                                tc_ht_params));
        rcu_read_unlock();
        if (IS_ERR(flow))
                return PTR_ERR(flow);

        if (!same_flow_direction(flow, flags)) {
                err = -EINVAL;
                goto errout;
        }

        if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
                counter = mlx5e_tc_get_counter(flow);
                if (!counter)
                        goto errout;

                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
        }

        /* Under multipath it's possible for one rule to be currently
         * un-offloaded while the other rule is offloaded.
         */
        peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
        if (!peer_esw)
                goto out;

        if (flow_flag_test(flow, DUP) &&
            flow_flag_test(flow->peer_flow, OFFLOADED)) {
                u64 bytes2;
                u64 packets2;
                u64 lastuse2;

                counter = mlx5e_tc_get_counter(flow->peer_flow);
                if (!counter)
                        goto no_peer_counter;
                mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

                bytes += bytes2;
                packets += packets2;
                lastuse = max_t(u64, lastuse, lastuse2);
        }

no_peer_counter:
        mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
        flow_stats_update(&f->stats, bytes, packets, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
        trace_mlx5e_stats_flower(f);
errout:
        mlx5e_flow_put(priv, flow);
        return err;
}

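/* Under multipath, the stats reported for a duplicated flow are the sum
 * of both hardware counters: e.g. if the local rule counted 100 packets
 * and the peer rule 40, the filter reports 140, with lastuse taken as the
 * more recent of the two timestamps.
 */
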
static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
                               struct netlink_ext_ack *extack)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch *esw;
        u32 rate_mbps;
        u16 vport_num;
        int err;

        vport_num = rpriv->rep->vport;
        if (vport_num >= MLX5_VPORT_ECPF) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
                return -EOPNOTSUPP;
        }

        esw = priv->mdev->priv.eswitch;
        /* rate is given in bytes/sec.
         * First convert to bits/sec and then round to the nearest mbit/secs.
         * mbit means million bits.
         * Moreover, if rate is non zero we choose to configure to a minimum of
         * 1 mbit/sec.
         */
        rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

        return err;
}

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
                                        struct flow_action *flow_action,
                                        struct netlink_ext_ack *extack)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        const struct flow_action_entry *act;
        int err;
        int i;

        if (!flow_action_has_entries(flow_action)) {
                NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
                return -EINVAL;
        }

        if (!flow_offload_has_one_action(flow_action)) {
                NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
                return -EOPNOTSUPP;
        }

        if (!flow_action_basic_hw_stats_check(flow_action, extack))
                return -EOPNOTSUPP;

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_POLICE:
                        err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
                        if (err)
                                return err;

                        rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
                                struct tc_cls_matchall_offload *ma)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct netlink_ext_ack *extack = ma->common.extack;

        if (!mlx5_esw_qos_enabled(esw)) {
                NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
                return -EOPNOTSUPP;
        }

        if (ma->common.prio != 1) {
                NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
                return -EINVAL;
        }

        return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
                             struct tc_cls_matchall_offload *ma)
{
        struct netlink_ext_ack *extack = ma->common.extack;

        return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
                             struct tc_cls_matchall_offload *ma)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct rtnl_link_stats64 cur_stats;
        u64 dbytes;
        u64 dpkts;

        cur_stats = priv->stats.vf_vport;
        dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
        dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
        rpriv->prev_vf_vport_stats = cur_stats;
        /* flow_stats_update() takes bytes before packets, matching the
         * call in mlx5e_stats_flower() above.
         */
        flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
                          FLOW_ACTION_HW_STATS_DELAYED);
}

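/* Matchall stats are reported as deltas of the VF vport counters against
 * the snapshot saved in scan_tc_matchall_fdb_actions() when the police
 * action was applied; no per-rule hardware counter is involved here.
 */
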
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
                                              struct mlx5e_priv *peer_priv)
{
        struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
        struct mlx5e_hairpin_entry *hpe, *tmp;
        LIST_HEAD(init_wait_list);
        u16 peer_vhca_id;
        int bkt;

        if (!same_hw_devs(priv, peer_priv))
                return;

        peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

        mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
        hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
                if (refcount_inc_not_zero(&hpe->refcnt))
                        list_add(&hpe->dead_peer_wait_list, &init_wait_list);
        mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

        list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
                wait_for_completion(&hpe->res_ready);
                if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
                        hpe->hp->pair->peer_gone = true;

                mlx5e_hairpin_put(priv, hpe);
        }
}

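/* The teardown above is two-phase: entries are first collected under
 * hairpin_tbl_lock using refcount_inc_not_zero() (skipping entries that
 * are already being released), and only after the lock is dropped does it
 * wait on each entry's res_ready completion before marking the hairpin
 * pair's peer as gone.
 */
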
static int mlx5e_tc_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5e_flow_steering *fs;
        struct mlx5e_priv *peer_priv;
        struct mlx5e_tc_table *tc;
        struct mlx5e_priv *priv;

        if (ndev->netdev_ops != &mlx5e_netdev_ops ||
            event != NETDEV_UNREGISTER ||
            ndev->reg_state == NETREG_REGISTERED)
                return NOTIFY_DONE;

        tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
        fs = container_of(tc, struct mlx5e_flow_steering, tc);
        priv = container_of(fs, struct mlx5e_priv, fs);
        peer_priv = netdev_priv(ndev);
        if (priv == peer_priv ||
            !(priv->netdev->features & NETIF_F_HW_TC))
                return NOTIFY_DONE;

        mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

        return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err;

        mutex_init(&tc->t_lock);
        mutex_init(&tc->mod_hdr.lock);
        hash_init(tc->mod_hdr.hlist);
        mutex_init(&tc->hairpin_tbl_lock);
        hash_init(tc->hairpin_tbl);

        err = rhashtable_init(&tc->ht, &tc_ht_params);
        if (err)
                return err;

        tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
        err = register_netdevice_notifier_dev_net(priv->netdev,
                                                  &tc->netdevice_nb,
                                                  &tc->netdevice_nn);
        if (err) {
                tc->netdevice_nb.notifier_call = NULL;
                mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
        }

        return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = flow->priv;

        mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        if (tc->netdevice_nb.notifier_call)
                unregister_netdevice_notifier_dev_net(priv->netdev,
                                                      &tc->netdevice_nb,
                                                      &tc->netdevice_nn);

        mutex_destroy(&tc->mod_hdr.lock);
        mutex_destroy(&tc->hairpin_tbl_lock);

        rhashtable_destroy(&tc->ht);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
        mutex_destroy(&tc->t_lock);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
        const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *priv;
        struct mapping_ctx *mapping;
        int err;

        uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
        priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

        err = mlx5_tc_ct_init(uplink_priv);
        if (err)
                goto err_ct;

        mapping = mapping_create(sizeof(struct tunnel_match_key),
                                 TUNNEL_INFO_BITS_MASK, true);
        if (IS_ERR(mapping)) {
                err = PTR_ERR(mapping);
                goto err_tun_mapping;
        }
        uplink_priv->tunnel_mapping = mapping;

        mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
        if (IS_ERR(mapping)) {
                err = PTR_ERR(mapping);
                goto err_enc_opts_mapping;
        }
        uplink_priv->tunnel_enc_opts_mapping = mapping;

        err = rhashtable_init(tc_ht, &tc_ht_params);
        if (err)
                goto err_ht_init;

        return err;

err_ht_init:
        mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
        mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
        mlx5_tc_ct_clean(uplink_priv);
err_ct:
        netdev_warn(priv->netdev,
                    "Failed to initialize tc (eswitch), err: %d", err);
        return err;
}

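/* The two mappings created above define the tunnel restore ID layout: the
 * full match key maps to tun_id and the encap options to enc_opts_id,
 * packed together as (tun_id << ENC_OPTS_BITS) | enc_opts_id.
 * mlx5e_restore_tunnel() performs the inverse unpacking on receive.
 */
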
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
        struct mlx5_rep_uplink_priv *uplink_priv;

        rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

        uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
        mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
        mapping_destroy(uplink_priv->tunnel_mapping);

        mlx5_tc_ct_clean(uplink_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
        struct rhashtable *tc_ht = get_tc_ht(priv, flags);

        return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
        struct mlx5e_tc_flow *flow, *tmp;

        list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
                __mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
        struct mlx5_rep_uplink_priv *rpriv =
                container_of(work, struct mlx5_rep_uplink_priv,
                             reoffload_flows_work);
        struct mlx5e_tc_flow *flow, *tmp;

        mutex_lock(&rpriv->unready_flows_lock);
        list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
                if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
                        unready_flow_del(flow);
        }
        mutex_unlock(&rpriv->unready_flows_lock);
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
                                 struct mlx5e_tc_update_priv *tc_priv,
                                 u32 tunnel_id)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct flow_dissector_key_enc_opts enc_opts = {};
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct metadata_dst *tun_dst;
        struct tunnel_match_key key;
        u32 tun_id, enc_opts_id;
        struct net_device *dev;
        int err;

        enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
        tun_id = tunnel_id >> ENC_OPTS_BITS;

        if (!tun_id)
                return true;

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        uplink_priv = &uplink_rpriv->uplink_priv;

        err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
        if (err) {
                netdev_dbg(priv->netdev,
                           "Couldn't find tunnel for tun_id: %d, err: %d\n",
                           tun_id, err);
                return false;
        }

        if (enc_opts_id) {
                err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
                                   enc_opts_id, &enc_opts);
                if (err) {
                        netdev_dbg(priv->netdev,
                                   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
                                   enc_opts_id, err);
                        return false;
                }
        }

        tun_dst = tun_rx_dst(enc_opts.len);
        if (!tun_dst)
                return false;

        ip_tunnel_key_init(&tun_dst->u.tun_info.key,
                           key.enc_ipv4.src, key.enc_ipv4.dst,
                           key.enc_ip.tos, key.enc_ip.ttl,
                           0, /* label */
                           key.enc_tp.src, key.enc_tp.dst,
                           key32_to_tunnel_id(key.enc_key_id.keyid),
                           TUNNEL_KEY);

        if (enc_opts.len)
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
                                        enc_opts.len, enc_opts.dst_opt_type);

        skb_dst_set(skb, (struct dst_entry *)tun_dst);
        dev = dev_get_by_index(&init_net, key.filter_ifindex);
        if (!dev) {
                netdev_dbg(priv->netdev,
                           "Couldn't find tunnel device with ifindex: %d\n",
                           key.filter_ifindex);
                return false;
        }

        /* Set tun_dev so we do dev_put() after datapath */
        tc_priv->tun_dev = dev;

        skb->dev = dev;

        return true;
}
#endif /* CONFIG_NET_TC_SKB_EXT */

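/* On receive, reg_c0 of the CQE (sop_drop_qpn) carries the chain tag and
 * reg_c1 (imm_inval_pkey) packs the CT tuple id in its low bits with the
 * tunnel restore id above it; e.g. with a tunnel moffset of 1 byte,
 * tunnel_id = reg_c1 >> 8. The function below unpacks both registers and
 * restores chain, conntrack and tunnel metadata onto the skb before it is
 * handed to tc.
 */
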
bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
                             struct sk_buff *skb,
                             struct mlx5e_tc_update_priv *tc_priv)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct tc_skb_ext *tc_skb_ext;
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;
        int tunnel_moffset;
        int err;

        reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
        if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
                reg_c0 = 0;
        reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);

        if (!reg_c0)
                return true;

        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;

        err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
        if (err) {
                netdev_dbg(priv->netdev,
                           "Couldn't find chain for chain tag: %d, err: %d\n",
                           reg_c0, err);
                return false;
        }

        if (chain) {
                tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
                if (!tc_skb_ext)
                        return false;

                tc_skb_ext->chain = chain;

                tuple_id = reg_c1 & TUPLE_ID_MAX;

                uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
                uplink_priv = &uplink_rpriv->uplink_priv;
                if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
                        return false;
        }

        tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
        tunnel_id = reg_c1 >> (8 * tunnel_moffset);
        return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */

        return true;
}

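/* mlx5e_restore_tunnel() took a reference on the tunnel device via
 * dev_get_by_index() and stashed it in tc_priv->tun_dev; the helper below
 * releases that reference once NAPI processing of the skb is done.
 */
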
void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
{
        if (tc_priv->tun_dev)
                dev_put(tc_priv->tun_dev);
}