1 #ifndef _NET_FLOW_OFFLOAD_H
2 #define _NET_FLOW_OFFLOAD_H
4 #include <linux/kernel.h>
5 #include <linux/list.h>
6 #include <linux/netlink.h>
7 #include <net/flow_dissector.h>
8 #include <linux/rhashtable.h>
/*
 * struct flow_match - dissector view of a single rule's match data.
 *
 * NOTE(review): only the @dissector member of this definition survived
 * the extraction that produced this file; the struct header and the
 * opaque @mask/@key blobs are restored by convention -- verify
 * field-for-field against the original header. That a "flow_match" type
 * with a @dissector member exists is grounded by flow_rule_match_key()
 * below, which reads rule->match.dissector.
 */
struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};
/*
 * Per-dissector-key match wrappers: each pairs a pointer to a key's
 * value with a pointer to its mask.  (The closing braces of these
 * definitions were lost in extraction and have been restored.)
 */
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};
/* Forward declaration restored: these prototypes reference the rule
 * type before its full definition appears later in this header.
 */
struct flow_rule;

/* Helpers that point a flow_match_* wrapper at the key/mask data of
 * the given rule.
 */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);
/*
 * NOTE(review): the extraction dropped several enumerators and the
 * closing brace of this enum.  The missing names have been restored
 * from the well-known ordering of this API, and the restored gap sizes
 * match the line gaps in the damaged file, so the surviving
 * enumerators keep their original implicit values -- still, verify
 * against the original header before relying on exact values.
 */
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	NUM_FLOW_ACTIONS,
};
/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
/* Bit positions used to build the enum flow_action_hw_stats masks. */
enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,
};
172 enum flow_action_hw_stats
{
173 FLOW_ACTION_HW_STATS_DONT_CARE
= 0,
174 FLOW_ACTION_HW_STATS_IMMEDIATE
=
175 BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT
),
176 FLOW_ACTION_HW_STATS_DELAYED
= BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT
),
177 FLOW_ACTION_HW_STATS_ANY
= FLOW_ACTION_HW_STATS_IMMEDIATE
|
178 FLOW_ACTION_HW_STATS_DELAYED
,
179 FLOW_ACTION_HW_STATS_DISABLED
=
180 BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT
),
183 typedef void (*action_destr
)(void *priv
);
185 struct flow_action_cookie
{
190 struct flow_action_cookie
*flow_action_cookie_create(void *data
,
193 void flow_action_cookie_destroy(struct flow_action_cookie
*cookie
);
195 struct flow_action_entry
{
196 enum flow_action_id id
;
197 enum flow_action_hw_stats hw_stats
;
198 action_destr destructor
;
199 void *destructor_priv
;
201 u32 chain_index
; /* FLOW_ACTION_GOTO */
202 struct net_device
*dev
; /* FLOW_ACTION_REDIRECT */
203 struct { /* FLOW_ACTION_VLAN */
208 struct { /* FLOW_ACTION_MANGLE */
209 /* FLOW_ACTION_ADD */
210 enum flow_action_mangle_base htype
;
215 struct ip_tunnel_info
*tunnel
; /* FLOW_ACTION_TUNNEL_ENCAP */
216 u32 csum_flags
; /* FLOW_ACTION_CSUM */
217 u32 mark
; /* FLOW_ACTION_MARK */
218 u16 ptype
; /* FLOW_ACTION_PTYPE */
219 u32 priority
; /* FLOW_ACTION_PRIORITY */
220 struct { /* FLOW_ACTION_QUEUE */
225 struct { /* FLOW_ACTION_SAMPLE */
226 struct psample_group
*psample_group
;
231 struct { /* FLOW_ACTION_POLICE */
235 struct { /* FLOW_ACTION_CT */
238 struct nf_flowtable
*flow_table
;
241 unsigned long cookie
;
245 struct { /* FLOW_ACTION_MPLS_PUSH */
252 struct { /* FLOW_ACTION_MPLS_POP */
255 struct { /* FLOW_ACTION_MPLS_MANGLE */
262 struct flow_action_cookie
*cookie
; /* user defined action cookie */
266 unsigned int num_entries
;
267 struct flow_action_entry entries
[];
270 static inline bool flow_action_has_entries(const struct flow_action
*action
)
272 return action
->num_entries
;
276 * flow_action_has_one_action() - check if exactly one action is present
277 * @action: tc filter flow offload action
279 * Returns true if exactly one action is present.
281 static inline bool flow_offload_has_one_action(const struct flow_action
*action
)
283 return action
->num_entries
== 1;
/* Iterate over every entry of a struct flow_action, with __i as the
 * running index and __act pointing at the current entry.
 */
#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])
292 flow_action_mixed_hw_stats_check(const struct flow_action
*action
,
293 struct netlink_ext_ack
*extack
)
295 const struct flow_action_entry
*action_entry
;
296 u8
uninitialized_var(last_hw_stats
);
299 if (flow_offload_has_one_action(action
))
302 flow_action_for_each(i
, action_entry
, action
) {
303 if (i
&& action_entry
->hw_stats
!= last_hw_stats
) {
304 NL_SET_ERR_MSG_MOD(extack
, "Mixing HW stats types for actions is not supported");
307 last_hw_stats
= action_entry
->hw_stats
;
312 static inline const struct flow_action_entry
*
313 flow_action_first_entry_get(const struct flow_action
*action
)
315 WARN_ON(!flow_action_has_entries(action
));
316 return &action
->entries
[0];
320 __flow_action_hw_stats_check(const struct flow_action
*action
,
321 struct netlink_ext_ack
*extack
,
322 bool check_allow_bit
,
323 enum flow_action_hw_stats_bit allow_bit
)
325 const struct flow_action_entry
*action_entry
;
327 if (!flow_action_has_entries(action
))
329 if (!flow_action_mixed_hw_stats_check(action
, extack
))
332 action_entry
= flow_action_first_entry_get(action
);
333 if (action_entry
->hw_stats
== FLOW_ACTION_HW_STATS_DONT_CARE
)
336 if (!check_allow_bit
&&
337 action_entry
->hw_stats
!= FLOW_ACTION_HW_STATS_ANY
) {
338 NL_SET_ERR_MSG_MOD(extack
, "Driver supports only default HW stats type \"any\"");
340 } else if (check_allow_bit
&&
341 !(action_entry
->hw_stats
& BIT(allow_bit
))) {
342 NL_SET_ERR_MSG_MOD(extack
, "Driver does not support selected HW stats type");
349 flow_action_hw_stats_check(const struct flow_action
*action
,
350 struct netlink_ext_ack
*extack
,
351 enum flow_action_hw_stats_bit allow_bit
)
353 return __flow_action_hw_stats_check(action
, extack
, true, allow_bit
);
357 flow_action_basic_hw_stats_check(const struct flow_action
*action
,
358 struct netlink_ext_ack
*extack
)
360 return __flow_action_hw_stats_check(action
, extack
, false, 0);
364 struct flow_match match
;
365 struct flow_action action
;
368 struct flow_rule
*flow_rule_alloc(unsigned int num_actions
);
370 static inline bool flow_rule_match_key(const struct flow_rule
*rule
,
371 enum flow_dissector_key_id key
)
373 return dissector_uses_key(rule
->match
.dissector
, key
);
380 enum flow_action_hw_stats used_hw_stats
;
381 bool used_hw_stats_valid
;
384 static inline void flow_stats_update(struct flow_stats
*flow_stats
,
385 u64 bytes
, u64 pkts
, u64 lastused
,
386 enum flow_action_hw_stats used_hw_stats
)
388 flow_stats
->pkts
+= pkts
;
389 flow_stats
->bytes
+= bytes
;
390 flow_stats
->lastused
= max_t(u64
, flow_stats
->lastused
, lastused
);
392 /* The driver should pass value with a maximum of one bit set.
393 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
395 WARN_ON(used_hw_stats
== FLOW_ACTION_HW_STATS_ANY
);
396 flow_stats
->used_hw_stats
|= used_hw_stats
;
397 flow_stats
->used_hw_stats_valid
= true;
/*
 * NOTE(review): the enumerators of this enum were lost in extraction;
 * BIND/UNBIND are restored by convention (they match the
 * "enum flow_block_command command" usages below) -- verify against
 * the original header.
 */
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};
enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};
412 struct list_head cb_list
;
415 struct netlink_ext_ack
;
417 struct flow_block_offload
{
418 enum flow_block_command command
;
419 enum flow_block_binder_type binder_type
;
421 bool unlocked_driver_cb
;
423 struct flow_block
*block
;
424 struct list_head cb_list
;
425 struct list_head
*driver_block_list
;
426 struct netlink_ext_ack
*extack
;
/* Driver callback invoked to set up an offload of the given type.
 * NOTE(review): the trailing "void *cb_priv" parameter was lost in
 * extraction and restored by convention (flow_block_cb_alloc() pairs a
 * cb with a cb_priv below) -- verify against the original header.
 */
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
433 struct flow_block_cb
{
434 struct list_head driver_list
;
435 struct list_head list
;
439 void (*release
)(void *cb_priv
);
443 struct flow_block_cb
*flow_block_cb_alloc(flow_setup_cb_t
*cb
,
444 void *cb_ident
, void *cb_priv
,
445 void (*release
)(void *cb_priv
));
446 void flow_block_cb_free(struct flow_block_cb
*block_cb
);
448 struct flow_block_cb
*flow_block_cb_lookup(struct flow_block
*block
,
449 flow_setup_cb_t
*cb
, void *cb_ident
);
451 void *flow_block_cb_priv(struct flow_block_cb
*block_cb
);
452 void flow_block_cb_incref(struct flow_block_cb
*block_cb
);
453 unsigned int flow_block_cb_decref(struct flow_block_cb
*block_cb
);
455 static inline void flow_block_cb_add(struct flow_block_cb
*block_cb
,
456 struct flow_block_offload
*offload
)
458 list_add_tail(&block_cb
->list
, &offload
->cb_list
);
461 static inline void flow_block_cb_remove(struct flow_block_cb
*block_cb
,
462 struct flow_block_offload
*offload
)
464 list_move(&block_cb
->list
, &offload
->cb_list
);
467 bool flow_block_cb_is_busy(flow_setup_cb_t
*cb
, void *cb_ident
,
468 struct list_head
*driver_block_list
);
470 int flow_block_cb_setup_simple(struct flow_block_offload
*f
,
471 struct list_head
*driver_list
,
473 void *cb_ident
, void *cb_priv
, bool ingress_only
);
/*
 * NOTE(review): the first three enumerators were lost in extraction and
 * are restored by convention (REPLACE/DESTROY/STATS); the restored gap
 * size matches the damaged file, so the surviving TMPLT_* enumerators
 * keep their original values -- verify against the original header.
 */
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};
483 struct flow_cls_common_offload
{
487 struct netlink_ext_ack
*extack
;
490 struct flow_cls_offload
{
491 struct flow_cls_common_offload common
;
492 enum flow_cls_command command
;
493 unsigned long cookie
;
494 struct flow_rule
*rule
;
495 struct flow_stats stats
;
499 static inline struct flow_rule
*
500 flow_cls_offload_flow_rule(struct flow_cls_offload
*flow_cmd
)
502 return flow_cmd
->rule
;
505 static inline void flow_block_init(struct flow_block
*flow_block
)
507 INIT_LIST_HEAD(&flow_block
->cb_list
);
/* Driver callback for an indirectly bound flow block. */
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
				      enum tc_setup_type type,
				      void *type_data);

/* Per-subsystem dispatcher that applies @command to a device's
 * indirect block callback.
 */
typedef void flow_indr_block_cmd_t(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_priv,
				   enum flow_block_command command);
517 struct flow_indr_block_entry
{
518 flow_indr_block_cmd_t
*cb
;
519 struct list_head list
;
522 void flow_indr_add_block_cb(struct flow_indr_block_entry
*entry
);
524 void flow_indr_del_block_cb(struct flow_indr_block_entry
*entry
);
526 int __flow_indr_block_cb_register(struct net_device
*dev
, void *cb_priv
,
527 flow_indr_block_bind_cb_t
*cb
,
530 void __flow_indr_block_cb_unregister(struct net_device
*dev
,
531 flow_indr_block_bind_cb_t
*cb
,
534 int flow_indr_block_cb_register(struct net_device
*dev
, void *cb_priv
,
535 flow_indr_block_bind_cb_t
*cb
, void *cb_ident
);
537 void flow_indr_block_cb_unregister(struct net_device
*dev
,
538 flow_indr_block_bind_cb_t
*cb
,
541 void flow_indr_block_call(struct net_device
*dev
,
542 struct flow_block_offload
*bo
,
543 enum flow_block_command command
,
544 enum tc_setup_type type
);
546 #endif /* _NET_FLOW_OFFLOAD_H */