/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>
34 struct flow_dissector_key_control control
;
35 struct flow_dissector_key_control enc_control
;
36 struct flow_dissector_key_basic basic
;
37 struct flow_dissector_key_eth_addrs eth
;
38 struct flow_dissector_key_vlan vlan
;
39 struct flow_dissector_key_vlan cvlan
;
41 struct flow_dissector_key_ipv4_addrs ipv4
;
42 struct flow_dissector_key_ipv6_addrs ipv6
;
44 struct flow_dissector_key_ports tp
;
45 struct flow_dissector_key_icmp icmp
;
46 struct flow_dissector_key_arp arp
;
47 struct flow_dissector_key_keyid enc_key_id
;
49 struct flow_dissector_key_ipv4_addrs enc_ipv4
;
50 struct flow_dissector_key_ipv6_addrs enc_ipv6
;
52 struct flow_dissector_key_ports enc_tp
;
53 struct flow_dissector_key_mpls mpls
;
54 struct flow_dissector_key_tcp tcp
;
55 struct flow_dissector_key_ip ip
;
56 struct flow_dissector_key_ip enc_ip
;
57 struct flow_dissector_key_enc_opts enc_opts
;
58 struct flow_dissector_key_ports tp_min
;
59 struct flow_dissector_key_ports tp_max
;
60 } __aligned(BITS_PER_LONG
/ 8); /* Ensure that we can do comparisons as longs. */
/* Byte range [start, end) of fl_flow_key that a mask actually covers;
 * used to limit masked compares/hashing to the interesting bytes only.
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
68 struct fl_flow_key key
;
69 struct fl_flow_mask_range range
;
71 struct rhash_head ht_node
;
73 struct rhashtable_params filter_ht_params
;
74 struct flow_dissector dissector
;
75 struct list_head filters
;
76 struct rcu_work rwork
;
77 struct list_head list
;
80 struct fl_flow_tmplt
{
81 struct fl_flow_key dummy_key
;
82 struct fl_flow_key mask
;
83 struct flow_dissector dissector
;
84 struct tcf_chain
*chain
;
89 struct list_head masks
;
90 struct rcu_work rwork
;
91 struct idr handle_idr
;
94 struct cls_fl_filter
{
95 struct fl_flow_mask
*mask
;
96 struct rhash_head ht_node
;
97 struct fl_flow_key mkey
;
99 struct tcf_result res
;
100 struct fl_flow_key key
;
101 struct list_head list
;
105 struct rcu_work rwork
;
106 struct net_device
*hw_dev
;
109 static const struct rhashtable_params mask_ht_params
= {
110 .key_offset
= offsetof(struct fl_flow_mask
, key
),
111 .key_len
= sizeof(struct fl_flow_key
),
112 .head_offset
= offsetof(struct fl_flow_mask
, ht_node
),
113 .automatic_shrinking
= true,
116 static unsigned short int fl_mask_range(const struct fl_flow_mask
*mask
)
118 return mask
->range
.end
- mask
->range
.start
;
121 static void fl_mask_update_range(struct fl_flow_mask
*mask
)
123 const u8
*bytes
= (const u8
*) &mask
->key
;
124 size_t size
= sizeof(mask
->key
);
125 size_t i
, first
= 0, last
;
127 for (i
= 0; i
< size
; i
++) {
134 for (i
= size
- 1; i
!= first
; i
--) {
140 mask
->range
.start
= rounddown(first
, sizeof(long));
141 mask
->range
.end
= roundup(last
+ 1, sizeof(long));
144 static void *fl_key_get_start(struct fl_flow_key
*key
,
145 const struct fl_flow_mask
*mask
)
147 return (u8
*) key
+ mask
->range
.start
;
150 static void fl_set_masked_key(struct fl_flow_key
*mkey
, struct fl_flow_key
*key
,
151 struct fl_flow_mask
*mask
)
153 const long *lkey
= fl_key_get_start(key
, mask
);
154 const long *lmask
= fl_key_get_start(&mask
->key
, mask
);
155 long *lmkey
= fl_key_get_start(mkey
, mask
);
158 for (i
= 0; i
< fl_mask_range(mask
); i
+= sizeof(long))
159 *lmkey
++ = *lkey
++ & *lmask
++;
162 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt
*tmplt
,
163 struct fl_flow_mask
*mask
)
165 const long *lmask
= fl_key_get_start(&mask
->key
, mask
);
171 ltmplt
= fl_key_get_start(&tmplt
->mask
, mask
);
172 for (i
= 0; i
< fl_mask_range(mask
); i
+= sizeof(long)) {
173 if (~*ltmplt
++ & *lmask
++)
/* Zero the portion of @key that @mask's active range covers. */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
185 static bool fl_range_port_dst_cmp(struct cls_fl_filter
*filter
,
186 struct fl_flow_key
*key
,
187 struct fl_flow_key
*mkey
)
189 __be16 min_mask
, max_mask
, min_val
, max_val
;
191 min_mask
= htons(filter
->mask
->key
.tp_min
.dst
);
192 max_mask
= htons(filter
->mask
->key
.tp_max
.dst
);
193 min_val
= htons(filter
->key
.tp_min
.dst
);
194 max_val
= htons(filter
->key
.tp_max
.dst
);
196 if (min_mask
&& max_mask
) {
197 if (htons(key
->tp
.dst
) < min_val
||
198 htons(key
->tp
.dst
) > max_val
)
201 /* skb does not have min and max values */
202 mkey
->tp_min
.dst
= filter
->mkey
.tp_min
.dst
;
203 mkey
->tp_max
.dst
= filter
->mkey
.tp_max
.dst
;
208 static bool fl_range_port_src_cmp(struct cls_fl_filter
*filter
,
209 struct fl_flow_key
*key
,
210 struct fl_flow_key
*mkey
)
212 __be16 min_mask
, max_mask
, min_val
, max_val
;
214 min_mask
= htons(filter
->mask
->key
.tp_min
.src
);
215 max_mask
= htons(filter
->mask
->key
.tp_max
.src
);
216 min_val
= htons(filter
->key
.tp_min
.src
);
217 max_val
= htons(filter
->key
.tp_max
.src
);
219 if (min_mask
&& max_mask
) {
220 if (htons(key
->tp
.src
) < min_val
||
221 htons(key
->tp
.src
) > max_val
)
224 /* skb does not have min and max values */
225 mkey
->tp_min
.src
= filter
->mkey
.tp_min
.src
;
226 mkey
->tp_max
.src
= filter
->mkey
.tp_max
.src
;
231 static struct cls_fl_filter
*__fl_lookup(struct fl_flow_mask
*mask
,
232 struct fl_flow_key
*mkey
)
234 return rhashtable_lookup_fast(&mask
->ht
, fl_key_get_start(mkey
, mask
),
235 mask
->filter_ht_params
);
238 static struct cls_fl_filter
*fl_lookup_range(struct fl_flow_mask
*mask
,
239 struct fl_flow_key
*mkey
,
240 struct fl_flow_key
*key
)
242 struct cls_fl_filter
*filter
, *f
;
244 list_for_each_entry_rcu(filter
, &mask
->filters
, list
) {
245 if (!fl_range_port_dst_cmp(filter
, key
, mkey
))
248 if (!fl_range_port_src_cmp(filter
, key
, mkey
))
251 f
= __fl_lookup(mask
, mkey
);
258 static struct cls_fl_filter
*fl_lookup(struct fl_flow_mask
*mask
,
259 struct fl_flow_key
*mkey
,
260 struct fl_flow_key
*key
)
262 if ((mask
->flags
& TCA_FLOWER_MASK_FLAGS_RANGE
))
263 return fl_lookup_range(mask
, mkey
, key
);
265 return __fl_lookup(mask
, mkey
);
268 static int fl_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
269 struct tcf_result
*res
)
271 struct cls_fl_head
*head
= rcu_dereference_bh(tp
->root
);
272 struct cls_fl_filter
*f
;
273 struct fl_flow_mask
*mask
;
274 struct fl_flow_key skb_key
;
275 struct fl_flow_key skb_mkey
;
277 list_for_each_entry_rcu(mask
, &head
->masks
, list
) {
278 fl_clear_masked_range(&skb_key
, mask
);
280 skb_key
.indev_ifindex
= skb
->skb_iif
;
281 /* skb_flow_dissect() does not set n_proto in case an unknown
282 * protocol, so do it rather here.
284 skb_key
.basic
.n_proto
= skb
->protocol
;
285 skb_flow_dissect_tunnel_info(skb
, &mask
->dissector
, &skb_key
);
286 skb_flow_dissect(skb
, &mask
->dissector
, &skb_key
, 0);
288 fl_set_masked_key(&skb_mkey
, &skb_key
, mask
);
290 f
= fl_lookup(mask
, &skb_mkey
, &skb_key
);
291 if (f
&& !tc_skip_sw(f
->flags
)) {
293 return tcf_exts_exec(skb
, &f
->exts
, res
);
299 static int fl_init(struct tcf_proto
*tp
)
301 struct cls_fl_head
*head
;
303 head
= kzalloc(sizeof(*head
), GFP_KERNEL
);
307 INIT_LIST_HEAD_RCU(&head
->masks
);
308 rcu_assign_pointer(tp
->root
, head
);
309 idr_init(&head
->handle_idr
);
311 return rhashtable_init(&head
->ht
, &mask_ht_params
);
314 static void fl_mask_free(struct fl_flow_mask
*mask
)
316 rhashtable_destroy(&mask
->ht
);
320 static void fl_mask_free_work(struct work_struct
*work
)
322 struct fl_flow_mask
*mask
= container_of(to_rcu_work(work
),
323 struct fl_flow_mask
, rwork
);
328 static bool fl_mask_put(struct cls_fl_head
*head
, struct fl_flow_mask
*mask
,
331 if (!list_empty(&mask
->filters
))
334 rhashtable_remove_fast(&head
->ht
, &mask
->ht_node
, mask_ht_params
);
335 list_del_rcu(&mask
->list
);
337 tcf_queue_work(&mask
->rwork
, fl_mask_free_work
);
344 static void __fl_destroy_filter(struct cls_fl_filter
*f
)
346 tcf_exts_destroy(&f
->exts
);
347 tcf_exts_put_net(&f
->exts
);
351 static void fl_destroy_filter_work(struct work_struct
*work
)
353 struct cls_fl_filter
*f
= container_of(to_rcu_work(work
),
354 struct cls_fl_filter
, rwork
);
357 __fl_destroy_filter(f
);
361 static void fl_hw_destroy_filter(struct tcf_proto
*tp
, struct cls_fl_filter
*f
,
362 struct netlink_ext_ack
*extack
)
364 struct tc_cls_flower_offload cls_flower
= {};
365 struct tcf_block
*block
= tp
->chain
->block
;
367 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, extack
);
368 cls_flower
.command
= TC_CLSFLOWER_DESTROY
;
369 cls_flower
.cookie
= (unsigned long) f
;
371 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false);
372 tcf_block_offload_dec(block
, &f
->flags
);
375 static int fl_hw_replace_filter(struct tcf_proto
*tp
,
376 struct cls_fl_filter
*f
,
377 struct netlink_ext_ack
*extack
)
379 struct tc_cls_flower_offload cls_flower
= {};
380 struct tcf_block
*block
= tp
->chain
->block
;
381 bool skip_sw
= tc_skip_sw(f
->flags
);
384 cls_flower
.rule
= flow_rule_alloc(tcf_exts_num_actions(&f
->exts
));
385 if (!cls_flower
.rule
)
388 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, extack
);
389 cls_flower
.command
= TC_CLSFLOWER_REPLACE
;
390 cls_flower
.cookie
= (unsigned long) f
;
391 cls_flower
.rule
->match
.dissector
= &f
->mask
->dissector
;
392 cls_flower
.rule
->match
.mask
= &f
->mask
->key
;
393 cls_flower
.rule
->match
.key
= &f
->mkey
;
394 cls_flower
.classid
= f
->res
.classid
;
396 err
= tc_setup_flow_action(&cls_flower
.rule
->action
, &f
->exts
);
398 kfree(cls_flower
.rule
);
400 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
406 err
= tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, skip_sw
);
407 kfree(cls_flower
.rule
);
410 fl_hw_destroy_filter(tp
, f
, NULL
);
412 } else if (err
> 0) {
413 f
->in_hw_count
= err
;
414 tcf_block_offload_inc(block
, &f
->flags
);
417 if (skip_sw
&& !(f
->flags
& TCA_CLS_FLAGS_IN_HW
))
423 static void fl_hw_update_stats(struct tcf_proto
*tp
, struct cls_fl_filter
*f
)
425 struct tc_cls_flower_offload cls_flower
= {};
426 struct tcf_block
*block
= tp
->chain
->block
;
428 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, NULL
);
429 cls_flower
.command
= TC_CLSFLOWER_STATS
;
430 cls_flower
.cookie
= (unsigned long) f
;
431 cls_flower
.classid
= f
->res
.classid
;
433 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false);
435 tcf_exts_stats_update(&f
->exts
, cls_flower
.stats
.bytes
,
436 cls_flower
.stats
.pkts
,
437 cls_flower
.stats
.lastused
);
440 static bool __fl_delete(struct tcf_proto
*tp
, struct cls_fl_filter
*f
,
441 struct netlink_ext_ack
*extack
)
443 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
444 bool async
= tcf_exts_get_net(&f
->exts
);
447 idr_remove(&head
->handle_idr
, f
->handle
);
448 list_del_rcu(&f
->list
);
449 last
= fl_mask_put(head
, f
->mask
, async
);
450 if (!tc_skip_hw(f
->flags
))
451 fl_hw_destroy_filter(tp
, f
, extack
);
452 tcf_unbind_filter(tp
, &f
->res
);
454 tcf_queue_work(&f
->rwork
, fl_destroy_filter_work
);
456 __fl_destroy_filter(f
);
461 static void fl_destroy_sleepable(struct work_struct
*work
)
463 struct cls_fl_head
*head
= container_of(to_rcu_work(work
),
467 rhashtable_destroy(&head
->ht
);
469 module_put(THIS_MODULE
);
472 static void fl_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
473 struct netlink_ext_ack
*extack
)
475 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
476 struct fl_flow_mask
*mask
, *next_mask
;
477 struct cls_fl_filter
*f
, *next
;
479 list_for_each_entry_safe(mask
, next_mask
, &head
->masks
, list
) {
480 list_for_each_entry_safe(f
, next
, &mask
->filters
, list
) {
481 if (__fl_delete(tp
, f
, extack
))
485 idr_destroy(&head
->handle_idr
);
487 __module_get(THIS_MODULE
);
488 tcf_queue_work(&head
->rwork
, fl_destroy_sleepable
);
491 static void *fl_get(struct tcf_proto
*tp
, u32 handle
)
493 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
495 return idr_find(&head
->handle_idr
, handle
);
498 static const struct nla_policy fl_policy
[TCA_FLOWER_MAX
+ 1] = {
499 [TCA_FLOWER_UNSPEC
] = { .type
= NLA_UNSPEC
},
500 [TCA_FLOWER_CLASSID
] = { .type
= NLA_U32
},
501 [TCA_FLOWER_INDEV
] = { .type
= NLA_STRING
,
503 [TCA_FLOWER_KEY_ETH_DST
] = { .len
= ETH_ALEN
},
504 [TCA_FLOWER_KEY_ETH_DST_MASK
] = { .len
= ETH_ALEN
},
505 [TCA_FLOWER_KEY_ETH_SRC
] = { .len
= ETH_ALEN
},
506 [TCA_FLOWER_KEY_ETH_SRC_MASK
] = { .len
= ETH_ALEN
},
507 [TCA_FLOWER_KEY_ETH_TYPE
] = { .type
= NLA_U16
},
508 [TCA_FLOWER_KEY_IP_PROTO
] = { .type
= NLA_U8
},
509 [TCA_FLOWER_KEY_IPV4_SRC
] = { .type
= NLA_U32
},
510 [TCA_FLOWER_KEY_IPV4_SRC_MASK
] = { .type
= NLA_U32
},
511 [TCA_FLOWER_KEY_IPV4_DST
] = { .type
= NLA_U32
},
512 [TCA_FLOWER_KEY_IPV4_DST_MASK
] = { .type
= NLA_U32
},
513 [TCA_FLOWER_KEY_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
514 [TCA_FLOWER_KEY_IPV6_SRC_MASK
] = { .len
= sizeof(struct in6_addr
) },
515 [TCA_FLOWER_KEY_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
516 [TCA_FLOWER_KEY_IPV6_DST_MASK
] = { .len
= sizeof(struct in6_addr
) },
517 [TCA_FLOWER_KEY_TCP_SRC
] = { .type
= NLA_U16
},
518 [TCA_FLOWER_KEY_TCP_DST
] = { .type
= NLA_U16
},
519 [TCA_FLOWER_KEY_UDP_SRC
] = { .type
= NLA_U16
},
520 [TCA_FLOWER_KEY_UDP_DST
] = { .type
= NLA_U16
},
521 [TCA_FLOWER_KEY_VLAN_ID
] = { .type
= NLA_U16
},
522 [TCA_FLOWER_KEY_VLAN_PRIO
] = { .type
= NLA_U8
},
523 [TCA_FLOWER_KEY_VLAN_ETH_TYPE
] = { .type
= NLA_U16
},
524 [TCA_FLOWER_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
525 [TCA_FLOWER_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
526 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
] = { .type
= NLA_U32
},
527 [TCA_FLOWER_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
528 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
] = { .type
= NLA_U32
},
529 [TCA_FLOWER_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
530 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
] = { .len
= sizeof(struct in6_addr
) },
531 [TCA_FLOWER_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
532 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
] = { .len
= sizeof(struct in6_addr
) },
533 [TCA_FLOWER_KEY_TCP_SRC_MASK
] = { .type
= NLA_U16
},
534 [TCA_FLOWER_KEY_TCP_DST_MASK
] = { .type
= NLA_U16
},
535 [TCA_FLOWER_KEY_UDP_SRC_MASK
] = { .type
= NLA_U16
},
536 [TCA_FLOWER_KEY_UDP_DST_MASK
] = { .type
= NLA_U16
},
537 [TCA_FLOWER_KEY_SCTP_SRC_MASK
] = { .type
= NLA_U16
},
538 [TCA_FLOWER_KEY_SCTP_DST_MASK
] = { .type
= NLA_U16
},
539 [TCA_FLOWER_KEY_SCTP_SRC
] = { .type
= NLA_U16
},
540 [TCA_FLOWER_KEY_SCTP_DST
] = { .type
= NLA_U16
},
541 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
] = { .type
= NLA_U16
},
542 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
] = { .type
= NLA_U16
},
543 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT
] = { .type
= NLA_U16
},
544 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
] = { .type
= NLA_U16
},
545 [TCA_FLOWER_KEY_FLAGS
] = { .type
= NLA_U32
},
546 [TCA_FLOWER_KEY_FLAGS_MASK
] = { .type
= NLA_U32
},
547 [TCA_FLOWER_KEY_ICMPV4_TYPE
] = { .type
= NLA_U8
},
548 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
] = { .type
= NLA_U8
},
549 [TCA_FLOWER_KEY_ICMPV4_CODE
] = { .type
= NLA_U8
},
550 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK
] = { .type
= NLA_U8
},
551 [TCA_FLOWER_KEY_ICMPV6_TYPE
] = { .type
= NLA_U8
},
552 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
] = { .type
= NLA_U8
},
553 [TCA_FLOWER_KEY_ICMPV6_CODE
] = { .type
= NLA_U8
},
554 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK
] = { .type
= NLA_U8
},
555 [TCA_FLOWER_KEY_ARP_SIP
] = { .type
= NLA_U32
},
556 [TCA_FLOWER_KEY_ARP_SIP_MASK
] = { .type
= NLA_U32
},
557 [TCA_FLOWER_KEY_ARP_TIP
] = { .type
= NLA_U32
},
558 [TCA_FLOWER_KEY_ARP_TIP_MASK
] = { .type
= NLA_U32
},
559 [TCA_FLOWER_KEY_ARP_OP
] = { .type
= NLA_U8
},
560 [TCA_FLOWER_KEY_ARP_OP_MASK
] = { .type
= NLA_U8
},
561 [TCA_FLOWER_KEY_ARP_SHA
] = { .len
= ETH_ALEN
},
562 [TCA_FLOWER_KEY_ARP_SHA_MASK
] = { .len
= ETH_ALEN
},
563 [TCA_FLOWER_KEY_ARP_THA
] = { .len
= ETH_ALEN
},
564 [TCA_FLOWER_KEY_ARP_THA_MASK
] = { .len
= ETH_ALEN
},
565 [TCA_FLOWER_KEY_MPLS_TTL
] = { .type
= NLA_U8
},
566 [TCA_FLOWER_KEY_MPLS_BOS
] = { .type
= NLA_U8
},
567 [TCA_FLOWER_KEY_MPLS_TC
] = { .type
= NLA_U8
},
568 [TCA_FLOWER_KEY_MPLS_LABEL
] = { .type
= NLA_U32
},
569 [TCA_FLOWER_KEY_TCP_FLAGS
] = { .type
= NLA_U16
},
570 [TCA_FLOWER_KEY_TCP_FLAGS_MASK
] = { .type
= NLA_U16
},
571 [TCA_FLOWER_KEY_IP_TOS
] = { .type
= NLA_U8
},
572 [TCA_FLOWER_KEY_IP_TOS_MASK
] = { .type
= NLA_U8
},
573 [TCA_FLOWER_KEY_IP_TTL
] = { .type
= NLA_U8
},
574 [TCA_FLOWER_KEY_IP_TTL_MASK
] = { .type
= NLA_U8
},
575 [TCA_FLOWER_KEY_CVLAN_ID
] = { .type
= NLA_U16
},
576 [TCA_FLOWER_KEY_CVLAN_PRIO
] = { .type
= NLA_U8
},
577 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE
] = { .type
= NLA_U16
},
578 [TCA_FLOWER_KEY_ENC_IP_TOS
] = { .type
= NLA_U8
},
579 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK
] = { .type
= NLA_U8
},
580 [TCA_FLOWER_KEY_ENC_IP_TTL
] = { .type
= NLA_U8
},
581 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK
] = { .type
= NLA_U8
},
582 [TCA_FLOWER_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
583 [TCA_FLOWER_KEY_ENC_OPTS_MASK
] = { .type
= NLA_NESTED
},
586 static const struct nla_policy
587 enc_opts_policy
[TCA_FLOWER_KEY_ENC_OPTS_MAX
+ 1] = {
588 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
591 static const struct nla_policy
592 geneve_opt_policy
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
593 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
594 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
595 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
599 static void fl_set_key_val(struct nlattr
**tb
,
600 void *val
, int val_type
,
601 void *mask
, int mask_type
, int len
)
605 memcpy(val
, nla_data(tb
[val_type
]), len
);
606 if (mask_type
== TCA_FLOWER_UNSPEC
|| !tb
[mask_type
])
607 memset(mask
, 0xff, len
);
609 memcpy(mask
, nla_data(tb
[mask_type
]), len
);
612 static int fl_set_key_port_range(struct nlattr
**tb
, struct fl_flow_key
*key
,
613 struct fl_flow_key
*mask
)
615 fl_set_key_val(tb
, &key
->tp_min
.dst
,
616 TCA_FLOWER_KEY_PORT_DST_MIN
, &mask
->tp_min
.dst
,
617 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_min
.dst
));
618 fl_set_key_val(tb
, &key
->tp_max
.dst
,
619 TCA_FLOWER_KEY_PORT_DST_MAX
, &mask
->tp_max
.dst
,
620 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_max
.dst
));
621 fl_set_key_val(tb
, &key
->tp_min
.src
,
622 TCA_FLOWER_KEY_PORT_SRC_MIN
, &mask
->tp_min
.src
,
623 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_min
.src
));
624 fl_set_key_val(tb
, &key
->tp_max
.src
,
625 TCA_FLOWER_KEY_PORT_SRC_MAX
, &mask
->tp_max
.src
,
626 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_max
.src
));
628 if ((mask
->tp_min
.dst
&& mask
->tp_max
.dst
&&
629 htons(key
->tp_max
.dst
) <= htons(key
->tp_min
.dst
)) ||
630 (mask
->tp_min
.src
&& mask
->tp_max
.src
&&
631 htons(key
->tp_max
.src
) <= htons(key
->tp_min
.src
)))
637 static int fl_set_key_mpls(struct nlattr
**tb
,
638 struct flow_dissector_key_mpls
*key_val
,
639 struct flow_dissector_key_mpls
*key_mask
)
641 if (tb
[TCA_FLOWER_KEY_MPLS_TTL
]) {
642 key_val
->mpls_ttl
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_TTL
]);
643 key_mask
->mpls_ttl
= MPLS_TTL_MASK
;
645 if (tb
[TCA_FLOWER_KEY_MPLS_BOS
]) {
646 u8 bos
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_BOS
]);
648 if (bos
& ~MPLS_BOS_MASK
)
650 key_val
->mpls_bos
= bos
;
651 key_mask
->mpls_bos
= MPLS_BOS_MASK
;
653 if (tb
[TCA_FLOWER_KEY_MPLS_TC
]) {
654 u8 tc
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_TC
]);
656 if (tc
& ~MPLS_TC_MASK
)
658 key_val
->mpls_tc
= tc
;
659 key_mask
->mpls_tc
= MPLS_TC_MASK
;
661 if (tb
[TCA_FLOWER_KEY_MPLS_LABEL
]) {
662 u32 label
= nla_get_u32(tb
[TCA_FLOWER_KEY_MPLS_LABEL
]);
664 if (label
& ~MPLS_LABEL_MASK
)
666 key_val
->mpls_label
= label
;
667 key_mask
->mpls_label
= MPLS_LABEL_MASK
;
672 static void fl_set_key_vlan(struct nlattr
**tb
,
674 int vlan_id_key
, int vlan_prio_key
,
675 struct flow_dissector_key_vlan
*key_val
,
676 struct flow_dissector_key_vlan
*key_mask
)
678 #define VLAN_PRIORITY_MASK 0x7
680 if (tb
[vlan_id_key
]) {
682 nla_get_u16(tb
[vlan_id_key
]) & VLAN_VID_MASK
;
683 key_mask
->vlan_id
= VLAN_VID_MASK
;
685 if (tb
[vlan_prio_key
]) {
686 key_val
->vlan_priority
=
687 nla_get_u8(tb
[vlan_prio_key
]) &
689 key_mask
->vlan_priority
= VLAN_PRIORITY_MASK
;
691 key_val
->vlan_tpid
= ethertype
;
692 key_mask
->vlan_tpid
= cpu_to_be16(~0);
695 static void fl_set_key_flag(u32 flower_key
, u32 flower_mask
,
696 u32
*dissector_key
, u32
*dissector_mask
,
697 u32 flower_flag_bit
, u32 dissector_flag_bit
)
699 if (flower_mask
& flower_flag_bit
) {
700 *dissector_mask
|= dissector_flag_bit
;
701 if (flower_key
& flower_flag_bit
)
702 *dissector_key
|= dissector_flag_bit
;
706 static int fl_set_key_flags(struct nlattr
**tb
,
707 u32
*flags_key
, u32
*flags_mask
)
711 /* mask is mandatory for flags */
712 if (!tb
[TCA_FLOWER_KEY_FLAGS_MASK
])
715 key
= be32_to_cpu(nla_get_u32(tb
[TCA_FLOWER_KEY_FLAGS
]));
716 mask
= be32_to_cpu(nla_get_u32(tb
[TCA_FLOWER_KEY_FLAGS_MASK
]));
721 fl_set_key_flag(key
, mask
, flags_key
, flags_mask
,
722 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
, FLOW_DIS_IS_FRAGMENT
);
723 fl_set_key_flag(key
, mask
, flags_key
, flags_mask
,
724 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
,
725 FLOW_DIS_FIRST_FRAG
);
730 static void fl_set_key_ip(struct nlattr
**tb
, bool encap
,
731 struct flow_dissector_key_ip
*key
,
732 struct flow_dissector_key_ip
*mask
)
734 int tos_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS
: TCA_FLOWER_KEY_IP_TOS
;
735 int ttl_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL
: TCA_FLOWER_KEY_IP_TTL
;
736 int tos_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS_MASK
: TCA_FLOWER_KEY_IP_TOS_MASK
;
737 int ttl_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL_MASK
: TCA_FLOWER_KEY_IP_TTL_MASK
;
739 fl_set_key_val(tb
, &key
->tos
, tos_key
, &mask
->tos
, tos_mask
, sizeof(key
->tos
));
740 fl_set_key_val(tb
, &key
->ttl
, ttl_key
, &mask
->ttl
, ttl_mask
, sizeof(key
->ttl
));
743 static int fl_set_geneve_opt(const struct nlattr
*nla
, struct fl_flow_key
*key
,
744 int depth
, int option_len
,
745 struct netlink_ext_ack
*extack
)
747 struct nlattr
*tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
+ 1];
748 struct nlattr
*class = NULL
, *type
= NULL
, *data
= NULL
;
749 struct geneve_opt
*opt
;
750 int err
, data_len
= 0;
752 if (option_len
> sizeof(struct geneve_opt
))
753 data_len
= option_len
- sizeof(struct geneve_opt
);
755 opt
= (struct geneve_opt
*)&key
->enc_opts
.data
[key
->enc_opts
.len
];
756 memset(opt
, 0xff, option_len
);
757 opt
->length
= data_len
/ 4;
762 /* If no mask has been prodived we assume an exact match. */
764 return sizeof(struct geneve_opt
) + data_len
;
766 if (nla_type(nla
) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE
) {
767 NL_SET_ERR_MSG(extack
, "Non-geneve option type for mask");
771 err
= nla_parse_nested(tb
, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
,
772 nla
, geneve_opt_policy
, extack
);
776 /* We are not allowed to omit any of CLASS, TYPE or DATA
777 * fields from the key.
780 (!tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
] ||
781 !tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
] ||
782 !tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
])) {
783 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
787 /* Omitting any of CLASS, TYPE or DATA fields is allowed
790 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
]) {
791 int new_len
= key
->enc_opts
.len
;
793 data
= tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
];
794 data_len
= nla_len(data
);
796 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
800 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
804 new_len
+= sizeof(struct geneve_opt
) + data_len
;
805 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX
!= IP_TUNNEL_OPTS_MAX
);
806 if (new_len
> FLOW_DIS_TUN_OPTS_MAX
) {
807 NL_SET_ERR_MSG(extack
, "Tunnel options exceeds max size");
810 opt
->length
= data_len
/ 4;
811 memcpy(opt
->opt_data
, nla_data(data
), data_len
);
814 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
]) {
815 class = tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
];
816 opt
->opt_class
= nla_get_be16(class);
819 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
]) {
820 type
= tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
];
821 opt
->type
= nla_get_u8(type
);
824 return sizeof(struct geneve_opt
) + data_len
;
827 static int fl_set_enc_opt(struct nlattr
**tb
, struct fl_flow_key
*key
,
828 struct fl_flow_key
*mask
,
829 struct netlink_ext_ack
*extack
)
831 const struct nlattr
*nla_enc_key
, *nla_opt_key
, *nla_opt_msk
= NULL
;
832 int err
, option_len
, key_depth
, msk_depth
= 0;
834 err
= nla_validate_nested(tb
[TCA_FLOWER_KEY_ENC_OPTS
],
835 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
836 enc_opts_policy
, extack
);
840 nla_enc_key
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS
]);
842 if (tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]) {
843 err
= nla_validate_nested(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
],
844 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
845 enc_opts_policy
, extack
);
849 nla_opt_msk
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
850 msk_depth
= nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
853 nla_for_each_attr(nla_opt_key
, nla_enc_key
,
854 nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS
]), key_depth
) {
855 switch (nla_type(nla_opt_key
)) {
856 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE
:
858 key
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
859 option_len
= fl_set_geneve_opt(nla_opt_key
, key
,
860 key_depth
, option_len
,
865 key
->enc_opts
.len
+= option_len
;
866 /* At the same time we need to parse through the mask
867 * in order to verify exact and mask attribute lengths.
869 mask
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
870 option_len
= fl_set_geneve_opt(nla_opt_msk
, mask
,
871 msk_depth
, option_len
,
876 mask
->enc_opts
.len
+= option_len
;
877 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
878 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
883 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
886 NL_SET_ERR_MSG(extack
, "Unknown tunnel option type");
894 static int fl_set_key(struct net
*net
, struct nlattr
**tb
,
895 struct fl_flow_key
*key
, struct fl_flow_key
*mask
,
896 struct netlink_ext_ack
*extack
)
900 #ifdef CONFIG_NET_CLS_IND
901 if (tb
[TCA_FLOWER_INDEV
]) {
902 int err
= tcf_change_indev(net
, tb
[TCA_FLOWER_INDEV
], extack
);
905 key
->indev_ifindex
= err
;
906 mask
->indev_ifindex
= 0xffffffff;
910 fl_set_key_val(tb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
911 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
912 sizeof(key
->eth
.dst
));
913 fl_set_key_val(tb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
914 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
915 sizeof(key
->eth
.src
));
917 if (tb
[TCA_FLOWER_KEY_ETH_TYPE
]) {
918 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_ETH_TYPE
]);
920 if (eth_type_vlan(ethertype
)) {
921 fl_set_key_vlan(tb
, ethertype
, TCA_FLOWER_KEY_VLAN_ID
,
922 TCA_FLOWER_KEY_VLAN_PRIO
, &key
->vlan
,
925 if (tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]) {
926 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]);
927 if (eth_type_vlan(ethertype
)) {
928 fl_set_key_vlan(tb
, ethertype
,
929 TCA_FLOWER_KEY_CVLAN_ID
,
930 TCA_FLOWER_KEY_CVLAN_PRIO
,
931 &key
->cvlan
, &mask
->cvlan
);
932 fl_set_key_val(tb
, &key
->basic
.n_proto
,
933 TCA_FLOWER_KEY_CVLAN_ETH_TYPE
,
934 &mask
->basic
.n_proto
,
936 sizeof(key
->basic
.n_proto
));
938 key
->basic
.n_proto
= ethertype
;
939 mask
->basic
.n_proto
= cpu_to_be16(~0);
943 key
->basic
.n_proto
= ethertype
;
944 mask
->basic
.n_proto
= cpu_to_be16(~0);
948 if (key
->basic
.n_proto
== htons(ETH_P_IP
) ||
949 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) {
950 fl_set_key_val(tb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
951 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
952 sizeof(key
->basic
.ip_proto
));
953 fl_set_key_ip(tb
, false, &key
->ip
, &mask
->ip
);
956 if (tb
[TCA_FLOWER_KEY_IPV4_SRC
] || tb
[TCA_FLOWER_KEY_IPV4_DST
]) {
957 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
958 mask
->control
.addr_type
= ~0;
959 fl_set_key_val(tb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
960 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
961 sizeof(key
->ipv4
.src
));
962 fl_set_key_val(tb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
963 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
964 sizeof(key
->ipv4
.dst
));
965 } else if (tb
[TCA_FLOWER_KEY_IPV6_SRC
] || tb
[TCA_FLOWER_KEY_IPV6_DST
]) {
966 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
967 mask
->control
.addr_type
= ~0;
968 fl_set_key_val(tb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
969 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
970 sizeof(key
->ipv6
.src
));
971 fl_set_key_val(tb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
972 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
973 sizeof(key
->ipv6
.dst
));
976 if (key
->basic
.ip_proto
== IPPROTO_TCP
) {
977 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
978 &mask
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC_MASK
,
979 sizeof(key
->tp
.src
));
980 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
981 &mask
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST_MASK
,
982 sizeof(key
->tp
.dst
));
983 fl_set_key_val(tb
, &key
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS
,
984 &mask
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS_MASK
,
985 sizeof(key
->tcp
.flags
));
986 } else if (key
->basic
.ip_proto
== IPPROTO_UDP
) {
987 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
988 &mask
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC_MASK
,
989 sizeof(key
->tp
.src
));
990 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
991 &mask
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST_MASK
,
992 sizeof(key
->tp
.dst
));
993 } else if (key
->basic
.ip_proto
== IPPROTO_SCTP
) {
994 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC
,
995 &mask
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC_MASK
,
996 sizeof(key
->tp
.src
));
997 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST
,
998 &mask
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST_MASK
,
999 sizeof(key
->tp
.dst
));
1000 } else if (key
->basic
.n_proto
== htons(ETH_P_IP
) &&
1001 key
->basic
.ip_proto
== IPPROTO_ICMP
) {
1002 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV4_TYPE
,
1004 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
,
1005 sizeof(key
->icmp
.type
));
1006 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV4_CODE
,
1008 TCA_FLOWER_KEY_ICMPV4_CODE_MASK
,
1009 sizeof(key
->icmp
.code
));
1010 } else if (key
->basic
.n_proto
== htons(ETH_P_IPV6
) &&
1011 key
->basic
.ip_proto
== IPPROTO_ICMPV6
) {
1012 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV6_TYPE
,
1014 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
,
1015 sizeof(key
->icmp
.type
));
1016 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV6_CODE
,
1018 TCA_FLOWER_KEY_ICMPV6_CODE_MASK
,
1019 sizeof(key
->icmp
.code
));
1020 } else if (key
->basic
.n_proto
== htons(ETH_P_MPLS_UC
) ||
1021 key
->basic
.n_proto
== htons(ETH_P_MPLS_MC
)) {
1022 ret
= fl_set_key_mpls(tb
, &key
->mpls
, &mask
->mpls
);
1025 } else if (key
->basic
.n_proto
== htons(ETH_P_ARP
) ||
1026 key
->basic
.n_proto
== htons(ETH_P_RARP
)) {
1027 fl_set_key_val(tb
, &key
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP
,
1028 &mask
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP_MASK
,
1029 sizeof(key
->arp
.sip
));
1030 fl_set_key_val(tb
, &key
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP
,
1031 &mask
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP_MASK
,
1032 sizeof(key
->arp
.tip
));
1033 fl_set_key_val(tb
, &key
->arp
.op
, TCA_FLOWER_KEY_ARP_OP
,
1034 &mask
->arp
.op
, TCA_FLOWER_KEY_ARP_OP_MASK
,
1035 sizeof(key
->arp
.op
));
1036 fl_set_key_val(tb
, key
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA
,
1037 mask
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA_MASK
,
1038 sizeof(key
->arp
.sha
));
1039 fl_set_key_val(tb
, key
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA
,
1040 mask
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA_MASK
,
1041 sizeof(key
->arp
.tha
));
1044 if (key
->basic
.ip_proto
== IPPROTO_TCP
||
1045 key
->basic
.ip_proto
== IPPROTO_UDP
||
1046 key
->basic
.ip_proto
== IPPROTO_SCTP
) {
1047 ret
= fl_set_key_port_range(tb
, key
, mask
);
1052 if (tb
[TCA_FLOWER_KEY_ENC_IPV4_SRC
] ||
1053 tb
[TCA_FLOWER_KEY_ENC_IPV4_DST
]) {
1054 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
1055 mask
->enc_control
.addr_type
= ~0;
1056 fl_set_key_val(tb
, &key
->enc_ipv4
.src
,
1057 TCA_FLOWER_KEY_ENC_IPV4_SRC
,
1058 &mask
->enc_ipv4
.src
,
1059 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
,
1060 sizeof(key
->enc_ipv4
.src
));
1061 fl_set_key_val(tb
, &key
->enc_ipv4
.dst
,
1062 TCA_FLOWER_KEY_ENC_IPV4_DST
,
1063 &mask
->enc_ipv4
.dst
,
1064 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
,
1065 sizeof(key
->enc_ipv4
.dst
));
1068 if (tb
[TCA_FLOWER_KEY_ENC_IPV6_SRC
] ||
1069 tb
[TCA_FLOWER_KEY_ENC_IPV6_DST
]) {
1070 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
1071 mask
->enc_control
.addr_type
= ~0;
1072 fl_set_key_val(tb
, &key
->enc_ipv6
.src
,
1073 TCA_FLOWER_KEY_ENC_IPV6_SRC
,
1074 &mask
->enc_ipv6
.src
,
1075 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
,
1076 sizeof(key
->enc_ipv6
.src
));
1077 fl_set_key_val(tb
, &key
->enc_ipv6
.dst
,
1078 TCA_FLOWER_KEY_ENC_IPV6_DST
,
1079 &mask
->enc_ipv6
.dst
,
1080 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
,
1081 sizeof(key
->enc_ipv6
.dst
));
1084 fl_set_key_val(tb
, &key
->enc_key_id
.keyid
, TCA_FLOWER_KEY_ENC_KEY_ID
,
1085 &mask
->enc_key_id
.keyid
, TCA_FLOWER_UNSPEC
,
1086 sizeof(key
->enc_key_id
.keyid
));
1088 fl_set_key_val(tb
, &key
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
,
1089 &mask
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
,
1090 sizeof(key
->enc_tp
.src
));
1092 fl_set_key_val(tb
, &key
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT
,
1093 &mask
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
,
1094 sizeof(key
->enc_tp
.dst
));
1096 fl_set_key_ip(tb
, true, &key
->enc_ip
, &mask
->enc_ip
);
1098 if (tb
[TCA_FLOWER_KEY_ENC_OPTS
]) {
1099 ret
= fl_set_enc_opt(tb
, key
, mask
, extack
);
1104 if (tb
[TCA_FLOWER_KEY_FLAGS
])
1105 ret
= fl_set_key_flags(tb
, &key
->control
.flags
, &mask
->control
.flags
);
1110 static void fl_mask_copy(struct fl_flow_mask
*dst
,
1111 struct fl_flow_mask
*src
)
1113 const void *psrc
= fl_key_get_start(&src
->key
, src
);
1114 void *pdst
= fl_key_get_start(&dst
->key
, src
);
1116 memcpy(pdst
, psrc
, fl_mask_range(src
));
1117 dst
->range
= src
->range
;
/* Template rhashtable parameters for per-mask filter tables.  Each mask
 * copies this and then narrows key_offset/key_len to the bytes that mask
 * actually covers (see fl_init_mask_hashtable()).
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
1126 static int fl_init_mask_hashtable(struct fl_flow_mask
*mask
)
1128 mask
->filter_ht_params
= fl_ht_params
;
1129 mask
->filter_ht_params
.key_len
= fl_mask_range(mask
);
1130 mask
->filter_ht_params
.key_offset
+= mask
->range
.start
;
1132 return rhashtable_init(&mask
->ht
, &mask
->filter_ht_params
);
/* Helpers for building the flow_dissector key list from a fl_flow_key mask. */

/* Byte offset / size of a member inside struct fl_flow_key. */
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

/* Non-NULL iff any byte of @member in @mask is set, i.e. the key part
 * participates in matching (memchr_inv finds the first non-zero byte).
 */
#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

/* Append dissector key @id at @member's offset and advance @cnt. */
#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

/* Append the key only when the mask actually uses @member. */
#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);
/* Build @dissector's key list from @mask: control and basic keys are always
 * present; every other key is included only if the mask touches it.  The
 * ports key is also forced on when only the min/max port-range fields are
 * masked, and enc_control is forced on when either tunnel address family
 * is masked.
 */
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	/* Mandatory keys: every flower match dissects these. */
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	/* Port-range matching (tp_min/tp_max) also needs the ports key. */
	if (FL_KEY_IS_MASKED(mask, tp) ||
	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* Matching on tunnel addresses implies matching enc_control too. */
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);

	skb_flow_dissector_init(dissector, keys, cnt);
}
/* Allocate and register a new shared mask, initialized from @mask.
 * Returns the new mask, or an ERR_PTR on allocation/insert failure.
 * On success the mask is hashed in head->ht and linked on head->masks.
 */
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	/* A complete min/max pair on either port direction means this mask
	 * does port-range matching rather than exact-port matching.
	 */
	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
				     mask_ht_params);
	if (err)
		goto errout_destroy;

	list_add_tail_rcu(&newmask->list, &head->masks);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}
/* Point @fnew at a shared mask equal to @mask, creating one if none exists.
 * When replacing an existing filter (@fold != NULL) the mask must not
 * change, so a lookup miss or a different mask is rejected with -EINVAL.
 */
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;

	fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
	if (!fnew->mask) {
		/* Replacement may not introduce a new mask. */
		if (fold)
			return -EINVAL;

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask))
			return PTR_ERR(newmask);

		fnew->mask = newmask;
	} else if (fold && fold->mask != fnew->mask) {
		return -EINVAL;
	}

	return 0;
}
/* Parse and validate a filter's actions, classid and match key from
 * netlink attributes @tb.  Fills f->key, mask->key and the pre-masked
 * lookup key f->mkey; rejects masks that exceed the chain template.
 * Returns 0 or a negative errno.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	/* Recompute the used byte range of the mask, then store the
	 * key AND-ed with it for fast hashtable lookups.
	 */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}
/* Create or replace a flower filter.  Parses @tca into a new filter,
 * assigns (or creates) its shared mask, allocates a handle, inserts the
 * filter into the mask's hashtable and offloads it to hardware unless
 * skip_hw.  On replace, the old filter is unlinked and freed via RCU work.
 * Error paths unwind in reverse order of the setup steps.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	/* Temporary mask, merged into (or matched against) a shared one
	 * by fl_check_assign_mask(); always freed before returning.
	 */
	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask)
		return -ENOBUFS;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	/* A replace must target the same handle it names. */
	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	if (!handle) {
		/* Kernel-chosen handle: any free id >= 1. */
		handle = 1;
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!fold) {
		/* user specifies a handle and it doesn't exist */
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    handle, GFP_KERNEL);
	}
	if (err)
		goto errout_mask;
	fnew->handle = handle;

	/* Reject a new filter whose masked key duplicates an existing one. */
	if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
		err = -EEXIST;
		goto errout_idr;
	}

	err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
				     fnew->mask->filter_ht_params);
	if (err)
		goto errout_idr;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, extack);
		if (err)
			goto errout_mask_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
		/* Unlink the replaced filter before publishing the new one. */
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, NULL);
	}

	*arg = fnew;

	if (fold) {
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		/* Free the old filter after an RCU grace period. */
		tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
	} else {
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
	}

	kfree(tb);
	kfree(mask);
	return 0;

errout_mask_ht:
	rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
			       fnew->mask->filter_ht_params);
errout_idr:
	if (!fold)
		idr_remove(&head->handle_idr, fnew->handle);
errout_mask:
	fl_mask_put(head, fnew->mask, false);
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	kfree(mask);
	return err;
}
/* Delete filter @arg: remove it from its mask's hashtable, then fully
 * unlink/free it via __fl_delete().  Sets *last when no masks remain,
 * which tells the caller the whole tp can be destroyed.
 */
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = arg;

	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	__fl_delete(tp, f, extack);
	*last = list_empty(&head->masks);
	return 0;
}
/* Iterate all filters in handle order, invoking arg->fn on each.
 * arg->cookie is the resume point (next handle to visit), so a walk can
 * be restarted after a partial pass; a negative fn return stops the walk.
 */
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	while ((f = idr_get_next_ul(&head->handle_idr,
				    &arg->cookie)) != NULL) {
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
		/* Advance past the current handle for the next lookup. */
		arg->cookie = f->handle + 1;
		arg->count++;
	}
}
/* Replay (add) or remove all hardware-capable filters through callback
 * @cb, used when a driver (un)registers for offloads.  Failures are fatal
 * only for filters marked skip_sw, since those have no software fallback;
 * otherwise the filter is skipped and the walk continues.
 */
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;
	int err;

	list_for_each_entry(mask, &head->masks, list) {
		list_for_each_entry(f, &mask->filters, list) {
			if (tc_skip_hw(f->flags))
				continue;

			cls_flower.rule =
				flow_rule_alloc(tcf_exts_num_actions(&f->exts));
			if (!cls_flower.rule)
				return -ENOMEM;

			tc_cls_common_offload_init(&cls_flower.common, tp,
						   f->flags, extack);
			cls_flower.command = add ?
				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
			cls_flower.cookie = (unsigned long)f;
			cls_flower.rule->match.dissector = &mask->dissector;
			cls_flower.rule->match.mask = &mask->key;
			cls_flower.rule->match.key = &f->mkey;

			err = tc_setup_flow_action(&cls_flower.rule->action,
						   &f->exts);
			if (err) {
				kfree(cls_flower.rule);
				if (tc_skip_sw(f->flags)) {
					NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
					return err;
				}
				continue;
			}

			cls_flower.classid = f->res.classid;

			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
			kfree(cls_flower.rule);

			if (err) {
				if (add && tc_skip_sw(f->flags))
					return err;
				continue;
			}

			/* Keep the in-hardware reference count in sync. */
			tc_cls_offload_cnt_update(block, &f->in_hw_count,
						  &f->flags, add);
		}
	}

	return 0;
}
/* Advertise a newly-created chain template to offload-capable drivers.
 * Driver failures are deliberately ignored (the template is only a hint);
 * only the local allocation can fail.
 */
static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	kfree(cls_flower.rule);

	return 0;
}
1563 static void fl_hw_destroy_tmplt(struct tcf_chain
*chain
,
1564 struct fl_flow_tmplt
*tmplt
)
1566 struct tc_cls_flower_offload cls_flower
= {};
1567 struct tcf_block
*block
= chain
->block
;
1569 cls_flower
.common
.chain_index
= chain
->index
;
1570 cls_flower
.command
= TC_CLSFLOWER_TMPLT_DESTROY
;
1571 cls_flower
.cookie
= (unsigned long) tmplt
;
1573 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false);
/* Create a flower chain template from netlink attributes: parse the match
 * key/mask into the template, build its dissector and hint the hardware.
 * Returns the template pointer or an ERR_PTR on failure.
 */
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}
/* Destroy a chain template: tell the hardware first, then free it. */
static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}
/* Emit one key/mask attribute pair to a dump.  Skips the pair entirely if
 * the mask is all-zero (the field isn't matched); omits the mask attribute
 * when @mask_type is TCA_FLOWER_UNSPEC (exact-match-only fields).
 * Returns 0 or the nla_put() error.
 */
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
/* Dump the four port-range bounds (dst/src min and max).  Range bounds
 * have no separate mask attribute, hence TCA_FLOWER_UNSPEC.  Returns 0 on
 * success, -1 if any attribute failed to fit.
 */
static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.src)))
		return -1;

	return 0;
}
/* Dump the MPLS match fields (TTL, TC, label, bottom-of-stack).  Nothing
 * is emitted when the whole MPLS mask is zero; each field is emitted only
 * if its individual mask bits are set.
 */
static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}
/* Dump IP TOS/TTL key+mask, selecting the outer-header (@encap true) or
 * inner-header attribute ids.  Returns 0 on success, -1 on put failure.
 */
static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}
/* Dump VLAN id/priority under caller-supplied attribute ids (so the same
 * helper serves both outer VLAN and inner CVLAN).  Skips an all-zero mask.
 */
static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}
1743 static void fl_get_key_flag(u32 dissector_key
, u32 dissector_mask
,
1744 u32
*flower_key
, u32
*flower_mask
,
1745 u32 flower_flag_bit
, u32 dissector_flag_bit
)
1747 if (dissector_mask
& dissector_flag_bit
) {
1748 *flower_mask
|= flower_flag_bit
;
1749 if (dissector_key
& dissector_flag_bit
)
1750 *flower_key
|= flower_flag_bit
;
/* Dump the control-flags key/mask pair (fragment bits) as two big-endian
 * 32-bit attributes.  Nothing is emitted when the flags mask is zero.
 */
static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	/* uAPI carries the flags in network byte order. */
	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}
/* Dump geneve tunnel options as a nested attribute.  Options are packed
 * back-to-back in enc_opts->data; each struct geneve_opt carries its data
 * length in 4-byte units, which also drives the walk over the buffer.
 */
static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		/* opt->length counts 4-byte words of option data. */
		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
/* Dump tunnel options (key or mask side, chosen by @enc_opt_type) inside
 * a nest, dispatching on the option kind.  Only geneve options are
 * currently supported; anything else cancels the nest and fails.
 */
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
/* Dump both halves of the tunnel-options match: the key values and the
 * corresponding mask, each as its own nested attribute.
 */
static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}
/* Serialize a full flower key/mask pair into netlink attributes.  Mirrors
 * fl_set_key(): per-field helpers silently skip unmasked fields, and the
 * L3/L4-specific sections are gated on the same n_proto/ip_proto values
 * used at parse time.  Returns 0 or -EMSGSIZE.
 */
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	/* With QinQ, the outer tpid is reported via VLAN_ETH_TYPE. */
	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			    sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			    sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* Dump one filter to a netlink message: classid, key/mask, flags,
 * hardware-offload count and actions, all inside a TCA_OPTIONS nest.
 * Stats are refreshed from hardware first unless the filter is skip_hw.
 * Returns the message length, or -1 if the skb ran out of room.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &f->mask->key;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* Dump a chain template's key/mask inside a TCA_OPTIONS nest.  Returns
 * the message length, or -EMSGSIZE if the skb ran out of room.
 */
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
/* Re-bind a filter to class @cl when its stored classid matches; called
 * by the qdisc layer when a class is being (un)bound.
 */
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}
/* Classifier ops table registered with the tc core.
 * NOTE(review): entries not visible in this chunk (.kind, .init, .get,
 * .walk, .dump) are reconstructed from the standard flower ops layout —
 * confirm against the full source.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
};
/* Module init: register the flower classifier with the tc core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Module entry/exit hooks and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");