/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

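/* Illustrative usage only (not part of this file's API): with the
 * matching iproute2 support, a flower filter is typically configured
 * along the lines of
 *
 *   tc filter add dev eth0 parent ffff: protocol ip \
 *	flower ip_proto tcp dst_ip 192.0.2.1 dst_port 80 \
 *	action drop
 *
 * The exact option names depend on the tc(8) version carrying flower
 * support; treat this as a sketch, not authoritative syntax.
 */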
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

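/* fl_flow_key aggregates all flow dissector keys the classifier can
 * match on. Keeping them in one flat, long-aligned structure lets the
 * masked-compare and hashing code below walk the key in word-sized
 * chunks instead of field by field.
 */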
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	union {
		struct flow_dissector_key_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

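/* A mask has the same layout as the key. The range records the first
 * and last masked bytes, rounded out to long boundaries, so comparisons
 * and hash lookups only ever touch that slice of the key.
 */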
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

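/* Per-classifier-instance state: one rhashtable keyed on the masked key
 * range, a single mask shared by all filters of the instance, a flow
 * dissector configured with only the keys that mask needs, and a list
 * of filters for walking and handle lookup.
 */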
struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	struct rcu_head rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

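/* Build the masked key that is stored in (and looked up from) the hash
 * table: key AND mask, computed long-by-long over the masked range only.
 */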
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

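/* Fast path: dissect the skb into a key, apply the instance's single
 * mask, and resolve the matching filter with one hash table lookup.
 */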
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here from the skb instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};

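/* Copy one key attribute and its mask from netlink. When no mask
 * attribute is supplied (or none is defined for this key), default to
 * an all-ones mask, i.e. an exact match on the value.
 */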
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	int err;

	if (tb[TCA_FLOWER_INDEV]) {
		err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}
	if (key->basic.n_proto == htons(ETH_P_IP)) {
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

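/* The hash table hashes only the masked byte range: key_len is the
 * length of that range and key_offset is shifted by range.start, so the
 * table never looks at bytes outside the mask.
 */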
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member) \
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)					\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IN_RANGE(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)

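/* Tell the flow dissector to extract only the keys whose bytes overlap
 * the masked range; the basic key is always included since n_proto and
 * ip_proto drive the parsing above.
 */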
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

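/* A classifier instance supports exactly one mask. The first filter
 * installs it (sizing the hash table and dissector accordingly); later
 * filters must use an identical mask or are rejected with -EINVAL.
 */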
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* The mask is not assigned yet, so assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

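/* Create or replace a filter. A replacement is inserted into the hash
 * table before the old entry is removed and freed after an RCU grace
 * period, so concurrent classification never observes a gap.
 */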
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;
	if (fold)
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

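/* Dump one key/mask pair back to user space. Fully unmasked keys (mask
 * all zeroes) were never requested and are skipped entirely.
 */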
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->basic.n_proto == htons(ETH_P_IP) &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");