/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

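/*
 * One classification result: the class a key maps to plus any attached
 * actions/extensions.  In perfect-hash mode the table is simply a flat
 * array of these, indexed directly by the masked, shifted tc_index.
 * The work/rcu union supports the two-stage deferred teardown described
 * at tcindex_destroy_rexts_work() below.
 */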
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

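/*
 * One node of the imperfect hash: keys that collide modulo the table
 * size are kept on a singly linked, RCU-protected chain per bucket.
 */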
struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

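/*
 * Look a key up in whichever table is in use: direct array indexing for
 * the perfect hash, a walk of the bucket chain for the imperfect one.
 * Returns NULL on a miss, or when the perfect-hash slot is still unset.
 */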
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

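/*
 * Classify on tc_index: key = (skb->tc_index & mask) >> shift.  For
 * example, with mask 0x3f and shift 2, tc_index 0x2a yields key 0xa.
 * If nothing matches and fall_through is set, the key itself becomes
 * the minor number of the returned class handle, the idea being that
 * sparsely populated tables can still map unmatched keys to classes.
 */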
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

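/*
 * A fresh instance matches all 16 bits of tc_index and falls through on
 * a miss; the actual tables are only allocated on the first change.
 */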
static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

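/*
 * Teardown runs in two stages: call_rcu() waits out the lockless
 * readers, then the RCU callback bounces the real work to a workqueue
 * via tcf_queue_work(), since tcf_exts_destroy() takes the RTNL mutex
 * and may sleep, which is not allowed in RCU (softirq) context.  The
 * *_rexts variants release a bare result embedded in the perfect hash;
 * the *_fexts variants also free the chained imperfect-hash node.
 */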
static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(work, struct tcindex_filter_result, work);
	rtnl_lock();
	tcf_exts_destroy(&r->exts);
	rtnl_unlock();
}

static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	INIT_WORK(&r->work, tcindex_destroy_rexts_work);
	tcf_queue_work(&r->work);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(work, struct tcindex_filter,
						work);

	rtnl_lock();
	tcf_exts_destroy(&f->result.exts);
	kfree(f);
	rtnl_unlock();
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	INIT_WORK(&f->work, tcindex_destroy_fexts_work);
	tcf_queue_work(&f->work);
}

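/*
 * Deletion leaves the table itself in place and only empties one slot:
 * in perfect-hash mode the result's extensions are released after a
 * grace period; in imperfect mode the filter node is first unlinked
 * from its bucket chain, then freed the same way.
 */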
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f)
		call_rcu(&f->rcu, tcindex_destroy_fexts);
	else
		call_rcu(&r->rcu, tcindex_destroy_rexts);

	*last = false;
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   void *arg, struct tcf_walker *walker)
{
	bool last;

	return tcindex_delete(tp, arg, &last);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

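/*
 * A perfect hash needs one slot per value the masked, shifted key can
 * take, i.e. strictly more than mask >> shift entries.  E.g. mask 0xf0
 * with shift 4 bounds the key at 15, so 16 slots suffice.
 */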
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

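/*
 * Allocate the flat result array and initialize every slot's exts;
 * on failure, whatever was set up so far is torn down again.
 */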
static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

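/*
 * Updates follow the usual copy/update/replace RCU pattern: build a new
 * tcindex_data (sharing or copying the old tables), apply and validate
 * the netlink attributes, swap it in with rcu_assign_pointer() and free
 * the old head after a grace period, so that lockless readers in
 * tcindex_classify() always see a consistent instance.
 */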
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout1;
	err = tcindex_filter_result_init(&cr);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
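	/* Example: mask 0xf0, shift 4 caps the key at 0xf, so a 16-entry
	 * perfect hash is chosen; mask 0xffff, shift 0 exceeds the
	 * threshold and falls back to a 64-bucket imperfect hash.
	 */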

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(&r->exts, &e);
	else
		tcf_exts_change(&cr.exts, &e);

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr.res;
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&cr.exts);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

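/*
 * Netlink entry point for adding/changing a filter: parse TCA_OPTIONS
 * against the tcindex policy, then hand the attributes to
 * tcindex_set_parms().
 */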
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

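/*
 * Visit every set result, honouring the walker's skip/count bookkeeping:
 * first the populated slots of the perfect hash, then every node on
 * every bucket chain of the imperfect hash.
 */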
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	call_rcu(&p->rcu, __tcindex_destroy);
}

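/*
 * Dump either the whole classifier (fh == NULL: hash, mask, shift and
 * fall_through) or one filter; for the imperfect hash the handle has to
 * be recovered by searching the buckets for the matching result.
 */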
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

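/*
 * Keep a stored result's class pointer in sync when the class its
 * classid refers to is bound or unbound by the owning qdisc.
 */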
static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");