cls_tcindex: use tcf_exts_get_net() before call_rcu()
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

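/*
 * Example usage (a sketch, not part of this file): tcindex matches on
 * skb->tc_index, which is typically set by a dsmark qdisc or a prior
 * action.  The option names correspond to the TCA_TCINDEX_* attributes
 * parsed below, e.g.:
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *           handle 0x2e tcindex mask 0xff shift 0 classid 1:1 pass_on
 */
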
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

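/*
 * Both filter results and filters embed a work_struct/rcu_head union:
 * teardown is deferred with call_rcu(), and the RCU callback then queues a
 * work item (tcf_queue_work) so that tcf_exts_destroy() runs in process
 * context where the RTNL lock can be taken.
 */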
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

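/*
 * Lookup either indexes the perfect hash directly (an array of
 * tcindex_filter_result keyed by the masked, shifted tc_index) or walks
 * the collision chain of bucket key % p->hash in the imperfect hash.
 */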
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

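/*
 * Classification masks and shifts skb->tc_index into a key.  On a lookup
 * miss, fall_through mode synthesizes a classid from the qdisc handle and
 * the key instead of failing the classification.
 */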
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(work, struct tcindex_filter_result, work);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	INIT_WORK(&r->work, tcindex_destroy_rexts_work);
	tcf_queue_work(&r->work);
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(work, struct tcindex_filter,
						work);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	INIT_WORK(&f->work, tcindex_destroy_fexts_work);
	tcf_queue_work(&f->work);
}

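/*
 * Deletion unlinks the filter (imperfect hash) or clears the result slot
 * (perfect hash), then defers destruction through call_rcu() only if a
 * reference on the netns can still be taken via tcf_exts_get_net(); if the
 * netns is already being dismantled, the extensions are destroyed
 * immediately.  This is the change named in the commit subject above.
 */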
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			call_rcu(&f->rcu, tcindex_destroy_fexts);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			call_rcu(&r->rcu, tcindex_destroy_rexts);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   void *arg, struct tcf_walker *walker)
{
	bool last;

	return tcindex_delete(tp, arg, &last);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

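/*
 * A perfect hash is usable only when the table has a slot for every
 * possible masked, shifted key, i.e. hash > mask >> shift.
 */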
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

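/*
 * Changes must look atomic to the RCU lookup path, so tcindex_set_parms()
 * builds a new tcindex_data (copying or re-allocating the hash tables as
 * needed), fills it in from the netlink attributes, publishes it with
 * rcu_assign_pointer() and frees the old one after a grace period.
 */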
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout1;
	err = tcindex_filter_result_init(&cr);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(&r->exts, &e);
	else
		tcf_exts_change(&cr.exts, &e);

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr.res;
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&cr.exts);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

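/*
 * The walker visits every set entry in the perfect hash and every filter
 * in the imperfect hash, honouring walker->skip/count, and stops early if
 * the callback returns a negative value.
 */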
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

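/*
 * Destroy reuses the walker with tcindex_destroy_element() to delete every
 * filter, then frees the table itself after an RCU grace period.
 */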
static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	call_rcu(&p->rcu, __tcindex_destroy);
}

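/*
 * With fh == NULL the per-classifier parameters (hash, mask, shift,
 * fall_through) are dumped; otherwise a single filter result is dumped,
 * recovering its handle from its position in the perfect hash or from the
 * matching key in the imperfect hash.
 */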
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");