1 diff -Naur linux-2.6.32.8.org/drivers/net/imq.c linux-2.6.32.8/drivers/net/imq.c
2 --- linux-2.6.32.8.org/drivers/net/imq.c 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.32.8/drivers/net/imq.c 2010-02-17 20:45:00.000000000 +0100
4 @@ -0,0 +1,632 @@
5 +/*
6 + * Pseudo-driver for the intermediate queue device.
7 + *
8 + * This program is free software; you can redistribute it and/or
9 + * modify it under the terms of the GNU General Public License
10 + * as published by the Free Software Foundation; either version
11 + * 2 of the License, or (at your option) any later version.
12 + *
13 + * Authors: Patrick McHardy, <kaber@trash.net>
14 + *
15 + * The first version was written by Martin Devera, <devik@cdi.cz>
16 + *
17 + * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
18 + * - Update patch to 2.4.21
19 + * Sebastian Strollo <sstrollo@nortelnetworks.com>
20 + * - Fix "Dead-loop on netdevice imq"-issue
21 + * Marcel Sebek <sebek64@post.cz>
22 + * - Update to 2.6.2-rc1
23 + *
24 + * After some time of inactivity there is a group taking care
25 + * of IMQ again: http://www.linuximq.net
26 + *
27 + *
28 + * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
29 + * including the following changes:
30 + *
31 + * - Correction of ipv6 support "+"s issue (Hasso Tepper)
32 + * - Correction of imq_init_devs() issue that resulted in
33 + * kernel OOPS when unloading IMQ as a module (Norbert Buchmuller)
34 + * - Addition of functionality to choose number of IMQ devices
35 + * during kernel config (Andre Correa)
36 + * - Addition of functionality to choose how IMQ hooks on
37 + * PRE and POSTROUTING (after or before NAT) (Andre Correa)
38 + * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
39 + *
40 + *
41 + * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
42 + * released with almost no problems. 2.6.14-x was released
43 + * with some important changes: nfcache was removed. After
44 + * some weeks of trouble we figured out that some IMQ fields
45 + * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
46 + * These functions are correctly patched by this new patch version.
47 + *
48 + * Thanks for all who helped to figure out all the problems with
49 + * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
50 + * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
51 + * I didn't forget anybody). I apologize again for my lack of time.
52 + *
53 + *
54 + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
55 + * of qdisc_restart() and moved qdisc_run() to a tasklet to avoid
56 + * recursive locking. New initialization routines to fix 'rmmod' not
57 + * working anymore. Used code from ifb.c. (Jussi Kivilinna)
58 + *
59 + * 2008/08/06 - 2.6.26 - (JK)
60 + * - Replaced tasklet with 'netif_schedule()'.
61 + * - Cleaned up and added comments for imq_nf_queue().
62 + *
63 + * 2009/04/12
64 + * - Add skb_save_cb/skb_restore_cb helper functions for backing up
65 + * the control buffer. This is needed because the qdisc layer on
66 + * kernels 2.6.27 and newer overwrites the control buffer. (Jussi Kivilinna)
67 + * - Add better locking for IMQ device. Hopefully this will solve
68 + * SMP issues. (Jussi Kivilinna)
69 + * - Port to 2.6.27
70 + * - Port to 2.6.28
71 + * - Port to 2.6.29 + fix rmmod not working
72 + *
73 + * 2009/04/20 - (Jussi Kivilinna)
74 + * - Use netdevice feature flags to avoid extra packet handling
75 + * by core networking layer and possibly increase performance.
76 + *
77 + * 2009/09/26 - (Jussi Kivilinna)
78 + * - Add imq_nf_reinject_lockless to fix deadlock with
79 + * imq_nf_queue/imq_nf_reinject.
80 + *
81 + * 2009/12/08 - (Jussi Kivilinna)
82 + * - Port to 2.6.32
83 + * - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
84 + * - Also add better error checking for skb->nf_queue_entry usage
85 + *
86 + * Also, many thanks to Pablo Sebastian Greco for making the initial
87 + * patch and to those who helped with the testing.
88 + *
89 + * More info at: http://www.linuximq.net/ (Andre Correa)
90 + */
91 +
92 +#include <linux/module.h>
93 +#include <linux/kernel.h>
94 +#include <linux/moduleparam.h>
95 +#include <linux/list.h>
96 +#include <linux/skbuff.h>
97 +#include <linux/netdevice.h>
98 +#include <linux/etherdevice.h>
99 +#include <linux/rtnetlink.h>
100 +#include <linux/if_arp.h>
101 +#include <linux/netfilter.h>
102 +#include <linux/netfilter_ipv4.h>
103 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
104 + #include <linux/netfilter_ipv6.h>
105 +#endif
106 +#include <linux/imq.h>
107 +#include <net/pkt_sched.h>
108 +#include <net/netfilter/nf_queue.h>
109 +
110 +static nf_hookfn imq_nf_hook;
111 +
112 +static struct nf_hook_ops imq_ingress_ipv4 = {
113 + .hook = imq_nf_hook,
114 + .owner = THIS_MODULE,
115 + .pf = PF_INET,
116 + .hooknum = NF_INET_PRE_ROUTING,
117 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
118 + .priority = NF_IP_PRI_MANGLE + 1
119 +#else
120 + .priority = NF_IP_PRI_NAT_DST + 1
121 +#endif
122 +};
123 +
124 +static struct nf_hook_ops imq_egress_ipv4 = {
125 + .hook = imq_nf_hook,
126 + .owner = THIS_MODULE,
127 + .pf = PF_INET,
128 + .hooknum = NF_INET_POST_ROUTING,
129 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
130 + .priority = NF_IP_PRI_LAST
131 +#else
132 + .priority = NF_IP_PRI_NAT_SRC - 1
133 +#endif
134 +};
135 +
136 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
137 +static struct nf_hook_ops imq_ingress_ipv6 = {
138 + .hook = imq_nf_hook,
139 + .owner = THIS_MODULE,
140 + .pf = PF_INET6,
141 + .hooknum = NF_INET_PRE_ROUTING,
142 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
143 + .priority = NF_IP6_PRI_MANGLE + 1
144 +#else
145 + .priority = NF_IP6_PRI_NAT_DST + 1
146 +#endif
147 +};
148 +
149 +static struct nf_hook_ops imq_egress_ipv6 = {
150 + .hook = imq_nf_hook,
151 + .owner = THIS_MODULE,
152 + .pf = PF_INET6,
153 + .hooknum = NF_INET_POST_ROUTING,
154 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
155 + .priority = NF_IP6_PRI_LAST
156 +#else
157 + .priority = NF_IP6_PRI_NAT_SRC - 1
158 +#endif
159 +};
160 +#endif
161 +
162 +#if defined(CONFIG_IMQ_NUM_DEVS)
163 +static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
164 +#else
165 +static unsigned int numdevs = IMQ_MAX_DEVS;
166 +#endif
167 +
168 +static DEFINE_SPINLOCK(imq_nf_queue_lock);
169 +
170 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
171 +
172 +
173 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
174 +{
175 + return &dev->stats;
176 +}
177 +
178 +/* called for packets kfree'd in qdiscs at places other than enqueue */
179 +static void imq_skb_destructor(struct sk_buff *skb)
180 +{
181 + struct nf_queue_entry *entry = skb->nf_queue_entry;
182 +
183 + skb->nf_queue_entry = NULL;
184 +
185 + if (entry) {
186 + nf_queue_entry_release_refs(entry);
187 + kfree(entry);
188 + }
189 +
190 + skb_restore_cb(skb); /* kfree backup */
191 +}
192 +
193 +/* locking not needed when called from imq_nf_queue */
194 +static void imq_nf_reinject_lockless(struct nf_queue_entry *entry,
195 + unsigned int verdict)
196 +{
197 + int status;
198 +
199 + if (!entry->next_outfn) {
200 + nf_reinject(entry, verdict);
201 + return;
202 + }
203 +
204 + status = entry->next_outfn(entry, entry->next_queuenum);
205 + if (status < 0) {
206 + nf_queue_entry_release_refs(entry);
207 + kfree_skb(entry->skb);
208 + kfree(entry);
209 + }
210 +}
211 +
212 +static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
213 +{
214 + int status;
215 +
216 + if (!entry->next_outfn) {
217 + spin_lock_bh(&imq_nf_queue_lock);
218 + nf_reinject(entry, verdict);
219 + spin_unlock_bh(&imq_nf_queue_lock);
220 + return;
221 + }
222 +
223 + rcu_read_lock();
224 + local_bh_disable();
225 + status = entry->next_outfn(entry, entry->next_queuenum);
226 + local_bh_enable();
227 + if (status < 0) {
228 + nf_queue_entry_release_refs(entry);
229 + kfree_skb(entry->skb);
230 + kfree(entry);
231 + }
232 +
233 + rcu_read_unlock();
234 +}
235 +
236 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
237 +{
238 + struct nf_queue_entry *entry = skb->nf_queue_entry;
239 +
240 + skb->nf_queue_entry = NULL;
241 + dev->trans_start = jiffies;
242 +
243 + dev->stats.tx_bytes += skb->len;
244 + dev->stats.tx_packets++;
245 +
246 + if (entry == NULL) {
247 + /* We don't know what is going on here: the packet is queued for
248 + * an imq device, but (probably) not by us.
249 + *
250 + * If this packet was not sent here by imq_nf_queue(), then
251 + * skb_save_cb() was not used and kfree_skb() should not show:
252 + * WARNING: IMQ: kfree_skb: skb->cb_next:..
253 + * and/or
254 + * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
255 + *
256 + * However, if such a message is shown, then IMQ is somehow broken
257 + * and you should report this to linuximq.net.
258 + */
259 +
260 + /* imq_dev_xmit is a black hole that eats all packets; report that
261 + * we ate this packet happily and increase the dropped counter.
262 + */
263 +
264 + dev->stats.tx_dropped++;
265 + dev_kfree_skb(skb);
266 +
267 + return NETDEV_TX_OK;
268 + }
269 +
270 + skb_restore_cb(skb); /* restore skb->cb */
271 +
272 + skb->imq_flags = 0;
273 + skb->destructor = NULL;
274 +
275 + imq_nf_reinject(entry, NF_ACCEPT);
276 +
277 + return NETDEV_TX_OK;
278 +}
279 +
280 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
281 +{
282 + struct net_device *dev;
283 + struct sk_buff *skb_orig, *skb, *skb_shared;
284 + struct Qdisc *q;
285 + struct netdev_queue *txq;
286 + int users, index;
287 + int retval = -EINVAL;
288 +
289 + index = entry->skb->imq_flags & IMQ_F_IFMASK;
290 + if (unlikely(index > numdevs - 1)) {
291 + if (net_ratelimit())
292 + printk(KERN_WARNING
293 + "IMQ: invalid device specified, highest is %u\n",
294 + numdevs - 1);
295 + retval = -EINVAL;
296 + goto out;
297 + }
298 +
299 + /* check for imq device by index from cache */
300 + dev = imq_devs_cache[index];
301 + if (unlikely(!dev)) {
302 + char buf[8];
303 +
304 + /* get device by name and cache result */
305 + snprintf(buf, sizeof(buf), "imq%d", index);
306 + dev = dev_get_by_name(&init_net, buf);
307 + if (!dev) {
308 + /* not found?! */
309 + BUG();
310 + retval = -ENODEV;
311 + goto out;
312 + }
313 +
314 + imq_devs_cache[index] = dev;
315 + dev_put(dev);
316 + }
317 +
318 + if (unlikely(!(dev->flags & IFF_UP))) {
319 + entry->skb->imq_flags = 0;
320 + imq_nf_reinject_lockless(entry, NF_ACCEPT);
321 + retval = 0;
322 + goto out;
323 + }
324 + dev->last_rx = jiffies;
325 +
326 + skb = entry->skb;
327 + skb_orig = NULL;
328 +
329 + /* skb has owner? => make clone */
330 + if (unlikely(skb->destructor)) {
331 + skb_orig = skb;
332 + skb = skb_clone(skb, GFP_ATOMIC);
333 + if (!skb) {
334 + retval = -ENOMEM;
335 + goto out;
336 + }
337 + entry->skb = skb;
338 + }
339 +
340 + skb->nf_queue_entry = entry;
341 +
342 + dev->stats.rx_bytes += skb->len;
343 + dev->stats.rx_packets++;
344 +
345 + txq = dev_pick_tx(dev, skb);
346 +
347 + q = rcu_dereference(txq->qdisc);
348 + if (unlikely(!q->enqueue))
349 + goto packet_not_eaten_by_imq_dev;
350 +
351 + spin_lock_bh(qdisc_lock(q));
352 +
353 + users = atomic_read(&skb->users);
354 +
355 + skb_shared = skb_get(skb); /* increase reference count by one */
356 + skb_save_cb(skb_shared); /* back up skb->cb, as the qdisc layer
357 + will overwrite it */
358 + qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
359 +
360 + if (likely(atomic_read(&skb_shared->users) == users + 1)) {
361 + kfree_skb(skb_shared); /* decrease reference count by one */
362 +
363 + skb->destructor = &imq_skb_destructor;
364 +
365 + /* cloned? */
366 + if (skb_orig)
367 + kfree_skb(skb_orig); /* free original */
368 +
369 + spin_unlock_bh(qdisc_lock(q));
370 +
371 + /* schedule qdisc dequeue */
372 + __netif_schedule(q);
373 +
374 + retval = 0;
375 + goto out;
376 + } else {
377 + skb_restore_cb(skb_shared); /* restore skb->cb */
378 + skb->nf_queue_entry = NULL;
379 + /* The qdisc dropped the packet and decreased the skb reference
380 + * count, so we must not free it again here, as that would
381 + * actually destroy the skb. */
382 + spin_unlock_bh(qdisc_lock(q));
383 + goto packet_not_eaten_by_imq_dev;
384 + }
385 +
386 +packet_not_eaten_by_imq_dev:
387 + /* cloned? restore original */
388 + if (skb_orig) {
389 + kfree_skb(skb);
390 + entry->skb = skb_orig;
391 + }
392 + retval = -1;
393 +out:
394 + return retval;
395 +}
396 +
397 +static struct nf_queue_handler nfqh = {
398 + .name = "imq",
399 + .outfn = imq_nf_queue,
400 +};
401 +
402 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
403 + const struct net_device *indev,
404 + const struct net_device *outdev,
405 + int (*okfn)(struct sk_buff *))
406 +{
407 + if (pskb->imq_flags & IMQ_F_ENQUEUE)
408 + return NF_QUEUE;
409 +
410 + return NF_ACCEPT;
411 +}
412 +
413 +static int imq_close(struct net_device *dev)
414 +{
415 + netif_stop_queue(dev);
416 + return 0;
417 +}
418 +
419 +static int imq_open(struct net_device *dev)
420 +{
421 + netif_start_queue(dev);
422 + return 0;
423 +}
424 +
425 +static const struct net_device_ops imq_netdev_ops = {
426 + .ndo_open = imq_open,
427 + .ndo_stop = imq_close,
428 + .ndo_start_xmit = imq_dev_xmit,
429 + .ndo_get_stats = imq_get_stats,
430 +};
431 +
432 +static void imq_setup(struct net_device *dev)
433 +{
434 + dev->netdev_ops = &imq_netdev_ops;
435 + dev->type = ARPHRD_VOID;
436 + dev->mtu = 16000;
437 + dev->tx_queue_len = 11000;
438 + dev->flags = IFF_NOARP;
439 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
440 + NETIF_F_GSO | NETIF_F_HW_CSUM |
441 + NETIF_F_HIGHDMA;
442 + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
443 +}
444 +
445 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
446 +{
447 + int ret = 0;
448 +
449 + if (tb[IFLA_ADDRESS]) {
450 + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
451 + ret = -EINVAL;
452 + goto end;
453 + }
454 + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
455 + ret = -EADDRNOTAVAIL;
456 + goto end;
457 + }
458 + }
459 + return 0;
460 +end:
461 + printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
462 + return ret;
463 +}
464 +
465 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
466 + .kind = "imq",
467 + .priv_size = 0,
468 + .setup = imq_setup,
469 + .validate = imq_validate,
470 +};
471 +
472 +static int __init imq_init_hooks(void)
473 +{
474 + int err;
475 +
476 + nf_register_queue_imq_handler(&nfqh);
477 +
478 + err = nf_register_hook(&imq_ingress_ipv4);
479 + if (err)
480 + goto err1;
481 +
482 + err = nf_register_hook(&imq_egress_ipv4);
483 + if (err)
484 + goto err2;
485 +
486 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
487 + err = nf_register_hook(&imq_ingress_ipv6);
488 + if (err)
489 + goto err3;
490 +
491 + err = nf_register_hook(&imq_egress_ipv6);
492 + if (err)
493 + goto err4;
494 +#endif
495 +
496 + return 0;
497 +
498 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
499 +err4:
500 + nf_unregister_hook(&imq_ingress_ipv6);
501 +err3:
502 + nf_unregister_hook(&imq_egress_ipv4);
503 +#endif
504 +err2:
505 + nf_unregister_hook(&imq_ingress_ipv4);
506 +err1:
507 + nf_unregister_queue_imq_handler();
508 + return err;
509 +}
510 +
511 +static int __init imq_init_one(int index)
512 +{
513 + struct net_device *dev;
514 + int ret;
515 +
516 + dev = alloc_netdev(0, "imq%d", imq_setup);
517 + if (!dev)
518 + return -ENOMEM;
519 +
520 + ret = dev_alloc_name(dev, dev->name);
521 + if (ret < 0)
522 + goto fail;
523 +
524 + dev->rtnl_link_ops = &imq_link_ops;
525 + ret = register_netdevice(dev);
526 + if (ret < 0)
527 + goto fail;
528 +
529 + return 0;
530 +fail:
531 + free_netdev(dev);
532 + return ret;
533 +}
534 +
535 +static int __init imq_init_devs(void)
536 +{
537 + int err, i;
538 +
539 + if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
540 + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
541 + IMQ_MAX_DEVS);
542 + return -EINVAL;
543 + }
544 +
545 + rtnl_lock();
546 + err = __rtnl_link_register(&imq_link_ops);
547 +
548 + for (i = 0; i < numdevs && !err; i++)
549 + err = imq_init_one(i);
550 +
551 + if (err) {
552 + __rtnl_link_unregister(&imq_link_ops);
553 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
554 + }
555 + rtnl_unlock();
556 +
557 + return err;
558 +}
559 +
560 +static int __init imq_init_module(void)
561 +{
562 + int err;
563 +
564 +#if defined(CONFIG_IMQ_NUM_DEVS)
565 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
566 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
567 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
568 +#endif
569 +
570 + err = imq_init_devs();
571 + if (err) {
572 + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
573 + return err;
574 + }
575 +
576 + err = imq_init_hooks();
577 + if (err) {
578 + printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
579 + rtnl_link_unregister(&imq_link_ops);
580 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
581 + return err;
582 + }
583 +
584 + printk(KERN_INFO "IMQ driver loaded successfully.\n");
585 +
586 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
587 + printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
588 +#else
589 + printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
590 +#endif
591 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
592 + printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
593 +#else
594 + printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
595 +#endif
596 +
597 + return 0;
598 +}
599 +
600 +static void __exit imq_unhook(void)
601 +{
602 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
603 + nf_unregister_hook(&imq_ingress_ipv6);
604 + nf_unregister_hook(&imq_egress_ipv6);
605 +#endif
606 + nf_unregister_hook(&imq_ingress_ipv4);
607 + nf_unregister_hook(&imq_egress_ipv4);
608 +
609 + nf_unregister_queue_imq_handler();
610 +}
611 +
612 +static void __exit imq_cleanup_devs(void)
613 +{
614 + rtnl_link_unregister(&imq_link_ops);
615 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
616 +}
617 +
618 +static void __exit imq_exit_module(void)
619 +{
620 + imq_unhook();
621 + imq_cleanup_devs();
622 + printk(KERN_INFO "IMQ driver unloaded successfully.\n");
623 +}
624 +
625 +module_init(imq_init_module);
626 +module_exit(imq_exit_module);
627 +
628 +module_param(numdevs, int, 0);
629 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
630 + "be created)");
631 +MODULE_AUTHOR("http://www.linuximq.net");
632 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
633 + "http://www.linuximq.net/ for more information.");
634 +MODULE_LICENSE("GPL");
635 +MODULE_ALIAS_RTNL_LINK("imq");
636 +
637 diff -Naur linux-2.6.32.8.org/drivers/net/Kconfig linux-2.6.32.8/drivers/net/Kconfig
638 --- linux-2.6.32.8.org/drivers/net/Kconfig 2010-02-17 20:30:19.000000000 +0100
639 +++ linux-2.6.32.8/drivers/net/Kconfig 2010-02-17 20:45:00.000000000 +0100
640 @@ -109,6 +109,129 @@
641 To compile this driver as a module, choose M here: the module
642 will be called eql. If unsure, say N.
643
644 +config IMQ
645 + tristate "IMQ (intermediate queueing device) support"
646 + depends on NETDEVICES && NETFILTER
647 + ---help---
648 + The IMQ device(s) is used as a placeholder for QoS queueing
649 + disciplines. Every packet entering/leaving the IP stack can be
650 + directed through an IMQ device, where it is enqueued to and
651 + dequeued from the attached qdisc. This allows you to treat network
652 + devices as classes and distribute bandwidth among them. Iptables is
653 + used to specify through which IMQ device, if any, packets travel.
654 +
655 + More information at: http://www.linuximq.net/
656 +
657 + To compile this driver as a module, choose M here: the module
658 + will be called imq. If unsure, say N.
659 +
660 +choice
661 + prompt "IMQ behavior (PRE/POSTROUTING)"
662 + depends on IMQ
663 + default IMQ_BEHAVIOR_AB
664 + help
665 +
666 + This setting defines how IMQ behaves with respect to its
667 + hooking in PREROUTING and POSTROUTING.
668 +
669 + IMQ can work in any of the following ways:
670 +
671 + PREROUTING | POSTROUTING
672 + -----------------|-------------------
673 + #1 After NAT | After NAT
674 + #2 After NAT | Before NAT
675 + #3 Before NAT | After NAT
676 + #4 Before NAT | Before NAT
677 +
678 + The default behavior is to hook after NAT on PREROUTING
679 + and before NAT on POSTROUTING (#2).
680 +
681 + This setting is especially useful when trying to use IMQ
682 + to shape NATed clients.
683 +
684 + More information can be found at: www.linuximq.net
685 +
686 + If unsure, leave the default settings alone.
687 +
688 +config IMQ_BEHAVIOR_AA
689 + bool "IMQ AA"
690 + help
691 + This setting defines how IMQ behaves with respect to its
692 + hooking in PREROUTING and POSTROUTING.
693 +
694 + Choosing this option will make IMQ hook like this:
695 +
696 + PREROUTING: After NAT
697 + POSTROUTING: After NAT
698 +
699 + More information can be found at: www.linuximq.net
700 +
701 + If unsure, leave the default settings alone.
702 +
703 +config IMQ_BEHAVIOR_AB
704 + bool "IMQ AB"
705 + help
706 + This setting defines how IMQ behaves with respect to its
707 + hooking in PREROUTING and POSTROUTING.
708 +
709 + Choosing this option will make IMQ hook like this:
710 +
711 + PREROUTING: After NAT
712 + POSTROUTING: Before NAT
713 +
714 + More information can be found at: www.linuximq.net
715 +
716 + If unsure, leave the default settings alone.
717 +
718 +config IMQ_BEHAVIOR_BA
719 + bool "IMQ BA"
720 + help
721 + This setting defines how IMQ behaves with respect to its
722 + hooking in PREROUTING and POSTROUTING.
723 +
724 + Choosing this option will make IMQ hook like this:
725 +
726 + PREROUTING: Before NAT
727 + POSTROUTING: After NAT
728 +
729 + More information can be found at: www.linuximq.net
730 +
731 + If unsure, leave the default settings alone.
732 +
733 +config IMQ_BEHAVIOR_BB
734 + bool "IMQ BB"
735 + help
736 + This setting defines how IMQ behaves with respect to its
737 + hooking in PREROUTING and POSTROUTING.
738 +
739 + Choosing this option will make IMQ hook like this:
740 +
741 + PREROUTING: Before NAT
742 + POSTROUTING: Before NAT
743 +
744 + More information can be found at: www.linuximq.net
745 +
746 + If unsure, leave the default settings alone.
747 +
748 +endchoice
749 +
750 +config IMQ_NUM_DEVS
751 +
752 + int "Number of IMQ devices"
753 + range 2 16
754 + depends on IMQ
755 + default "16"
756 + help
757 +
758 + This setting defines how many IMQ devices will be
759 + created.
760 +
761 + The default value is 16.
762 +
763 + More information can be found at: www.linuximq.net
764 +
765 + If unsure, leave the default settings alone.
766 +
767 config TUN
768 tristate "Universal TUN/TAP device driver support"
769 select CRC32
770 diff -Naur linux-2.6.32.8.org/drivers/net/Makefile linux-2.6.32.8/drivers/net/Makefile
771 --- linux-2.6.32.8.org/drivers/net/Makefile 2010-02-17 20:30:19.000000000 +0100
772 +++ linux-2.6.32.8/drivers/net/Makefile 2010-02-17 20:46:48.000000000 +0100
773 @@ -165,6 +165,7 @@
774 obj-$(CONFIG_XEN_NETFRONT) += xen-netfront.o
775
776 obj-$(CONFIG_DUMMY) += dummy.o
777 +obj-$(CONFIG_IMQ) += imq.o
778 obj-$(CONFIG_IFB) += ifb.o
779 obj-$(CONFIG_MACVLAN) += macvlan.o
780 obj-$(CONFIG_DE600) += de600.o
781 diff -Naur linux-2.6.32.8.org/include/linux/imq.h linux-2.6.32.8/include/linux/imq.h
782 --- linux-2.6.32.8.org/include/linux/imq.h 1970-01-01 01:00:00.000000000 +0100
783 +++ linux-2.6.32.8/include/linux/imq.h 2010-02-17 20:45:00.000000000 +0100
784 @@ -0,0 +1,13 @@
785 +#ifndef _IMQ_H
786 +#define _IMQ_H
787 +
788 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
789 +#define IMQ_F_BITS 5
790 +
791 +#define IMQ_F_IFMASK 0x0f
792 +#define IMQ_F_ENQUEUE 0x10
793 +
794 +#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
795 +
796 +#endif /* _IMQ_H */
797 +
798 diff -Naur linux-2.6.32.8.org/include/linux/netdevice.h linux-2.6.32.8/include/linux/netdevice.h
799 --- linux-2.6.32.8.org/include/linux/netdevice.h 2010-02-17 20:30:18.000000000 +0100
800 +++ linux-2.6.32.8/include/linux/netdevice.h 2010-02-17 20:45:00.000000000 +0100
801 @@ -1119,6 +1119,7 @@
802 extern int dev_open(struct net_device *dev);
803 extern int dev_close(struct net_device *dev);
804 extern void dev_disable_lro(struct net_device *dev);
805 +extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
806 extern int dev_queue_xmit(struct sk_buff *skb);
807 extern int register_netdevice(struct net_device *dev);
808 extern void unregister_netdevice(struct net_device *dev);
809 diff -Naur linux-2.6.32.8.org/include/linux/netfilter/xt_IMQ.h linux-2.6.32.8/include/linux/netfilter/xt_IMQ.h
810 --- linux-2.6.32.8.org/include/linux/netfilter/xt_IMQ.h 1970-01-01 01:00:00.000000000 +0100
811 +++ linux-2.6.32.8/include/linux/netfilter/xt_IMQ.h 2010-02-17 20:45:00.000000000 +0100
812 @@ -0,0 +1,9 @@
813 +#ifndef _XT_IMQ_H
814 +#define _XT_IMQ_H
815 +
816 +struct xt_imq_info {
817 + unsigned int todev; /* target imq device */
818 +};
819 +
820 +#endif /* _XT_IMQ_H */
821 +
822 diff -Naur linux-2.6.32.8.org/include/linux/netfilter_ipv4/ipt_IMQ.h linux-2.6.32.8/include/linux/netfilter_ipv4/ipt_IMQ.h
823 --- linux-2.6.32.8.org/include/linux/netfilter_ipv4/ipt_IMQ.h 1970-01-01 01:00:00.000000000 +0100
824 +++ linux-2.6.32.8/include/linux/netfilter_ipv4/ipt_IMQ.h 2010-02-17 20:45:00.000000000 +0100
825 @@ -0,0 +1,10 @@
826 +#ifndef _IPT_IMQ_H
827 +#define _IPT_IMQ_H
828 +
829 +/* Backwards compatibility for old userspace */
830 +#include <linux/netfilter/xt_IMQ.h>
831 +
832 +#define ipt_imq_info xt_imq_info
833 +
834 +#endif /* _IPT_IMQ_H */
835 +
836 diff -Naur linux-2.6.32.8.org/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-2.6.32.8/include/linux/netfilter_ipv6/ip6t_IMQ.h
837 --- linux-2.6.32.8.org/include/linux/netfilter_ipv6/ip6t_IMQ.h 1970-01-01 01:00:00.000000000 +0100
838 +++ linux-2.6.32.8/include/linux/netfilter_ipv6/ip6t_IMQ.h 2010-02-17 20:45:00.000000000 +0100
839 @@ -0,0 +1,10 @@
840 +#ifndef _IP6T_IMQ_H
841 +#define _IP6T_IMQ_H
842 +
843 +/* Backwards compatibility for old userspace */
844 +#include <linux/netfilter/xt_IMQ.h>
845 +
846 +#define ip6t_imq_info xt_imq_info
847 +
848 +#endif /* _IP6T_IMQ_H */
849 +
850 diff -Naur linux-2.6.32.8.org/include/linux/skbuff.h linux-2.6.32.8/include/linux/skbuff.h
851 --- linux-2.6.32.8.org/include/linux/skbuff.h 2010-02-17 20:30:19.000000000 +0100
852 +++ linux-2.6.32.8/include/linux/skbuff.h 2010-02-17 20:50:34.000000000 +0100
853 @@ -29,6 +29,9 @@
854 #include <linux/rcupdate.h>
855 #include <linux/dmaengine.h>
856 #include <linux/hrtimer.h>
857 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
858 +#include <linux/imq.h>
859 +#endif
860
861 /* Don't change this without changing skb_csum_unnecessary! */
862 #define CHECKSUM_NONE 0
863 @@ -332,6 +335,9 @@
864 * first. This is owned by whoever has the skb queued ATM.
865 */
866 char cb[48];
867 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
868 + void *cb_next;
869 +#endif
870
871 unsigned int len,
872 data_len;
873 @@ -364,6 +370,9 @@
874 struct nf_conntrack *nfct;
875 struct sk_buff *nfct_reasm;
876 #endif
877 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
878 + struct nf_queue_entry *nf_queue_entry;
879 +#endif
880 #ifdef CONFIG_BRIDGE_NETFILTER
881 struct nf_bridge_info *nf_bridge;
882 #endif
883 @@ -395,6 +404,11 @@
884 #ifdef CONFIG_NET_DMA
885 dma_cookie_t dma_cookie;
886 #endif
887 +
888 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
889 + __u8 imq_flags:IMQ_F_BITS;
890 +#endif
891 +
892 #ifdef CONFIG_NETWORK_SECMARK
893 __u32 secmark;
894 #endif
895 @@ -458,6 +472,12 @@
896 return (struct rtable *)skb_dst(skb);
897 }
898
899 +
900 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
901 +extern int skb_save_cb(struct sk_buff *skb);
902 +extern int skb_restore_cb(struct sk_buff *skb);
903 +#endif
904 +
905 extern void kfree_skb(struct sk_buff *skb);
906 extern void consume_skb(struct sk_buff *skb);
907 extern void __kfree_skb(struct sk_buff *skb);
908 @@ -2008,6 +2028,10 @@
909 dst->nfct_reasm = src->nfct_reasm;
910 nf_conntrack_get_reasm(src->nfct_reasm);
911 #endif
912 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
913 + dst->imq_flags = src->imq_flags;
914 + dst->nf_queue_entry = src->nf_queue_entry;
915 +#endif
916 #ifdef CONFIG_BRIDGE_NETFILTER
917 dst->nf_bridge = src->nf_bridge;
918 nf_bridge_get(src->nf_bridge);
919 diff -Naur linux-2.6.32.8.org/include/net/netfilter/nf_queue.h linux-2.6.32.8/include/net/netfilter/nf_queue.h
920 --- linux-2.6.32.8.org/include/net/netfilter/nf_queue.h 2010-02-09 13:57:19.000000000 +0100
921 +++ linux-2.6.32.8/include/net/netfilter/nf_queue.h 2010-02-17 20:45:00.000000000 +0100
922 @@ -13,6 +13,12 @@
923 struct net_device *indev;
924 struct net_device *outdev;
925 int (*okfn)(struct sk_buff *);
926 +
927 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
928 + int (*next_outfn)(struct nf_queue_entry *entry,
929 + unsigned int queuenum);
930 + unsigned int next_queuenum;
931 +#endif
932 };
933
934 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
935 @@ -30,5 +36,11 @@
936 const struct nf_queue_handler *qh);
937 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
938 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
939 +extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
940 +
941 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
942 +extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
943 +extern void nf_unregister_queue_imq_handler(void);
944 +#endif
945
946 #endif /* _NF_QUEUE_H */
947 diff -Naur linux-2.6.32.8.org/net/core/dev.c linux-2.6.32.8/net/core/dev.c
948 --- linux-2.6.32.8.org/net/core/dev.c 2010-02-17 20:30:19.000000000 +0100
949 +++ linux-2.6.32.8/net/core/dev.c 2010-02-17 20:57:37.000000000 +0100
950 @@ -96,6 +96,9 @@
951 #include <net/net_namespace.h>
952 #include <net/sock.h>
953 #include <linux/rtnetlink.h>
954 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
955 +#include <linux/imq.h>
956 +#endif
957 #include <linux/proc_fs.h>
958 #include <linux/seq_file.h>
959 #include <linux/stat.h>
960 @@ -1723,7 +1726,11 @@
961 int rc;
962
963 if (likely(!skb->next)) {
964 - if (!list_empty(&ptype_all))
965 + if (!list_empty(&ptype_all)
966 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
967 + && !(skb->imq_flags & IMQ_F_ENQUEUE)
968 +#endif
969 + )
970 dev_queue_xmit_nit(skb, dev);
971
972 if (netif_needs_gso(dev, skb)) {
973 @@ -1808,8 +1815,7 @@
974 }
975 EXPORT_SYMBOL(skb_tx_hash);
976
977 -static struct netdev_queue *dev_pick_tx(struct net_device *dev,
978 - struct sk_buff *skb)
979 +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
980 {
981 const struct net_device_ops *ops = dev->netdev_ops;
982 u16 queue_index = 0;
983 @@ -1875,6 +1881,7 @@
984 EXPORT_SYMBOL(skb_checksum_setup);
985 #endif
986
987 +EXPORT_SYMBOL(dev_pick_tx);
988 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
989 struct net_device *dev,
990 struct netdev_queue *txq)
991 diff -Naur linux-2.6.32.8.org/net/core/skbuff.c linux-2.6.32.8/net/core/skbuff.c
992 --- linux-2.6.32.8.org/net/core/skbuff.c 2010-02-17 20:30:18.000000000 +0100
993 +++ linux-2.6.32.8/net/core/skbuff.c 2010-02-17 20:45:00.000000000 +0100
994 @@ -72,6 +72,9 @@
995
996 static struct kmem_cache *skbuff_head_cache __read_mostly;
997 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
998 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
999 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1000 +#endif
1001
1002 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
1003 struct pipe_buffer *buf)
1004 @@ -91,6 +94,83 @@
1005 return 1;
1006 }
1007
1008 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1009 +/* Control buffer save/restore for IMQ devices */
1010 +struct skb_cb_table {
1011 + void *cb_next;
1012 + atomic_t refcnt;
1013 + char cb[48];
1014 +};
1015 +
1016 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1017 +
1018 +int skb_save_cb(struct sk_buff *skb)
1019 +{
1020 + struct skb_cb_table *next;
1021 +
1022 + next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1023 + if (!next)
1024 + return -ENOMEM;
1025 +
1026 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1027 +
1028 + memcpy(next->cb, skb->cb, sizeof(skb->cb));
1029 + next->cb_next = skb->cb_next;
1030 +
1031 + atomic_set(&next->refcnt, 1);
1032 +
1033 + skb->cb_next = next;
1034 + return 0;
1035 +}
1036 +EXPORT_SYMBOL(skb_save_cb);
1037 +
1038 +int skb_restore_cb(struct sk_buff *skb)
1039 +{
1040 + struct skb_cb_table *next;
1041 +
1042 + if (!skb->cb_next)
1043 + return 0;
1044 +
1045 + next = skb->cb_next;
1046 +
1047 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1048 +
1049 + memcpy(skb->cb, next->cb, sizeof(skb->cb));
1050 + skb->cb_next = next->cb_next;
1051 +
1052 + spin_lock(&skb_cb_store_lock);
1053 +
1054 + if (atomic_dec_and_test(&next->refcnt)) {
1055 + kmem_cache_free(skbuff_cb_store_cache, next);
1056 + }
1057 +
1058 + spin_unlock(&skb_cb_store_lock);
1059 +
1060 + return 0;
1061 +}
1062 +EXPORT_SYMBOL(skb_restore_cb);
1063 +
1064 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1065 +{
1066 + struct skb_cb_table *next;
1067 + struct sk_buff *old;
1068 +
1069 + if (!__old->cb_next) {
1070 + new->cb_next = NULL;
1071 + return;
1072 + }
1073 +
1074 + spin_lock(&skb_cb_store_lock);
1075 +
1076 + old = (struct sk_buff *)__old;
1077 +
1078 + next = old->cb_next;
1079 + atomic_inc(&next->refcnt);
1080 + new->cb_next = next;
1081 +
1082 + spin_unlock(&skb_cb_store_lock);
1083 +}
1084 +#endif
1085
1086 /* Pipe buffer operations for a socket. */
1087 static struct pipe_buf_operations sock_pipe_buf_ops = {
1088 @@ -456,6 +536,26 @@
1089 WARN_ON(in_irq());
1090 skb->destructor(skb);
1091 }
1092 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1093 + /* This should not happen. When it does, avoid a memory leak by
1094 + restoring the chain of cb backups. */
1095 + while (skb->cb_next != NULL) {
1096 + if (net_ratelimit())
1097 + printk(KERN_WARNING "IMQ: kfree_skb: skb->cb_next: "
1098 + "%08x\n", (unsigned int)skb->cb_next);
1099 +
1100 + skb_restore_cb(skb);
1101 + }
1102 + /* This should not happen either: nf_queue_entry is nullified in
1103 + * imq_dev_xmit(). If we have a non-NULL nf_queue_entry then we are
1104 + * leaking entry pointers, maybe memory. We don't know whether this
1105 + * points to already freed memory or whether it should be freed here.
1106 + * If this happens, we need to add refcounting etc. for nf_queue_entry.
1107 + */
1108 + if (skb->nf_queue_entry && net_ratelimit())
1109 + printk(KERN_WARNING
1110 + "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1111 +#endif
1112 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1113 nf_conntrack_put(skb->nfct);
1114 nf_conntrack_put_reasm(skb->nfct_reasm);
1115 @@ -593,6 +693,9 @@
1116 new->sp = secpath_get(old->sp);
1117 #endif
1118 memcpy(new->cb, old->cb, sizeof(old->cb));
1119 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1120 + skb_copy_stored_cb(new, old);
1121 +#endif
1122 new->csum = old->csum;
1123 new->local_df = old->local_df;
1124 new->pkt_type = old->pkt_type;
1125 @@ -2863,6 +2966,13 @@
1126 0,
1127 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1128 NULL);
1129 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1130 + skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1131 + sizeof(struct skb_cb_table),
1132 + 0,
1133 + SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1134 + NULL);
1135 +#endif
1136 }
1137
1138 /**
1139 diff -Naur linux-2.6.32.8.org/net/netfilter/Kconfig linux-2.6.32.8/net/netfilter/Kconfig
1140 --- linux-2.6.32.8.org/net/netfilter/Kconfig 2010-02-17 20:30:16.000000000 +0100
1141 +++ linux-2.6.32.8/net/netfilter/Kconfig 2010-02-17 20:45:00.000000000 +0100
1142 @@ -411,6 +411,18 @@
1143 For more information on the LEDs available on your system, see
1144 Documentation/leds-class.txt
1145
1146 +config NETFILTER_XT_TARGET_IMQ
1147 + tristate '"IMQ" target support'
1148 + depends on NETFILTER_XTABLES
1149 + depends on IP_NF_MANGLE || IP6_NF_MANGLE
1150 + select IMQ
1151 + default m if NETFILTER_ADVANCED=n
1152 + help
1153 + This option adds an `IMQ' target which is used to specify if and
1154 + to which imq device packets should get enqueued/dequeued.
1155 +
1156 + To compile it as a module, choose M here. If unsure, say N.
1157 +
1158 config NETFILTER_XT_TARGET_MARK
1159 tristate '"MARK" target support'
1160 default m if NETFILTER_ADVANCED=n
1161 diff -Naur linux-2.6.32.8.org/net/netfilter/Makefile linux-2.6.32.8/net/netfilter/Makefile
1162 --- linux-2.6.32.8.org/net/netfilter/Makefile 2010-02-17 20:30:16.000000000 +0100
1163 +++ linux-2.6.32.8/net/netfilter/Makefile 2010-02-17 20:45:00.000000000 +0100
1164 @@ -47,6 +47,7 @@
1165 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
1166 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1167 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1168 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1169 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1170 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
1171 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
1172 diff -Naur linux-2.6.32.8.org/net/netfilter/nf_queue.c linux-2.6.32.8/net/netfilter/nf_queue.c
1173 --- linux-2.6.32.8.org/net/netfilter/nf_queue.c 2010-02-09 13:57:19.000000000 +0100
1174 +++ linux-2.6.32.8/net/netfilter/nf_queue.c 2010-02-17 20:45:00.000000000 +0100
1175 @@ -20,6 +20,26 @@
1176
1177 static DEFINE_MUTEX(queue_handler_mutex);
1178
1179 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1180 +static const struct nf_queue_handler *queue_imq_handler;
1181 +
1182 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1183 +{
1184 + mutex_lock(&queue_handler_mutex);
1185 + rcu_assign_pointer(queue_imq_handler, qh);
1186 + mutex_unlock(&queue_handler_mutex);
1187 +}
1188 +EXPORT_SYMBOL(nf_register_queue_imq_handler);
1189 +
1190 +void nf_unregister_queue_imq_handler(void)
1191 +{
1192 + mutex_lock(&queue_handler_mutex);
1193 + rcu_assign_pointer(queue_imq_handler, NULL);
1194 + mutex_unlock(&queue_handler_mutex);
1195 +}
1196 +EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
1197 +#endif
1198 +
1199 /* return EBUSY when somebody else is registered, return EEXIST if the
1200 * same handler is registered, return 0 in case of success. */
1201 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
1202 @@ -80,7 +100,7 @@
1203 }
1204 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
1205
1206 -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1207 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1208 {
1209 /* Release those devices we held, or Alexey will kill me. */
1210 if (entry->indev)
1211 @@ -100,6 +120,7 @@
1212 /* Drop reference to owner of hook which queued us. */
1213 module_put(entry->elem->owner);
1214 }
1215 +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
1216
1217 /*
1218 * Any packet that leaves via this function must come back
1219 @@ -121,12 +142,26 @@
1220 #endif
1221 const struct nf_afinfo *afinfo;
1222 const struct nf_queue_handler *qh;
1223 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1224 + const struct nf_queue_handler *qih = NULL;
1225 +#endif
1226
1227 /* QUEUE == DROP if noone is waiting, to be safe. */
1228 rcu_read_lock();
1229
1230 qh = rcu_dereference(queue_handler[pf]);
1231 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1232 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1233 + if (pf == PF_INET || pf == PF_INET6)
1234 +#else
1235 + if (pf == PF_INET)
1236 +#endif
1237 + qih = rcu_dereference(queue_imq_handler);
1238 +
1239 + if (!qh && !qih)
1240 +#else /* !IMQ */
1241 if (!qh)
1242 +#endif
1243 goto err_unlock;
1244
1245 afinfo = nf_get_afinfo(pf);
1246 @@ -145,6 +180,10 @@
1247 .indev = indev,
1248 .outdev = outdev,
1249 .okfn = okfn,
1250 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1251 + .next_outfn = qh ? qh->outfn : NULL,
1252 + .next_queuenum = queuenum,
1253 +#endif
1254 };
1255
1256 /* If it's going away, ignore hook. */
1257 @@ -170,8 +209,19 @@
1258 }
1259 #endif
1260 afinfo->saveroute(skb, entry);
1261 +
1262 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1263 + if (qih) {
1264 + status = qih->outfn(entry, queuenum);
1265 + goto imq_skip_queue;
1266 + }
1267 +#endif
1268 +
1269 status = qh->outfn(entry, queuenum);
1270
1271 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1272 +imq_skip_queue:
1273 +#endif
1274 rcu_read_unlock();
1275
1276 if (status < 0) {
1277 diff -Naur linux-2.6.32.8.org/net/netfilter/xt_IMQ.c linux-2.6.32.8/net/netfilter/xt_IMQ.c
1278 --- linux-2.6.32.8.org/net/netfilter/xt_IMQ.c 1970-01-01 01:00:00.000000000 +0100
1279 +++ linux-2.6.32.8/net/netfilter/xt_IMQ.c 2010-02-17 20:45:00.000000000 +0100
1280 @@ -0,0 +1,73 @@
1281 +/*
1282 + * This target marks packets to be enqueued to an imq device
1283 + */
1284 +#include <linux/module.h>
1285 +#include <linux/skbuff.h>
1286 +#include <linux/netfilter/x_tables.h>
1287 +#include <linux/netfilter/xt_IMQ.h>
1288 +#include <linux/imq.h>
1289 +
1290 +static unsigned int imq_target(struct sk_buff *pskb,
1291 + const struct xt_target_param *par)
1292 +{
1293 + const struct xt_imq_info *mr = par->targinfo;
1294 +
1295 + pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1296 +
1297 + return XT_CONTINUE;
1298 +}
1299 +
1300 +static bool imq_checkentry(const struct xt_tgchk_param *par)
1301 +{
1302 + struct xt_imq_info *mr = par->targinfo;
1303 +
1304 + if (mr->todev > IMQ_MAX_DEVS - 1) {
1305 + printk(KERN_WARNING
1306 + "IMQ: invalid device specified, highest is %u\n",
1307 + IMQ_MAX_DEVS - 1);
1308 + return 0;
1309 + }
1310 +
1311 + return 1;
1312 +}
1313 +
1314 +static struct xt_target xt_imq_reg[] __read_mostly = {
1315 + {
1316 + .name = "IMQ",
1317 + .family = AF_INET,
1318 + .checkentry = imq_checkentry,
1319 + .target = imq_target,
1320 + .targetsize = sizeof(struct xt_imq_info),
1321 + .table = "mangle",
1322 + .me = THIS_MODULE
1323 + },
1324 + {
1325 + .name = "IMQ",
1326 + .family = AF_INET6,
1327 + .checkentry = imq_checkentry,
1328 + .target = imq_target,
1329 + .targetsize = sizeof(struct xt_imq_info),
1330 + .table = "mangle",
1331 + .me = THIS_MODULE
1332 + },
1333 +};
1334 +
1335 +static int __init imq_init(void)
1336 +{
1337 + return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1338 +}
1339 +
1340 +static void __exit imq_fini(void)
1341 +{
1342 + xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1343 +}
1344 +
1345 +module_init(imq_init);
1346 +module_exit(imq_fini);
1347 +
1348 +MODULE_AUTHOR("http://www.linuximq.net");
1349 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1350 +MODULE_LICENSE("GPL");
1351 +MODULE_ALIAS("ipt_IMQ");
1352 +MODULE_ALIAS("ip6t_IMQ");
1353 +
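
For reference only (this note is not part of the patch above): the 5-bit imq_flags encoding ties the pieces of this patch together. imq_target() in net/netfilter/xt_IMQ.c sets (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE, imq_nf_hook() in drivers/net/imq.c queues only packets carrying IMQ_F_ENQUEUE, and imq_nf_queue() recovers the device index with IMQ_F_IFMASK. The following is a minimal illustrative stand-alone user-space C sketch that mirrors those bit operations using the constants from include/linux/imq.h; it is not kernel code and only restates the patch's own defines for clarity.

/* Illustrative user-space sketch of the imq_flags encoding used by this
 * patch. It mirrors the bit layout from include/linux/imq.h: the low four
 * bits select the imq device, IMQ_F_ENQUEUE marks the packet for queueing. */
#include <stdio.h>

#define IMQ_F_BITS     5
#define IMQ_F_IFMASK   0x0f
#define IMQ_F_ENQUEUE  0x10
#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)

/* What imq_target() does in the patch: mark the packet for device 'todev'. */
static unsigned char imq_encode(unsigned int todev)
{
        return (unsigned char)((todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE);
}

int main(void)
{
        unsigned int todev;

        for (todev = 0; todev < IMQ_MAX_DEVS; todev++) {
                unsigned char imq_flags = imq_encode(todev);

                /* imq_nf_hook() only queues packets with IMQ_F_ENQUEUE set;
                 * imq_nf_queue() then recovers the device index like this: */
                if (imq_flags & IMQ_F_ENQUEUE)
                        printf("todev=%2u -> imq_flags=0x%02x -> imq%u\n",
                               todev, (unsigned)imq_flags,
                               (unsigned)(imq_flags & IMQ_F_IFMASK));
        }
        return 0;
}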