--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -109,6 +109,129 @@ config EQUALIZER
 To compile this driver as a module, choose M here: the module
 will be called eql. If unsure, say N.

+config IMQ
+ tristate "IMQ (intermediate queueing device) support"
+ depends on NETDEVICES && NETFILTER
+ ---help---
+ The IMQ device(s) is used as placeholder for QoS queueing
+ disciplines. Every packet entering/leaving the IP stack can be
+ directed through the IMQ device where it's enqueued/dequeued to the
+ attached qdisc. This allows you to treat network devices as classes
+ and distribute bandwidth among them. Iptables is used to specify
+ through which IMQ device, if any, packets travel.
+
+ More information at: http://www.linuximq.net/
+
+ To compile this driver as a module, choose M here: the module
+ will be called imq. If unsure, say N.
+
+choice
+ prompt "IMQ behavior (PRE/POSTROUTING)"
+ depends on IMQ
+ default IMQ_BEHAVIOR_AB
+ help
+
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ IMQ can work in any of the following ways:
+
+     PREROUTING   |   POSTROUTING
+ -----------------|-------------------
+ #1  After NAT    |   After NAT
+ #2  After NAT    |   Before NAT
+ #3  Before NAT   |   After NAT
+ #4  Before NAT   |   Before NAT
+
+ The default behavior is to hook after NAT on PREROUTING
+ and before NAT on POSTROUTING (#2).
+
+ These settings are especially useful when trying to use IMQ
+ to shape NATed clients.
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_AA
+ bool "IMQ AA"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+ bool "IMQ AB"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+ bool "IMQ BA"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+ bool "IMQ BB"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
+endchoice
+
+config IMQ_NUM_DEVS
+
+ int "Number of IMQ devices"
+ range 2 16
+ depends on IMQ
+ default "16"
+ help
+
+ This setting defines how many IMQ devices will be
+ created.
+
+ The default value is 16.
+
+ More information can be found at: www.linuximq.net
+
+ If unsure, leave the default settings alone.
+
 config TUN
 tristate "Universal TUN/TAP device driver support"
 select CRC32
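
(Illustrative usage, not part of the patch: with CONFIG_IMQ and the IMQ
iptables target built, a minimal ingress-shaping setup could look like the
following -- the device name imq0, the interface eth0 and the htb qdisc are
assumptions for the example:

    ip link set imq0 up
    tc qdisc add dev imq0 root handle 1: htb default 10
    iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0

The qdisc attached to imq0 then sees the traffic at PREROUTING, which is what
makes shaping of NATed clients possible as described in the help text above.)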
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -144,6 +144,7 @@ obj-$(CONFIG_SLHC) += slhc.o
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o

 obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IMQ) += imq.o
 obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
 obj-$(CONFIG_DE600) += de600.o
--- /dev/null
+++ b/drivers/net/imq.c
@@ -0,0 +1,533 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Patrick McHardy, <kaber@trash.net>
+ *
+ * The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
+ * - Update patch to 2.4.21
+ * Sebastian Strollo <sstrollo@nortelnetworks.com>
+ * - Fix "Dead-loop on netdevice imq"-issue
+ * Marcel Sebek <sebek64@post.cz>
+ * - Update to 2.6.2-rc1
+ *
+ * After some time of inactivity there is a group taking care
+ * of IMQ again: http://www.linuximq.net
+ *
+ *
+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
+ * including the following changes:
+ *
+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
+ * - Correction of imq_init_devs() issue that resulted in
+ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
+ * - Addition of functionality to choose number of IMQ devices
+ * during kernel config (Andre Correa)
+ * - Addition of functionality to choose how IMQ hooks on
+ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
+ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
+ *
+ *
+ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
+ * released with almost no problems. 2.6.14-x was released
+ * with some important changes: nfcache was removed; After
+ * some weeks of trouble we figured out that some IMQ fields
+ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
+ * These functions are correctly patched by this new patch version.
+ *
+ * Thanks for all who helped to figure out all the problems with
+ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ * I didn't forget anybody). I apologize again for my lack of time.
+ *
+ *
+ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
+ * recursive locking. New initialization routines to fix 'rmmod' not
+ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
+ *
+ * 2008/08/06 - 2.6.27 - (JK)
+ * - Replaced tasklet with 'netif_schedule()'.
+ * - Cleaned up and added comments for imq_nf_queue().
+ *
+ * Also, many thanks to Pablo Sebastian Greco for making the initial
+ * patch and to those who helped with the testing.
+ *
+ * More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ #include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ingress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP_PRI_MANGLE + 1
+#else
+ .priority = NF_IP_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP_PRI_LAST
+#else
+ .priority = NF_IP_PRI_NAT_SRC - 1
+#endif
+};
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct nf_hook_ops imq_ingress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP6_PRI_MANGLE + 1
+#else
+ .priority = NF_IP6_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP6_PRI_LAST
+#else
+ .priority = NF_IP6_PRI_NAT_SRC - 1
+#endif
+};
+#endif
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
+#else
+static unsigned int numdevs = IMQ_MAX_DEVS;
+#endif
+
+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+ return &dev->stats;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+ struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+ if (likely(entry)) {
+ nf_queue_entry_release_refs(entry);
+ kfree(entry);
+ }
+}
+
+static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+ int status;
+
+ if (!entry->next_outfn) {
+ nf_reinject(entry, verdict);
+ return;
+ }
+
+ status = entry->next_outfn(entry, entry->next_queuenum);
+ if (status < 0) {
+ nf_queue_entry_release_refs(entry);
+ kfree_skb(entry->skb);
+ kfree(entry);
+ }
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+ BUG_ON(entry == NULL);
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+
+ dev->trans_start = jiffies;
+ imq_nf_reinject(entry, NF_ACCEPT);
+ return 0;
+}
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+{
+ struct net_device *dev;
+ struct sk_buff *skb_orig, *skb, *skb_shared;
+ struct Qdisc *q;
+ spinlock_t *root_lock;
+ struct netdev_queue *txq;
+ int users, index;
+
+ index = entry->skb->imq_flags & IMQ_F_IFMASK;
+ if (unlikely(index > numdevs - 1)) {
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ numdevs - 1);
+ return -EINVAL;
+ }
+
+ /* check for imq device by index from cache */
+ dev = imq_devs_cache[index];
+ if (unlikely(!dev)) {
+ char buf[8];
+
+ /* get device by name and cache result */
+ snprintf(buf, sizeof(buf), "imq%d", index);
+ dev = dev_get_by_name(&init_net, buf);
+ if (!dev) {
+ /* not found ?!*/
+ BUG();
+ return -ENODEV;
+ }
+
+ imq_devs_cache[index] = dev;
+ }
+
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ entry->skb->imq_flags = 0;
+ imq_nf_reinject(entry, NF_ACCEPT);
+ return 0;
+ }
+ dev->last_rx = jiffies;
+
+ skb = entry->skb;
+ skb_orig = NULL;
+
+ /* skb has owner? => make clone */
+ if (unlikely(skb->destructor)) {
+ skb_orig = skb;
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ entry->skb = skb;
+ }
+
+ skb->nf_queue_entry = entry;
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ txq = dev_pick_tx(dev, skb);
+ q = txq->qdisc;
+
+ if (unlikely(!q->enqueue))
+ goto packet_not_eaten_by_imq_dev;
+
+ if (unlikely(!rtnl_trylock()))
+ goto packet_not_eaten_by_imq_dev;
+
+ root_lock = qdisc_root_lock(q);
+ spin_lock(root_lock);
+
+ users = atomic_read(&skb->users);
+
+ skb_shared = skb_get(skb); /* increase reference count by one */
+ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
+
+ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
+ kfree_skb(skb_shared); /* decrease reference count by one */
+
+ /* imq_skb_destructor frees the skb and entry */
+ skb->destructor = &imq_skb_destructor;
+
+ /* cloned? */
+ if (skb_orig)
+ kfree_skb(skb_orig); /* free original */
+
+ /* schedule qdisc dequeue */
+ netif_tx_schedule_all(dev);
+
+ spin_unlock(root_lock);
+ __rtnl_unlock();
+ return 0;
+ } else {
+ /* the qdisc dropped the packet and decreased the skb reference
+ * count, so we must not try to free it again as that would
+ * actually destroy the skb. */
+ spin_unlock(root_lock);
+ __rtnl_unlock();
+ goto packet_not_eaten_by_imq_dev;
+ }
+
+packet_not_eaten_by_imq_dev:
+ /* cloned? restore original */
+ if (skb_orig) {
+ kfree_skb(skb);
+ entry->skb = skb_orig;
+ }
+ return -1;
+}
+
+static struct nf_queue_handler nfqh = {
+ .name = "imq",
+ .outfn = imq_nf_queue,
+};
+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+ if (pskb->imq_flags & IMQ_F_ENQUEUE)
+ return NF_QUEUE;
+
+ return NF_ACCEPT;
+}
+
+static int imq_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int imq_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static void imq_setup(struct net_device *dev)
+{
+ dev->hard_start_xmit = imq_dev_xmit;
+ dev->open = imq_open;
+ dev->get_stats = imq_get_stats;
+ dev->stop = imq_close;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 1500;
+ dev->tx_queue_len = 10000;
+ dev->flags = IFF_NOARP;
+}
+
+static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ int ret = 0;
+
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+ ret = -EADDRNOTAVAIL;
+ goto end;
+ }
+ }
+ return 0;
+end:
+ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
+ return ret;
+}
+
+static struct rtnl_link_ops imq_link_ops __read_mostly = {
+ .kind = "imq",
+ .priv_size = 0,
+ .setup = imq_setup,
+ .validate = imq_validate,
+};
+
+static int __init imq_init_hooks(void)
+{
+ int err;
+
+ nf_register_queue_imq_handler(&nfqh);
+
+ err = nf_register_hook(&imq_ingress_ipv4);
+ if (err)
+ goto err1;
+
+ err = nf_register_hook(&imq_egress_ipv4);
+ if (err)
+ goto err2;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ err = nf_register_hook(&imq_ingress_ipv6);
+ if (err)
+ goto err3;
+
+ err = nf_register_hook(&imq_egress_ipv6);
+ if (err)
+ goto err4;
+#endif
+
+ return 0;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+err4:
+ nf_unregister_hook(&imq_ingress_ipv6);
+err3:
+ nf_unregister_hook(&imq_egress_ipv4);
+#endif
+err2:
+ nf_unregister_hook(&imq_ingress_ipv4);
+err1:
+ nf_unregister_queue_imq_handler();
+ return err;
+}
+
+static int __init imq_init_one(int index)
+{
+ struct net_device *dev;
+ int ret;
+
+ dev = alloc_netdev(0, "imq%d", imq_setup);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = dev_alloc_name(dev, dev->name);
+ if (ret < 0)
+ goto fail;
+
+ dev->rtnl_link_ops = &imq_link_ops;
+ ret = register_netdevice(dev);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+fail:
+ free_netdev(dev);
+ return ret;
+}
+
+static int __init imq_init_devs(void)
+{
+ int err, i;
+
+ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
+ printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
+ IMQ_MAX_DEVS);
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+ err = __rtnl_link_register(&imq_link_ops);
+
+ for (i = 0; i < numdevs && !err; i++)
+ err = imq_init_one(i);
+
+ if (err) {
+ __rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+ }
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __init imq_init_module(void)
+{
+ int err;
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
+#endif
+
+ err = imq_init_devs();
+ if (err) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
+ return err;
+ }
+
+ err = imq_init_hooks();
+ if (err) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
+ rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+ return err;
+ }
+
+ printk(KERN_INFO "IMQ driver loaded successfully.\n");
+
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
+#endif
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
+#endif
+
+ return 0;
+}
+
+static void __exit imq_unhook(void)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ nf_unregister_hook(&imq_ingress_ipv6);
+ nf_unregister_hook(&imq_egress_ipv6);
+#endif
+ nf_unregister_hook(&imq_ingress_ipv4);
+ nf_unregister_hook(&imq_egress_ipv4);
+
+ nf_unregister_queue_imq_handler();
+}
+
+static void __exit imq_cleanup_devs(void)
+{
+ rtnl_link_unregister(&imq_link_ops);
+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+}
+
+static void __exit imq_exit_module(void)
+{
+ imq_unhook();
+ imq_cleanup_devs();
+ printk(KERN_INFO "IMQ driver unloaded successfully.\n");
+}
+
+module_init(imq_init_module);
+module_exit(imq_exit_module);
+
+module_param(numdevs, int, 0);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
+ "be created)");
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
+ "http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("imq");
+
--- /dev/null
+++ b/include/linux/imq.h
@@ -0,0 +1,13 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
+#define IMQ_F_BITS 5
+
+#define IMQ_F_IFMASK 0x0f
+#define IMQ_F_ENQUEUE 0x10
+
+#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
+
+#endif /* _IMQ_H */
+
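
(A minimal sketch, not part of the patch, of how the constants above pack
state into the 5-bit imq_flags field that the skbuff.h hunk below adds to
struct sk_buff: the low four bits select one of the IMQ_MAX_DEVS devices,
bit 4 requests enqueueing.

    skb->imq_flags = (2 & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;  /* queue to imq2 */
    index = skb->imq_flags & IMQ_F_IFMASK;                /* recovers 2 */

This mirrors how xt_IMQ.c sets the flags and imq_nf_queue() reads them back.)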
--- /dev/null
+++ b/include/linux/netfilter/xt_IMQ.h
@@ -0,0 +1,9 @@
+#ifndef _XT_IMQ_H
+#define _XT_IMQ_H
+
+struct xt_imq_info {
+ unsigned int todev; /* target imq device */
+};
+
+#endif /* _XT_IMQ_H */
+
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
@@ -0,0 +1,10 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+
+#define ip6t_imq_info xt_imq_info
+
+#endif /* _IP6T_IMQ_H */
+
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -28,6 +28,9 @@
 #include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif

 #define HAVE_ALLOC_SKB /* For the drivers to know */
 #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
@@ -302,6 +305,13 @@ struct sk_buff {
 struct nf_conntrack *nfct;
 struct sk_buff *nfct_reasm;
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ struct nf_queue_entry *nf_queue_entry;
+/* 2.6.27 started using skb->cb for qdiscs. The problem is that IMQ runs the
+ * qdisc on the wrong layer, where skb->cb is already used by TCP, so the qdisc
+ * must not use skb->cb; this adds 4 extra bytes to the skbuff. */
+ unsigned int qdisc_skb_cb_pkt_len;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info *nf_bridge;
 #endif
@@ -321,6 +331,9 @@ struct sk_buff {
 __u8 do_not_encrypt:1;
 #endif
 /* 0/13/14 bit hole */
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ __u8 imq_flags:IMQ_F_BITS;
+#endif

 #ifdef CONFIG_NET_DMA
 dma_cookie_t dma_cookie;
@@ -1638,6 +1651,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 dst->nfct_reasm = src->nfct_reasm;
 nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ dst->imq_flags = src->imq_flags;
+ dst->nf_queue_entry = src->nf_queue_entry;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 dst->nf_bridge = src->nf_bridge;
 nf_bridge_get(src->nf_bridge);
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -13,6 +13,13 @@ struct nf_queue_entry {
 struct net_device *indev;
 struct net_device *outdev;
 int (*okfn)(struct sk_buff *);
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ /* the following allow IMQ to work when other nf_queue handlers are set up */
+ int (*next_outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ unsigned int next_queuenum;
+#endif
 };

 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
@@ -30,5 +37,11 @@ extern int nf_unregister_queue_handler(int pf,
 const struct nf_queue_handler *qh);
 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
+extern void nf_unregister_queue_imq_handler(void);
+#endif

 #endif /* _NF_QUEUE_H */
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -174,7 +174,11 @@ struct tcf_proto
 };

 struct qdisc_skb_cb {
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ unsigned int __imq_do_not_use;
+#else
 unsigned int pkt_len;
+#endif
 char data[];
 };

@@ -275,6 +279,8 @@ extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_clas
 extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
 extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

+extern struct netdev_queue *dev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb);
 extern void dev_init_scheduler(struct net_device *dev);
 extern void dev_shutdown(struct net_device *dev);
 extern void dev_activate(struct net_device *dev);
@@ -340,7 +346,11 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)

 static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
 {
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ return skb->qdisc_skb_cb_pkt_len; /* see skbuff.h why */
+#else
 return qdisc_skb_cb(skb)->pkt_len;
+#endif
 }

 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
@@ -366,7 +376,11 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)

 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 {
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ skb->qdisc_skb_cb_pkt_len = skb->len; /* see skbuff.h why */
+#else
 qdisc_skb_cb(skb)->pkt_len = skb->len;
+#endif
 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }

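(Sketch of the combined effect of the sch_generic.h and skbuff.h hunks, not a
hunk itself: with IMQ enabled the packet length bypasses the qdisc_skb_cb
overlay on skb->cb, which TCP still owns when IMQ enqueues at the netfilter
layer, and travels in the dedicated sk_buff field instead:

    skb->qdisc_skb_cb_pkt_len = skb->len;  /* set by qdisc_enqueue_root() */
    len = qdisc_pkt_len(skb);              /* reads the dedicated field,
                                            * leaving skb->cb untouched */
)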
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -96,6 +96,9 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
@@ -1619,7 +1622,11 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 struct netdev_queue *txq)
 {
 if (likely(!skb->next)) {
- if (!list_empty(&ptype_all))
+ if (!list_empty(&ptype_all)
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ && !(skb->imq_flags & IMQ_F_ENQUEUE)
+#endif
+ )
 dev_queue_xmit_nit(skb, dev);

 if (netif_needs_gso(dev, skb)) {
@@ -1709,8 +1716,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 }

-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
+struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 u16 queue_index = 0;

@@ -1722,6 +1728,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 skb_set_queue_mapping(skb, queue_index);
 return netdev_get_tx_queue(dev, queue_index);
 }
+EXPORT_SYMBOL(dev_pick_tx);

 /**
 * dev_queue_xmit - transmit a buffer
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1406,6 +1406,7 @@ EXPORT_SYMBOL(rtnetlink_put_metrics);
 EXPORT_SYMBOL(rtnl_lock);
 EXPORT_SYMBOL(rtnl_trylock);
 EXPORT_SYMBOL(rtnl_unlock);
+EXPORT_SYMBOL(__rtnl_unlock);
 EXPORT_SYMBOL(rtnl_is_locked);
 EXPORT_SYMBOL(rtnl_unicast);
 EXPORT_SYMBOL(rtnl_notify);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -436,6 +436,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 new->sp = secpath_get(old->sp);
 #endif
 memcpy(new->cb, old->cb, sizeof(old->cb));
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ new->qdisc_skb_cb_pkt_len = old->qdisc_skb_cb_pkt_len;
+#endif
 new->csum_start = old->csum_start;
 new->csum_offset = old->csum_offset;
 new->local_df = old->local_df;
@@ -2258,7 +2258,9 @@

 __copy_skb_header(nskb, skb);
 nskb->mac_len = skb->mac_len;
-
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ nskb->qdisc_skb_cb_pkt_len = skb->qdisc_skb_cb_pkt_len;
+#endif
 skb_reserve(nskb, headroom);
 skb_reset_mac_header(nskb);
 skb_set_network_header(nskb, skb->mac_len);
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -342,6 +342,18 @@ config NETFILTER_XT_TARGET_DSCP

 To compile it as a module, choose M here. If unsure, say N.

+config NETFILTER_XT_TARGET_IMQ
+ tristate '"IMQ" target support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ select IMQ
+ default m if NETFILTER_ADVANCED=n
+ help
+ This option adds an `IMQ' target which is used to specify if and
+ to which imq device packets should get enqueued/dequeued.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_MARK
 tristate '"MARK" target support'
 depends on NETFILTER_XTABLES
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -20,6 +20,26 @@ static const struct nf_queue_handler *queue_handler[NPROTO];

 static DEFINE_MUTEX(queue_handler_mutex);

+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static const struct nf_queue_handler *queue_imq_handler;
+
+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
+{
+ mutex_lock(&queue_handler_mutex);
+ rcu_assign_pointer(queue_imq_handler, qh);
+ mutex_unlock(&queue_handler_mutex);
+}
+EXPORT_SYMBOL(nf_register_queue_imq_handler);
+
+void nf_unregister_queue_imq_handler(void)
+{
+ mutex_lock(&queue_handler_mutex);
+ rcu_assign_pointer(queue_imq_handler, NULL);
+ mutex_unlock(&queue_handler_mutex);
+}
+EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
+#endif
+
 /* return EBUSY when somebody else is registered, return EEXIST if the
 * same handler is registered, return 0 in case of success. */
 int nf_register_queue_handler(int pf, const struct nf_queue_handler *qh)
@@ -80,7 +100,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 }
 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
 /* Release those devices we held, or Alexey will kill me. */
 if (entry->indev)
@@ -100,6 +120,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 /* Drop reference to owner of hook which queued us. */
 module_put(entry->elem->owner);
 }
+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

 /*
 * Any packet that leaves via this function must come back
@@ -121,12 +142,26 @@ static int __nf_queue(struct sk_buff *skb,
 #endif
 const struct nf_afinfo *afinfo;
 const struct nf_queue_handler *qh;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ const struct nf_queue_handler *qih = NULL;
+#endif

 /* QUEUE == DROP if noone is waiting, to be safe. */
 rcu_read_lock();

 qh = rcu_dereference(queue_handler[pf]);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ if (pf == PF_INET || pf == PF_INET6)
+#else
+ if (pf == PF_INET)
+#endif
+ qih = rcu_dereference(queue_imq_handler);
+
+ if (!qh && !qih)
+#else /* !IMQ */
 if (!qh)
+#endif
 goto err_unlock;

 afinfo = nf_get_afinfo(pf);
@@ -145,6 +180,10 @@ static int __nf_queue(struct sk_buff *skb,
 .indev = indev,
 .outdev = outdev,
 .okfn = okfn,
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ .next_outfn = qh ? qh->outfn : NULL,
+ .next_queuenum = queuenum,
+#endif
 };

 /* If it's going away, ignore hook. */
@@ -170,8 +209,19 @@ static int __nf_queue(struct sk_buff *skb,
 }
 #endif
 afinfo->saveroute(skb, entry);
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ if (qih) {
+ status = qih->outfn(entry, queuenum);
+ goto imq_skip_queue;
+ }
+#endif
+
 status = qh->outfn(entry, queuenum);

+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+imq_skip_queue:
+#endif
 rcu_read_unlock();

 if (status < 0) {
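
(Resulting verdict path, pieced together from the hunks above and
drivers/net/imq.c -- a summary sketch, not a hunk:

    __nf_queue()            packet received an NF_QUEUE verdict
      -> qih->outfn()       == imq_nf_queue(): enqueue to the imq qdisc,
                            any original handler remembered in next_outfn
      -> imq_dev_xmit()     qdisc dequeue "transmits" the skb
      -> imq_nf_reinject()  chains to entry->next_outfn if one was set,
                            otherwise nf_reinject(entry, NF_ACCEPT)
)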
--- /dev/null
+++ b/net/netfilter/xt_IMQ.c
@@ -0,0 +1,81 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff *pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ const struct xt_imq_info *mr = targinfo;
+
+ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
+static bool imq_checkentry(const char *tablename,
+ const void *entry,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ struct xt_imq_info *mr = targinfo;
+
+ if (mr->todev > IMQ_MAX_DEVS - 1) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS - 1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target xt_imq_reg[] __read_mostly = {
+ {
+ .name = "IMQ",
+ .family = AF_INET,
+ .target = imq_target,
+ .targetsize = sizeof(struct xt_imq_info),
+ .table = "mangle",
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "IMQ",
+ .family = AF_INET6,
+ .target = imq_target,
+ .targetsize = sizeof(struct xt_imq_info),
+ .table = "mangle",
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE
+ },
+};
+
+static int __init imq_init(void)
+{
+ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+static void __exit imq_fini(void)
+{
+ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+module_init(imq_init);
+module_exit(imq_fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_IMQ");
+MODULE_ALIAS("ip6t_IMQ");
+
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -405,7 +405,11 @@ void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
 out:
 if (unlikely(pkt_len < 1))
 pkt_len = 1;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ skb->qdisc_skb_cb_pkt_len = pkt_len; /* see skbuff.h why */
+#else
 qdisc_skb_cb(skb)->pkt_len = pkt_len;
+#endif
 }
 EXPORT_SYMBOL(qdisc_calculate_pkt_len);
