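linux-2.6.32-imq-test2.patch: adds the IMQ (intermediate queueing device)
pseudo-driver, the netfilter `IMQ' target and the supporting core changes to
Linux 2.6.32 (see http://www.linuximq.net/).

Rough usage sketch once a patched kernel is running (not part of the patch;
it assumes CONFIG_IMQ and CONFIG_NETFILTER_XT_TARGET_IMQ are enabled, the
matching iptables IMQ userspace extension is installed, and eth0 is just an
example interface):

  # load the driver; numdevs is the module parameter defined below
  modprobe imq numdevs=2
  ip link set imq0 up

  # steer ingress traffic from eth0 through imq0 via the IMQ target
  iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0

  # attach a qdisc to imq0, e.g. rate-limit to 2 Mbit/s with HTB
  tc qdisc add dev imq0 root handle 1: htb default 10
  tc class add dev imq0 parent 1: classid 1:10 htb rate 2mbit
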
1diff -U 5 -Nr linux-2.6.32/drivers/net/imq.c linux-2.6.32-imq/drivers/net/imq.c
2--- linux-2.6.32/drivers/net/imq.c 1970-01-01 02:00:00.000000000 +0200
3+++ linux-2.6.32-imq/drivers/net/imq.c 2009-12-11 15:08:01.958734740 +0200
4@@ -0,0 +1,632 @@
5+/*
6+ * Pseudo-driver for the intermediate queue device.
7+ *
8+ * This program is free software; you can redistribute it and/or
9+ * modify it under the terms of the GNU General Public License
10+ * as published by the Free Software Foundation; either version
11+ * 2 of the License, or (at your option) any later version.
12+ *
13+ * Authors: Patrick McHardy, <kaber@trash.net>
14+ *
15+ * The first version was written by Martin Devera, <devik@cdi.cz>
16+ *
17+ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
18+ * - Update patch to 2.4.21
19+ * Sebastian Strollo <sstrollo@nortelnetworks.com>
20+ * - Fix "Dead-loop on netdevice imq"-issue
21+ * Marcel Sebek <sebek64@post.cz>
22+ * - Update to 2.6.2-rc1
23+ *
24+ * After some time of inactivity there is a group taking care
25+ * of IMQ again: http://www.linuximq.net
26+ *
27+ *
28+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
29+ * including the following changes:
30+ *
31+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
32+ * - Correction of imq_init_devs() issue that resulted in
33+ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
34+ * - Addition of functionality to choose number of IMQ devices
35+ * during kernel config (Andre Correa)
36+ * - Addition of functionality to choose how IMQ hooks on
37+ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
38+ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
39+ *
40+ *
41+ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
42+ * released with almost no problems. 2.6.14-x was released
43+ * with some important changes: nfcache was removed; after
44+ * some weeks of trouble we figured out that some IMQ fields
45+ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
46+ * These functions are correctly patched by this new patch version.
47+ *
48+ * Thanks for all who helped to figure out all the problems with
49+ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
50+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
51+ * I didn't forget anybody). I apologize again for my lack of time.
52+ *
53+ *
54+ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
55+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
56+ * recursive locking. New initialization routines to fix 'rmmod' not
57+ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
58+ *
59+ * 2008/08/06 - 2.6.26 - (JK)
60+ * - Replaced tasklet with 'netif_schedule()'.
61+ * - Cleaned up and added comments for imq_nf_queue().
62+ *
63+ * 2009/04/12
64+ * - Add skb_save_cb/skb_restore_cb helper functions for backing up
65+ * the control buffer. This is needed because the qdisc layer on kernels
66+ * 2.6.27 and newer overwrites the control buffer. (Jussi Kivilinna)
67+ * - Add better locking for IMQ device. Hopefully this will solve
68+ * SMP issues. (Jussi Kivilinna)
69+ * - Port to 2.6.27
70+ * - Port to 2.6.28
71+ * - Port to 2.6.29 + fix rmmod not working
72+ *
73+ * 2009/04/20 - (Jussi Kivilinna)
74+ * - Use netdevice feature flags to avoid extra packet handling
75+ * by core networking layer and possibly increase performance.
76+ *
77+ * 2009/09/26 - (Jussi Kivilinna)
78+ * - Add imq_nf_reinject_lockless to fix deadlock with
79+ * imq_nf_queue/imq_nf_reinject.
80+ *
81+ * 2009/12/08 - (Jussi Kivilinna)
82+ * - Port to 2.6.32
83+ * - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
84+ * - Also add better error checking for skb->nf_queue_entry usage
85+ *
86+ * Also, many thanks to Pablo Sebastian Greco for making the initial
87+ * patch and to those who helped the testing.
88+ *
89+ * More info at: http://www.linuximq.net/ (Andre Correa)
90+ */
91+
92+#include <linux/module.h>
93+#include <linux/kernel.h>
94+#include <linux/moduleparam.h>
95+#include <linux/list.h>
96+#include <linux/skbuff.h>
97+#include <linux/netdevice.h>
98+#include <linux/etherdevice.h>
99+#include <linux/rtnetlink.h>
100+#include <linux/if_arp.h>
101+#include <linux/netfilter.h>
102+#include <linux/netfilter_ipv4.h>
103+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
104+ #include <linux/netfilter_ipv6.h>
105+#endif
106+#include <linux/imq.h>
107+#include <net/pkt_sched.h>
108+#include <net/netfilter/nf_queue.h>
109+
110+static nf_hookfn imq_nf_hook;
111+
112+static struct nf_hook_ops imq_ingress_ipv4 = {
113+ .hook = imq_nf_hook,
114+ .owner = THIS_MODULE,
115+ .pf = PF_INET,
116+ .hooknum = NF_INET_PRE_ROUTING,
117+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
118+ .priority = NF_IP_PRI_MANGLE + 1
119+#else
120+ .priority = NF_IP_PRI_NAT_DST + 1
121+#endif
122+};
123+
124+static struct nf_hook_ops imq_egress_ipv4 = {
125+ .hook = imq_nf_hook,
126+ .owner = THIS_MODULE,
127+ .pf = PF_INET,
128+ .hooknum = NF_INET_POST_ROUTING,
129+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
130+ .priority = NF_IP_PRI_LAST
131+#else
132+ .priority = NF_IP_PRI_NAT_SRC - 1
133+#endif
134+};
135+
136+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
137+static struct nf_hook_ops imq_ingress_ipv6 = {
138+ .hook = imq_nf_hook,
139+ .owner = THIS_MODULE,
140+ .pf = PF_INET6,
141+ .hooknum = NF_INET_PRE_ROUTING,
142+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
143+ .priority = NF_IP6_PRI_MANGLE + 1
144+#else
145+ .priority = NF_IP6_PRI_NAT_DST + 1
146+#endif
147+};
148+
149+static struct nf_hook_ops imq_egress_ipv6 = {
150+ .hook = imq_nf_hook,
151+ .owner = THIS_MODULE,
152+ .pf = PF_INET6,
153+ .hooknum = NF_INET_POST_ROUTING,
154+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
155+ .priority = NF_IP6_PRI_LAST
156+#else
157+ .priority = NF_IP6_PRI_NAT_SRC - 1
158+#endif
159+};
160+#endif
161+
162+#if defined(CONFIG_IMQ_NUM_DEVS)
163+static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
164+#else
165+static unsigned int numdevs = IMQ_MAX_DEVS;
166+#endif
167+
168+static DEFINE_SPINLOCK(imq_nf_queue_lock);
169+
170+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
171+
172+
173+static struct net_device_stats *imq_get_stats(struct net_device *dev)
174+{
175+ return &dev->stats;
176+}
177+
178+/* called for packets kfree'd in qdiscs at places other than enqueue */
179+static void imq_skb_destructor(struct sk_buff *skb)
180+{
181+ struct nf_queue_entry *entry = skb->nf_queue_entry;
182+
183+ skb->nf_queue_entry = NULL;
184+
185+ if (entry) {
186+ nf_queue_entry_release_refs(entry);
187+ kfree(entry);
188+ }
189+
190+ skb_restore_cb(skb); /* kfree backup */
191+}
192+
193+/* locking not needed when called from imq_nf_queue */
194+static void imq_nf_reinject_lockless(struct nf_queue_entry *entry,
195+ unsigned int verdict)
196+{
197+ int status;
198+
199+ if (!entry->next_outfn) {
200+ nf_reinject(entry, verdict);
201+ return;
202+ }
203+
204+ status = entry->next_outfn(entry, entry->next_queuenum);
205+ if (status < 0) {
206+ nf_queue_entry_release_refs(entry);
207+ kfree_skb(entry->skb);
208+ kfree(entry);
209+ }
210+}
211+
212+static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
213+{
214+ int status;
215+
216+ if (!entry->next_outfn) {
217+ spin_lock_bh(&imq_nf_queue_lock);
218+ nf_reinject(entry, verdict);
219+ spin_unlock_bh(&imq_nf_queue_lock);
220+ return;
221+ }
222+
223+ rcu_read_lock();
224+ local_bh_disable();
225+ status = entry->next_outfn(entry, entry->next_queuenum);
226+ local_bh_enable();
227+ if (status < 0) {
228+ nf_queue_entry_release_refs(entry);
229+ kfree_skb(entry->skb);
230+ kfree(entry);
231+ }
232+
233+ rcu_read_unlock();
234+}
235+
236+static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
237+{
238+ struct nf_queue_entry *entry = skb->nf_queue_entry;
239+
240+ skb->nf_queue_entry = NULL;
241+ dev->trans_start = jiffies;
242+
243+ dev->stats.tx_bytes += skb->len;
244+ dev->stats.tx_packets++;
245+
246+ if (entry == NULL) {
247+ /* We don't know what is going on here: the packet is queued for an
248+ * imq device, but (probably) not by us.
249+ *
250+ * If this packet was not sent here by imq_nf_queue(), then
251+ * skb_save_cb() was not used and kfree_skb() should not show:
252+ * WARNING: IMQ: kfree_skb: skb->cb_next:..
253+ * and/or
254+ * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
255+ *
256+ * However, if one of these warnings is shown, then IMQ is somehow broken
257+ * and you should report this to linuximq.net.
258+ */
259+
260+ /* imq_dev_xmit is a black hole that eats all packets; report that
261+ * we ate this packet happily and increase the dropped counter.
262+ */
263+
264+ dev->stats.tx_dropped++;
265+ dev_kfree_skb(skb);
266+
267+ return NETDEV_TX_OK;
268+ }
269+
270+ skb_restore_cb(skb); /* restore skb->cb */
271+
272+ skb->imq_flags = 0;
273+ skb->destructor = NULL;
274+
275+ imq_nf_reinject(entry, NF_ACCEPT);
276+
277+ return NETDEV_TX_OK;
278+}
279+
280+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
281+{
282+ struct net_device *dev;
283+ struct sk_buff *skb_orig, *skb, *skb_shared;
284+ struct Qdisc *q;
285+ struct netdev_queue *txq;
286+ int users, index;
287+ int retval = -EINVAL;
288+
289+ index = entry->skb->imq_flags & IMQ_F_IFMASK;
290+ if (unlikely(index > numdevs - 1)) {
291+ if (net_ratelimit())
292+ printk(KERN_WARNING
293+ "IMQ: invalid device specified, highest is %u\n",
294+ numdevs - 1);
295+ retval = -EINVAL;
296+ goto out;
297+ }
298+
299+ /* check for imq device by index from cache */
300+ dev = imq_devs_cache[index];
301+ if (unlikely(!dev)) {
302+ char buf[8];
303+
304+ /* get device by name and cache result */
305+ snprintf(buf, sizeof(buf), "imq%d", index);
306+ dev = dev_get_by_name(&init_net, buf);
307+ if (!dev) {
308+ /* not found ?!*/
309+ BUG();
310+ retval = -ENODEV;
311+ goto out;
312+ }
313+
314+ imq_devs_cache[index] = dev;
315+ dev_put(dev);
316+ }
317+
318+ if (unlikely(!(dev->flags & IFF_UP))) {
319+ entry->skb->imq_flags = 0;
320+ imq_nf_reinject_lockless(entry, NF_ACCEPT);
321+ retval = 0;
322+ goto out;
323+ }
324+ dev->last_rx = jiffies;
325+
326+ skb = entry->skb;
327+ skb_orig = NULL;
328+
329+ /* skb has owner? => make clone */
330+ if (unlikely(skb->destructor)) {
331+ skb_orig = skb;
332+ skb = skb_clone(skb, GFP_ATOMIC);
333+ if (!skb) {
334+ retval = -ENOMEM;
335+ goto out;
336+ }
337+ entry->skb = skb;
338+ }
339+
340+ skb->nf_queue_entry = entry;
341+
342+ dev->stats.rx_bytes += skb->len;
343+ dev->stats.rx_packets++;
344+
345+ txq = dev_pick_tx(dev, skb);
346+
347+ q = rcu_dereference(txq->qdisc);
348+ if (unlikely(!q->enqueue))
349+ goto packet_not_eaten_by_imq_dev;
350+
351+ spin_lock_bh(qdisc_lock(q));
352+
353+ users = atomic_read(&skb->users);
354+
355+ skb_shared = skb_get(skb); /* increase reference count by one */
356+ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
357+ overwrite it */
358+ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
359+
360+ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
361+ kfree_skb(skb_shared); /* decrease reference count by one */
362+
363+ skb->destructor = &imq_skb_destructor;
364+
365+ /* cloned? */
366+ if (skb_orig)
367+ kfree_skb(skb_orig); /* free original */
368+
369+ spin_unlock_bh(qdisc_lock(q));
370+
371+ /* schedule qdisc dequeue */
372+ __netif_schedule(q);
373+
374+ retval = 0;
375+ goto out;
376+ } else {
377+ skb_restore_cb(skb_shared); /* restore skb->cb */
378+ skb->nf_queue_entry = NULL;
379+ /* qdisc dropped the packet and decreased the skb reference count,
380+ * so we don't want to free it again here as that would
381+ * actually destroy the skb. */
382+ spin_unlock_bh(qdisc_lock(q));
383+ goto packet_not_eaten_by_imq_dev;
384+ }
385+
386+packet_not_eaten_by_imq_dev:
387+ /* cloned? restore original */
388+ if (skb_orig) {
389+ kfree_skb(skb);
390+ entry->skb = skb_orig;
391+ }
392+ retval = -1;
393+out:
394+ return retval;
395+}
396+
397+static struct nf_queue_handler nfqh = {
398+ .name = "imq",
399+ .outfn = imq_nf_queue,
400+};
401+
402+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
403+ const struct net_device *indev,
404+ const struct net_device *outdev,
405+ int (*okfn)(struct sk_buff *))
406+{
407+ if (pskb->imq_flags & IMQ_F_ENQUEUE)
408+ return NF_QUEUE;
409+
410+ return NF_ACCEPT;
411+}
412+
413+static int imq_close(struct net_device *dev)
414+{
415+ netif_stop_queue(dev);
416+ return 0;
417+}
418+
419+static int imq_open(struct net_device *dev)
420+{
421+ netif_start_queue(dev);
422+ return 0;
423+}
424+
425+static const struct net_device_ops imq_netdev_ops = {
426+ .ndo_open = imq_open,
427+ .ndo_stop = imq_close,
428+ .ndo_start_xmit = imq_dev_xmit,
429+ .ndo_get_stats = imq_get_stats,
430+};
431+
432+static void imq_setup(struct net_device *dev)
433+{
434+ dev->netdev_ops = &imq_netdev_ops;
435+ dev->type = ARPHRD_VOID;
436+ dev->mtu = 16000;
437+ dev->tx_queue_len = 11000;
438+ dev->flags = IFF_NOARP;
439+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
440+ NETIF_F_GSO | NETIF_F_HW_CSUM |
441+ NETIF_F_HIGHDMA;
442+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
443+}
444+
445+static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
446+{
447+ int ret = 0;
448+
449+ if (tb[IFLA_ADDRESS]) {
450+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
451+ ret = -EINVAL;
452+ goto end;
453+ }
454+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
455+ ret = -EADDRNOTAVAIL;
456+ goto end;
457+ }
458+ }
459+ return 0;
460+end:
461+ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
462+ return ret;
463+}
464+
465+static struct rtnl_link_ops imq_link_ops __read_mostly = {
466+ .kind = "imq",
467+ .priv_size = 0,
468+ .setup = imq_setup,
469+ .validate = imq_validate,
470+};
471+
472+static int __init imq_init_hooks(void)
473+{
474+ int err;
475+
476+ nf_register_queue_imq_handler(&nfqh);
477+
478+ err = nf_register_hook(&imq_ingress_ipv4);
479+ if (err)
480+ goto err1;
481+
482+ err = nf_register_hook(&imq_egress_ipv4);
483+ if (err)
484+ goto err2;
485+
486+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
487+ err = nf_register_hook(&imq_ingress_ipv6);
488+ if (err)
489+ goto err3;
490+
491+ err = nf_register_hook(&imq_egress_ipv6);
492+ if (err)
493+ goto err4;
494+#endif
495+
496+ return 0;
497+
498+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
499+err4:
500+ nf_unregister_hook(&imq_ingress_ipv6);
501+err3:
502+ nf_unregister_hook(&imq_egress_ipv4);
503+#endif
504+err2:
505+ nf_unregister_hook(&imq_ingress_ipv4);
506+err1:
507+ nf_unregister_queue_imq_handler();
508+ return err;
509+}
510+
511+static int __init imq_init_one(int index)
512+{
513+ struct net_device *dev;
514+ int ret;
515+
516+ dev = alloc_netdev(0, "imq%d", imq_setup);
517+ if (!dev)
518+ return -ENOMEM;
519+
520+ ret = dev_alloc_name(dev, dev->name);
521+ if (ret < 0)
522+ goto fail;
523+
524+ dev->rtnl_link_ops = &imq_link_ops;
525+ ret = register_netdevice(dev);
526+ if (ret < 0)
527+ goto fail;
528+
529+ return 0;
530+fail:
531+ free_netdev(dev);
532+ return ret;
533+}
534+
535+static int __init imq_init_devs(void)
536+{
537+ int err, i;
538+
539+ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
540+ printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
541+ IMQ_MAX_DEVS);
542+ return -EINVAL;
543+ }
544+
545+ rtnl_lock();
546+ err = __rtnl_link_register(&imq_link_ops);
547+
548+ for (i = 0; i < numdevs && !err; i++)
549+ err = imq_init_one(i);
550+
551+ if (err) {
552+ __rtnl_link_unregister(&imq_link_ops);
553+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
554+ }
555+ rtnl_unlock();
556+
557+ return err;
558+}
559+
560+static int __init imq_init_module(void)
561+{
562+ int err;
563+
564+#if defined(CONFIG_IMQ_NUM_DEVS)
565+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
566+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
567+ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
568+#endif
569+
570+ err = imq_init_devs();
571+ if (err) {
572+ printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
573+ return err;
574+ }
575+
576+ err = imq_init_hooks();
577+ if (err) {
578+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
579+ rtnl_link_unregister(&imq_link_ops);
580+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
581+ return err;
582+ }
583+
584+ printk(KERN_INFO "IMQ driver loaded successfully.\n");
585+
586+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
587+ printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
588+#else
589+ printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
590+#endif
591+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
592+ printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
593+#else
594+ printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
595+#endif
596+
597+ return 0;
598+}
599+
600+static void __exit imq_unhook(void)
601+{
602+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
603+ nf_unregister_hook(&imq_ingress_ipv6);
604+ nf_unregister_hook(&imq_egress_ipv6);
605+#endif
606+ nf_unregister_hook(&imq_ingress_ipv4);
607+ nf_unregister_hook(&imq_egress_ipv4);
608+
609+ nf_unregister_queue_imq_handler();
610+}
611+
612+static void __exit imq_cleanup_devs(void)
613+{
614+ rtnl_link_unregister(&imq_link_ops);
615+ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
616+}
617+
618+static void __exit imq_exit_module(void)
619+{
620+ imq_unhook();
621+ imq_cleanup_devs();
622+ printk(KERN_INFO "IMQ driver unloaded successfully.\n");
623+}
624+
625+module_init(imq_init_module);
626+module_exit(imq_exit_module);
627+
628+module_param(numdevs, int, 0);
629+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
630+ "be created)");
631+MODULE_AUTHOR("http://www.linuximq.net");
632+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
633+ "http://www.linuximq.net/ for more information.");
634+MODULE_LICENSE("GPL");
635+MODULE_ALIAS_RTNL_LINK("imq");
636+
637diff -U 5 -Nr linux-2.6.32/drivers/net/Kconfig linux-2.6.32-imq/drivers/net/Kconfig
638--- linux-2.6.32/drivers/net/Kconfig 2009-12-03 05:51:21.000000000 +0200
639+++ linux-2.6.32-imq/drivers/net/Kconfig 2009-12-11 14:16:42.678730699 +0200
640@@ -107,10 +107,133 @@
641 <http://www.tldp.org/docs.html#howto>.
642
643 To compile this driver as a module, choose M here: the module
644 will be called eql. If unsure, say N.
645
646+config IMQ
647+ tristate "IMQ (intermediate queueing device) support"
648+ depends on NETDEVICES && NETFILTER
649+ ---help---
650+ The IMQ device(s) is used as a placeholder for QoS queueing
651+ disciplines. Every packet entering/leaving the IP stack can be
652+ directed through the IMQ device where it's enqueued/dequeued to the
653+ attached qdisc. This allows you to treat network devices as classes
654+ and distribute bandwidth among them. Iptables is used to specify
655+ through which IMQ device, if any, packets travel.
656+
657+ More information at: http://www.linuximq.net/
658+
659+ To compile this driver as a module, choose M here: the module
660+ will be called imq. If unsure, say N.
661+
662+choice
663+ prompt "IMQ behavior (PRE/POSTROUTING)"
664+ depends on IMQ
665+ default IMQ_BEHAVIOR_AB
666+ help
667+
668+ This setting defines how IMQ behaves with respect to its
669+ hooking in PREROUTING and POSTROUTING.
670+
671+ IMQ can work in any of the following ways:
672+
673+ PREROUTING | POSTROUTING
674+ -----------------|-------------------
675+ #1 After NAT | After NAT
676+ #2 After NAT | Before NAT
677+ #3 Before NAT | After NAT
678+ #4 Before NAT | Before NAT
679+
680+ The default behavior (IMQ_BEHAVIOR_AB) is to hook after NAT on
681+ PREROUTING and before NAT on POSTROUTING (#2).
682+
683+ These settings are especially useful when trying to use IMQ
684+ to shape NATed clients.
685+
686+ More information can be found at: www.linuximq.net
687+
688+ If not sure leave the default settings alone.
689+
690+config IMQ_BEHAVIOR_AA
691+ bool "IMQ AA"
692+ help
693+ This setting defines how IMQ behaves with respect to its
694+ hooking in PREROUTING and POSTROUTING.
695+
696+ Choosing this option will make IMQ hook like this:
697+
698+ PREROUTING: After NAT
699+ POSTROUTING: After NAT
700+
701+ More information can be found at: www.linuximq.net
702+
703+ If not sure leave the default settings alone.
704+
705+config IMQ_BEHAVIOR_AB
706+ bool "IMQ AB"
707+ help
708+ This setting defines how IMQ behaves with respect to its
709+ hooking in PREROUTING and POSTROUTING.
710+
711+ Choosing this option will make IMQ hook like this:
712+
713+ PREROUTING: After NAT
714+ POSTROUTING: Before NAT
715+
716+ More information can be found at: www.linuximq.net
717+
718+ If not sure leave the default settings alone.
719+
720+config IMQ_BEHAVIOR_BA
721+ bool "IMQ BA"
722+ help
723+ This setting defines how IMQ behaves with respect to its
724+ hooking in PREROUTING and POSTROUTING.
725+
726+ Choosing this option will make IMQ hook like this:
727+
728+ PREROUTING: Before NAT
729+ POSTROUTING: After NAT
730+
731+ More information can be found at: www.linuximq.net
732+
733+ If not sure leave the default settings alone.
734+
735+config IMQ_BEHAVIOR_BB
736+ bool "IMQ BB"
737+ help
738+ This setting defines how IMQ behaves with respect to its
739+ hooking in PREROUTING and POSTROUTING.
740+
741+ Choosing this option will make IMQ hook like this:
742+
743+ PREROUTING: Before NAT
744+ POSTROUTING: Before NAT
745+
746+ More information can be found at: www.linuximq.net
747+
748+ If not sure leave the default settings alone.
749+
750+endchoice
751+
752+config IMQ_NUM_DEVS
753+
754+ int "Number of IMQ devices"
755+ range 2 16
756+ depends on IMQ
757+ default "16"
758+ help
759+
760+ This setting defines how many IMQ devices will be
761+ created.
762+
763+ The default value is 16.
764+
765+ More information can be found at: www.linuximq.net
766+
767+ If not sure leave the default settings alone.
768+
769 config TUN
770 tristate "Universal TUN/TAP device driver support"
771 select CRC32
772 ---help---
773 TUN/TAP provides packet reception and transmission for user space
774diff -U 5 -Nr linux-2.6.32/drivers/net/Makefile linux-2.6.32-imq/drivers/net/Makefile
775--- linux-2.6.32/drivers/net/Makefile 2009-12-03 05:51:21.000000000 +0200
776+++ linux-2.6.32-imq/drivers/net/Makefile 2009-12-11 14:16:42.678730699 +0200
777@@ -163,10 +163,11 @@
778 obj-$(CONFIG_SLHC) += slhc.o
779
780 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
781
782 obj-$(CONFIG_DUMMY) += dummy.o
783+obj-$(CONFIG_IMQ) += imq.o
784 obj-$(CONFIG_IFB) += ifb.o
785 obj-$(CONFIG_MACVLAN) += macvlan.o
786 obj-$(CONFIG_DE600) += de600.o
787 obj-$(CONFIG_DE620) += de620.o
788 obj-$(CONFIG_LANCE) += lance.o
789diff -U 5 -Nr linux-2.6.32/include/linux/imq.h linux-2.6.32-imq/include/linux/imq.h
790--- linux-2.6.32/include/linux/imq.h 1970-01-01 02:00:00.000000000 +0200
791+++ linux-2.6.32-imq/include/linux/imq.h 2009-12-11 14:16:42.678730699 +0200
792@@ -0,0 +1,13 @@
793+#ifndef _IMQ_H
794+#define _IMQ_H
795+
796+/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
797+#define IMQ_F_BITS 5
798+
799+#define IMQ_F_IFMASK 0x0f
800+#define IMQ_F_ENQUEUE 0x10
801+
802+#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
803+
804+#endif /* _IMQ_H */
805+
806diff -U 5 -Nr linux-2.6.32/include/linux/netdevice.h linux-2.6.32-imq/include/linux/netdevice.h
807--- linux-2.6.32/include/linux/netdevice.h 2009-12-03 05:51:21.000000000 +0200
808+++ linux-2.6.32-imq/include/linux/netdevice.h 2009-12-11 14:16:42.679730960 +0200
809@@ -1112,10 +1112,11 @@
810 extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
811 extern int dev_alloc_name(struct net_device *dev, const char *name);
812 extern int dev_open(struct net_device *dev);
813 extern int dev_close(struct net_device *dev);
814 extern void dev_disable_lro(struct net_device *dev);
815+extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
816 extern int dev_queue_xmit(struct sk_buff *skb);
817 extern int register_netdevice(struct net_device *dev);
818 extern void unregister_netdevice(struct net_device *dev);
819 extern void free_netdev(struct net_device *dev);
820 extern void synchronize_net(void);
821diff -U 5 -Nr linux-2.6.32/include/linux/netfilter/xt_IMQ.h linux-2.6.32-imq/include/linux/netfilter/xt_IMQ.h
822--- linux-2.6.32/include/linux/netfilter/xt_IMQ.h 1970-01-01 02:00:00.000000000 +0200
823+++ linux-2.6.32-imq/include/linux/netfilter/xt_IMQ.h 2009-12-11 14:16:42.679730960 +0200
824@@ -0,0 +1,9 @@
825+#ifndef _XT_IMQ_H
826+#define _XT_IMQ_H
827+
828+struct xt_imq_info {
829+ unsigned int todev; /* target imq device */
830+};
831+
832+#endif /* _XT_IMQ_H */
833+
834diff -U 5 -Nr linux-2.6.32/include/linux/netfilter_ipv4/ipt_IMQ.h linux-2.6.32-imq/include/linux/netfilter_ipv4/ipt_IMQ.h
835--- linux-2.6.32/include/linux/netfilter_ipv4/ipt_IMQ.h 1970-01-01 02:00:00.000000000 +0200
836+++ linux-2.6.32-imq/include/linux/netfilter_ipv4/ipt_IMQ.h 2009-12-11 14:16:42.679730960 +0200
837@@ -0,0 +1,10 @@
838+#ifndef _IPT_IMQ_H
839+#define _IPT_IMQ_H
840+
841+/* Backwards compatibility for old userspace */
842+#include <linux/netfilter/xt_IMQ.h>
843+
844+#define ipt_imq_info xt_imq_info
845+
846+#endif /* _IPT_IMQ_H */
847+
848diff -U 5 -Nr linux-2.6.32/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-2.6.32-imq/include/linux/netfilter_ipv6/ip6t_IMQ.h
849--- linux-2.6.32/include/linux/netfilter_ipv6/ip6t_IMQ.h 1970-01-01 02:00:00.000000000 +0200
850+++ linux-2.6.32-imq/include/linux/netfilter_ipv6/ip6t_IMQ.h 2009-12-11 14:16:42.679730960 +0200
851@@ -0,0 +1,10 @@
852+#ifndef _IP6T_IMQ_H
853+#define _IP6T_IMQ_H
854+
855+/* Backwards compatibility for old userspace */
856+#include <linux/netfilter/xt_IMQ.h>
857+
858+#define ip6t_imq_info xt_imq_info
859+
860+#endif /* _IP6T_IMQ_H */
861+
862diff -U 5 -Nr linux-2.6.32/include/linux/skbuff.h linux-2.6.32-imq/include/linux/skbuff.h
863--- linux-2.6.32/include/linux/skbuff.h 2009-12-03 05:51:21.000000000 +0200
864+++ linux-2.6.32-imq/include/linux/skbuff.h 2009-12-11 14:16:42.680730834 +0200
865@@ -27,10 +27,13 @@
866 #include <linux/textsearch.h>
867 #include <net/checksum.h>
868 #include <linux/rcupdate.h>
869 #include <linux/dmaengine.h>
870 #include <linux/hrtimer.h>
871+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
872+#include <linux/imq.h>
873+#endif
874
875 /* Don't change this without changing skb_csum_unnecessary! */
876 #define CHECKSUM_NONE 0
877 #define CHECKSUM_UNNECESSARY 1
878 #define CHECKSUM_COMPLETE 2
879@@ -328,10 +331,13 @@
880 * layer. Please put your private variables there. If you
881 * want to keep them across layers you have to do a skb_clone()
882 * first. This is owned by whoever has the skb queued ATM.
883 */
884 char cb[48];
885+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
886+ void *cb_next;
887+#endif
888
889 unsigned int len,
890 data_len;
891 __u16 mac_len,
892 hdr_len;
893@@ -360,10 +366,13 @@
894 void (*destructor)(struct sk_buff *skb);
895 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
896 struct nf_conntrack *nfct;
897 struct sk_buff *nfct_reasm;
898 #endif
899+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
900+ struct nf_queue_entry *nf_queue_entry;
901+#endif
902 #ifdef CONFIG_BRIDGE_NETFILTER
903 struct nf_bridge_info *nf_bridge;
904 #endif
905
906 int iif;
907@@ -381,10 +390,14 @@
908 #endif
909 kmemcheck_bitfield_end(flags2);
910
911 /* 0/14 bit hole */
912
913+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
914+ __u8 imq_flags:IMQ_F_BITS;
915+#endif
916+
917 #ifdef CONFIG_NET_DMA
918 dma_cookie_t dma_cookie;
919 #endif
920 #ifdef CONFIG_NETWORK_SECMARK
921 __u32 secmark;
922@@ -435,10 +448,16 @@
923 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
924 {
925 return (struct rtable *)skb_dst(skb);
926 }
927
928+
929+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
930+extern int skb_save_cb(struct sk_buff *skb);
931+extern int skb_restore_cb(struct sk_buff *skb);
932+#endif
933+
934 extern void kfree_skb(struct sk_buff *skb);
935 extern void consume_skb(struct sk_buff *skb);
936 extern void __kfree_skb(struct sk_buff *skb);
937 extern struct sk_buff *__alloc_skb(unsigned int size,
938 gfp_t priority, int fclone, int node);
939@@ -1970,10 +1989,14 @@
940 nf_conntrack_get(src->nfct);
941 dst->nfctinfo = src->nfctinfo;
942 dst->nfct_reasm = src->nfct_reasm;
943 nf_conntrack_get_reasm(src->nfct_reasm);
944 #endif
945+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
946+ dst->imq_flags = src->imq_flags;
947+ dst->nf_queue_entry = src->nf_queue_entry;
948+#endif
949 #ifdef CONFIG_BRIDGE_NETFILTER
950 dst->nf_bridge = src->nf_bridge;
951 nf_bridge_get(src->nf_bridge);
952 #endif
953 }
954diff -U 5 -Nr linux-2.6.32/include/net/netfilter/nf_queue.h linux-2.6.32-imq/include/net/netfilter/nf_queue.h
955--- linux-2.6.32/include/net/netfilter/nf_queue.h 2009-12-03 05:51:21.000000000 +0200
956+++ linux-2.6.32-imq/include/net/netfilter/nf_queue.h 2009-12-11 14:16:42.680730834 +0200
957@@ -11,10 +11,16 @@
958 u_int8_t pf;
959 unsigned int hook;
960 struct net_device *indev;
961 struct net_device *outdev;
962 int (*okfn)(struct sk_buff *);
963+
964+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
965+ int (*next_outfn)(struct nf_queue_entry *entry,
966+ unsigned int queuenum);
967+ unsigned int next_queuenum;
968+#endif
969 };
970
971 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
972
973 /* Packet queuing */
974@@ -28,7 +34,13 @@
975 const struct nf_queue_handler *qh);
976 extern int nf_unregister_queue_handler(u_int8_t pf,
977 const struct nf_queue_handler *qh);
978 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
979 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
980+extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
981+
982+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
983+extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
984+extern void nf_unregister_queue_imq_handler(void);
985+#endif
986
987 #endif /* _NF_QUEUE_H */
988diff -U 5 -Nr linux-2.6.32/net/core/dev.c linux-2.6.32-imq/net/core/dev.c
989--- linux-2.6.32/net/core/dev.c 2009-12-03 05:51:21.000000000 +0200
990+++ linux-2.6.32-imq/net/core/dev.c 2009-12-11 14:16:42.681731014 +0200
991@@ -94,10 +94,13 @@
992 #include <linux/notifier.h>
993 #include <linux/skbuff.h>
994 #include <net/net_namespace.h>
995 #include <net/sock.h>
996 #include <linux/rtnetlink.h>
997+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
998+#include <linux/imq.h>
999+#endif
1000 #include <linux/proc_fs.h>
1001 #include <linux/seq_file.h>
1002 #include <linux/stat.h>
1003 #include <linux/if_bridge.h>
1004 #include <linux/if_macvlan.h>
1005@@ -1702,11 +1705,15 @@
1006 {
1007 const struct net_device_ops *ops = dev->netdev_ops;
1008 int rc;
1009
1010 if (likely(!skb->next)) {
1011- if (!list_empty(&ptype_all))
1012+ if (!list_empty(&ptype_all)
1013+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1014+ && !(skb->imq_flags & IMQ_F_ENQUEUE)
1015+#endif
1016+ )
1017 dev_queue_xmit_nit(skb, dev);
1018
1019 if (netif_needs_gso(dev, skb)) {
1020 if (unlikely(dev_gso_segment(skb)))
1021 goto out_kfree_skb;
1022@@ -1787,12 +1794,11 @@
1023
1024 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1025 }
1026 EXPORT_SYMBOL(skb_tx_hash);
1027
1028-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1029- struct sk_buff *skb)
1030+struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
1031 {
1032 const struct net_device_ops *ops = dev->netdev_ops;
1033 u16 queue_index = 0;
1034
1035 if (ops->ndo_select_queue)
1036@@ -1801,10 +1807,11 @@
1037 queue_index = skb_tx_hash(dev, skb);
1038
1039 skb_set_queue_mapping(skb, queue_index);
1040 return netdev_get_tx_queue(dev, queue_index);
1041 }
1042+EXPORT_SYMBOL(dev_pick_tx);
1043
1044 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1045 struct net_device *dev,
1046 struct netdev_queue *txq)
1047 {
1048diff -U 5 -Nr linux-2.6.32/net/core/skbuff.c linux-2.6.32-imq/net/core/skbuff.c
1049--- linux-2.6.32/net/core/skbuff.c 2009-12-03 05:51:21.000000000 +0200
1050+++ linux-2.6.32-imq/net/core/skbuff.c 2009-12-11 15:12:39.294981618 +0200
1051@@ -70,10 +70,13 @@
1052
1053 #include "kmap_skb.h"
1054
1055 static struct kmem_cache *skbuff_head_cache __read_mostly;
1056 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
1057+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1058+static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
1059+#endif
1060
1061 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
1062 struct pipe_buffer *buf)
1063 {
1064 put_page(buf->page);
1065@@ -89,10 +92,87 @@
1066 struct pipe_buffer *buf)
1067 {
1068 return 1;
1069 }
1070
1071+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1072+/* Control buffer save/restore for IMQ devices */
1073+struct skb_cb_table {
1074+ void *cb_next;
1075+ atomic_t refcnt;
1076+ char cb[48];
1077+};
1078+
1079+static DEFINE_SPINLOCK(skb_cb_store_lock);
1080+
1081+int skb_save_cb(struct sk_buff *skb)
1082+{
1083+ struct skb_cb_table *next;
1084+
1085+ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1086+ if (!next)
1087+ return -ENOMEM;
1088+
1089+ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1090+
1091+ memcpy(next->cb, skb->cb, sizeof(skb->cb));
1092+ next->cb_next = skb->cb_next;
1093+
1094+ atomic_set(&next->refcnt, 1);
1095+
1096+ skb->cb_next = next;
1097+ return 0;
1098+}
1099+EXPORT_SYMBOL(skb_save_cb);
1100+
1101+int skb_restore_cb(struct sk_buff *skb)
1102+{
1103+ struct skb_cb_table *next;
1104+
1105+ if (!skb->cb_next)
1106+ return 0;
1107+
1108+ next = skb->cb_next;
1109+
1110+ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1111+
1112+ memcpy(skb->cb, next->cb, sizeof(skb->cb));
1113+ skb->cb_next = next->cb_next;
1114+
1115+ spin_lock(&skb_cb_store_lock);
1116+
1117+ if (atomic_dec_and_test(&next->refcnt)) {
1118+ kmem_cache_free(skbuff_cb_store_cache, next);
1119+ }
1120+
1121+ spin_unlock(&skb_cb_store_lock);
1122+
1123+ return 0;
1124+}
1125+EXPORT_SYMBOL(skb_restore_cb);
1126+
1127+static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1128+{
1129+ struct skb_cb_table *next;
1130+ struct sk_buff *old;
1131+
1132+ if (!__old->cb_next) {
1133+ new->cb_next = NULL;
1134+ return;
1135+ }
1136+
1137+ spin_lock(&skb_cb_store_lock);
1138+
1139+ old = (struct sk_buff *)__old;
1140+
1141+ next = old->cb_next;
1142+ atomic_inc(&next->refcnt);
1143+ new->cb_next = next;
1144+
1145+ spin_unlock(&skb_cb_store_lock);
1146+}
1147+#endif
1148
1149 /* Pipe buffer operations for a socket. */
1150 static struct pipe_buf_operations sock_pipe_buf_ops = {
1151 .can_merge = 0,
1152 .map = generic_pipe_buf_map,
1153@@ -396,10 +476,30 @@
1154 #endif
1155 if (skb->destructor) {
1156 WARN_ON(in_irq());
1157 skb->destructor(skb);
1158 }
1159+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1160+ /* This should not happen. When it does, avoid memleak by restoring
1161+ the chain of cb-backups. */
1162+ while(skb->cb_next != NULL) {
1163+ if (net_ratelimit())
1164+ printk(KERN_WARNING "IMQ: kfree_skb: skb->cb_next: "
1165+ "%08x\n", (unsigned int)skb->cb_next);
1166+
1167+ skb_restore_cb(skb);
1168+ }
1169+ /* This should not happen either, nf_queue_entry is nullified in
1170+ * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
1171+ * leaking entry pointers, maybe memory. We don't know whether this is
1172+ * a pointer to already freed memory, or whether it should be freed here.
1173+ * If this happens we need to add refcounting etc. for nf_queue_entry.
1174+ */
1175+ if (skb->nf_queue_entry && net_ratelimit())
1176+ printk(KERN_WARNING
1177+ "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1178+#endif
1179 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1180 nf_conntrack_put(skb->nfct);
1181 nf_conntrack_put_reasm(skb->nfct_reasm);
1182 #endif
1183 #ifdef CONFIG_BRIDGE_NETFILTER
1184@@ -533,10 +633,13 @@
1185 skb_dst_set(new, dst_clone(skb_dst(old)));
1186 #ifdef CONFIG_XFRM
1187 new->sp = secpath_get(old->sp);
1188 #endif
1189 memcpy(new->cb, old->cb, sizeof(old->cb));
1190+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1191+ skb_copy_stored_cb(new, old);
1192+#endif
1193 new->csum = old->csum;
1194 new->local_df = old->local_df;
1195 new->pkt_type = old->pkt_type;
1196 new->ip_summed = old->ip_summed;
1197 skb_copy_queue_mapping(new, old);
1198@@ -2774,10 +2877,17 @@
1199 (2*sizeof(struct sk_buff)) +
1200 sizeof(atomic_t),
1201 0,
1202 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1203 NULL);
1204+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1205+ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1206+ sizeof(struct skb_cb_table),
1207+ 0,
1208+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1209+ NULL);
1210+#endif
1211 }
1212
1213 /**
1214 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
1215 * @skb: Socket buffer containing the buffers to be mapped
1216diff -U 5 -Nr linux-2.6.32/net/netfilter/Kconfig linux-2.6.32-imq/net/netfilter/Kconfig
1217--- linux-2.6.32/net/netfilter/Kconfig 2009-12-03 05:51:21.000000000 +0200
1218+++ linux-2.6.32-imq/net/netfilter/Kconfig 2009-12-11 14:16:42.681731014 +0200
1219@@ -394,10 +394,22 @@
1220 echo netfilter-ssh > /sys/class/leds/<ledname>/trigger
1221
1222 For more information on the LEDs available on your system, see
1223 Documentation/leds-class.txt
1224
1225+config NETFILTER_XT_TARGET_IMQ
1226+ tristate '"IMQ" target support'
1227+ depends on NETFILTER_XTABLES
1228+ depends on IP_NF_MANGLE || IP6_NF_MANGLE
1229+ select IMQ
1230+ default m if NETFILTER_ADVANCED=n
1231+ help
1232+ This option adds an `IMQ' target which is used to specify if and
1233+ to which imq device packets should get enqueued/dequeued.
1234+
1235+ To compile it as a module, choose M here. If unsure, say N.
1236+
1237 config NETFILTER_XT_TARGET_MARK
1238 tristate '"MARK" target support'
1239 default m if NETFILTER_ADVANCED=n
1240 help
1241 This option adds a `MARK' target, which allows you to create rules
1242diff -U 5 -Nr linux-2.6.32/net/netfilter/Makefile linux-2.6.32-imq/net/netfilter/Makefile
1243--- linux-2.6.32/net/netfilter/Makefile 2009-12-03 05:51:21.000000000 +0200
1244+++ linux-2.6.32-imq/net/netfilter/Makefile 2009-12-11 14:16:42.681731014 +0200
1245@@ -44,10 +44,11 @@
1246 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
1247 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
1248 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
1249 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1250 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1251+obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1252 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1253 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
1254 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
1255 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
1256 obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
1257diff -U 5 -Nr linux-2.6.32/net/netfilter/nf_queue.c linux-2.6.32-imq/net/netfilter/nf_queue.c
1258--- linux-2.6.32/net/netfilter/nf_queue.c 2009-12-03 05:51:21.000000000 +0200
1259+++ linux-2.6.32-imq/net/netfilter/nf_queue.c 2009-12-11 14:16:42.681731014 +0200
1260@@ -18,10 +18,30 @@
1261 */
1262 static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
1263
1264 static DEFINE_MUTEX(queue_handler_mutex);
1265
1266+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1267+static const struct nf_queue_handler *queue_imq_handler;
1268+
1269+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1270+{
1271+ mutex_lock(&queue_handler_mutex);
1272+ rcu_assign_pointer(queue_imq_handler, qh);
1273+ mutex_unlock(&queue_handler_mutex);
1274+}
1275+EXPORT_SYMBOL(nf_register_queue_imq_handler);
1276+
1277+void nf_unregister_queue_imq_handler(void)
1278+{
1279+ mutex_lock(&queue_handler_mutex);
1280+ rcu_assign_pointer(queue_imq_handler, NULL);
1281+ mutex_unlock(&queue_handler_mutex);
1282+}
1283+EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
1284+#endif
1285+
1286 /* return EBUSY when somebody else is registered, return EEXIST if the
1287 * same handler is registered, return 0 in case of success. */
1288 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
1289 {
1290 int ret;
1291@@ -78,11 +98,11 @@
1292
1293 synchronize_rcu();
1294 }
1295 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
1296
1297-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1298+void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1299 {
1300 /* Release those devices we held, or Alexey will kill me. */
1301 if (entry->indev)
1302 dev_put(entry->indev);
1303 if (entry->outdev)
1304@@ -98,10 +118,11 @@
1305 }
1306 #endif
1307 /* Drop reference to owner of hook which queued us. */
1308 module_put(entry->elem->owner);
1309 }
1310+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
1311
1312 /*
1313 * Any packet that leaves via this function must come back
1314 * through nf_reinject().
1315 */
1316@@ -119,16 +140,30 @@
1317 struct net_device *physindev;
1318 struct net_device *physoutdev;
1319 #endif
1320 const struct nf_afinfo *afinfo;
1321 const struct nf_queue_handler *qh;
1322+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1323+ const struct nf_queue_handler *qih = NULL;
1324+#endif
1325
1326 /* QUEUE == DROP if noone is waiting, to be safe. */
1327 rcu_read_lock();
1328
1329 qh = rcu_dereference(queue_handler[pf]);
1330+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1331+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1332+ if (pf == PF_INET || pf == PF_INET6)
1333+#else
1334+ if (pf == PF_INET)
1335+#endif
1336+ qih = rcu_dereference(queue_imq_handler);
1337+
1338+ if (!qh && !qih)
1339+#else /* !IMQ */
1340 if (!qh)
1341+#endif
1342 goto err_unlock;
1343
1344 afinfo = nf_get_afinfo(pf);
1345 if (!afinfo)
1346 goto err_unlock;
1347@@ -143,10 +178,14 @@
1348 .pf = pf,
1349 .hook = hook,
1350 .indev = indev,
1351 .outdev = outdev,
1352 .okfn = okfn,
1353+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1354+ .next_outfn = qh ? qh->outfn : NULL,
1355+ .next_queuenum = queuenum,
1356+#endif
1357 };
1358
1359 /* If it's going away, ignore hook. */
1360 if (!try_module_get(entry->elem->owner)) {
1361 rcu_read_unlock();
1362@@ -168,12 +207,23 @@
1363 if (physoutdev)
1364 dev_hold(physoutdev);
1365 }
1366 #endif
1367 afinfo->saveroute(skb, entry);
1368+
1369+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1370+ if (qih) {
1371+ status = qih->outfn(entry, queuenum);
1372+ goto imq_skip_queue;
1373+ }
1374+#endif
1375+
1376 status = qh->outfn(entry, queuenum);
1377
1378+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1379+imq_skip_queue:
1380+#endif
1381 rcu_read_unlock();
1382
1383 if (status < 0) {
1384 nf_queue_entry_release_refs(entry);
1385 goto err;
1386diff -U 5 -Nr linux-2.6.32/net/netfilter/xt_IMQ.c linux-2.6.32-imq/net/netfilter/xt_IMQ.c
1387--- linux-2.6.32/net/netfilter/xt_IMQ.c 1970-01-01 02:00:00.000000000 +0200
1388+++ linux-2.6.32-imq/net/netfilter/xt_IMQ.c 2009-12-11 14:16:42.681731014 +0200
1389@@ -0,0 +1,73 @@
1390+/*
1391+ * This target marks packets to be enqueued to an imq device
1392+ */
1393+#include <linux/module.h>
1394+#include <linux/skbuff.h>
1395+#include <linux/netfilter/x_tables.h>
1396+#include <linux/netfilter/xt_IMQ.h>
1397+#include <linux/imq.h>
1398+
1399+static unsigned int imq_target(struct sk_buff *pskb,
1400+ const struct xt_target_param *par)
1401+{
1402+ const struct xt_imq_info *mr = par->targinfo;
1403+
1404+ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1405+
1406+ return XT_CONTINUE;
1407+}
1408+
1409+static bool imq_checkentry(const struct xt_tgchk_param *par)
1410+{
1411+ struct xt_imq_info *mr = par->targinfo;
1412+
1413+ if (mr->todev > IMQ_MAX_DEVS - 1) {
1414+ printk(KERN_WARNING
1415+ "IMQ: invalid device specified, highest is %u\n",
1416+ IMQ_MAX_DEVS - 1);
1417+ return 0;
1418+ }
1419+
1420+ return 1;
1421+}
1422+
1423+static struct xt_target xt_imq_reg[] __read_mostly = {
1424+ {
1425+ .name = "IMQ",
1426+ .family = AF_INET,
1427+ .checkentry = imq_checkentry,
1428+ .target = imq_target,
1429+ .targetsize = sizeof(struct xt_imq_info),
1430+ .table = "mangle",
1431+ .me = THIS_MODULE
1432+ },
1433+ {
1434+ .name = "IMQ",
1435+ .family = AF_INET6,
1436+ .checkentry = imq_checkentry,
1437+ .target = imq_target,
1438+ .targetsize = sizeof(struct xt_imq_info),
1439+ .table = "mangle",
1440+ .me = THIS_MODULE
1441+ },
1442+};
1443+
1444+static int __init imq_init(void)
1445+{
1446+ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1447+}
1448+
1449+static void __exit imq_fini(void)
1450+{
1451+ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1452+}
1453+
1454+module_init(imq_init);
1455+module_exit(imq_fini);
1456+
1457+MODULE_AUTHOR("http://www.linuximq.net");
1458+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1459+MODULE_LICENSE("GPL");
1460+MODULE_ALIAS("ipt_IMQ");
1461+MODULE_ALIAS("ip6t_IMQ");
1462+