1 --- a/drivers/net/Kconfig
2 +++ b/drivers/net/Kconfig
3 @@ -109,6 +109,129 @@ config EQUALIZER
4 To compile this driver as a module, choose M here: the module
5 will be called eql. If unsure, say N.
8 + tristate "IMQ (intermediate queueing device) support"
9 + depends on NETDEVICES && NETFILTER
11 + The IMQ device(s) is used as placeholder for QoS queueing
12 + disciplines. Every packet entering/leaving the IP stack can be
13 + directed through the IMQ device where it's enqueued/dequeued to the
14 + attached qdisc. This allows you to treat network devices as classes
15 + and distribute bandwidth among them. Iptables is used to specify
16 + through which IMQ device, if any, packets travel.
18 + More information at: http://www.linuximq.net/
20 + To compile this driver as a module, choose M here: the module
21 + will be called imq. If unsure, say N.
24 + prompt "IMQ behavior (PRE/POSTROUTING)"
26 + default IMQ_BEHAVIOR_AB
29 + This setting defines how IMQ behaves with respect to its
30 + hooking in PREROUTING and POSTROUTING.
32 + IMQ can work in any of the following ways:
34 + PREROUTING | POSTROUTING
35 + -----------------|-------------------
36 + #1 After NAT | After NAT
37 + #2 After NAT | Before NAT
38 + #3 Before NAT | After NAT
39 + #4 Before NAT | Before NAT
41 + The default behavior is to hook after NAT on PREROUTING
42 + and before NAT on POSTROUTING (#2).
44 + These settings are especially useful when trying to use IMQ
45 + to shape NATed clients.
47 + More information can be found at: www.linuximq.net
49 + If not sure leave the default settings alone.
51 +config IMQ_BEHAVIOR_AA
54 + This setting defines how IMQ behaves with respect to its
55 + hooking in PREROUTING and POSTROUTING.
57 + Choosing this option will make IMQ hook like this:
59 + PREROUTING: After NAT
60 + POSTROUTING: After NAT
62 + More information can be found at: www.linuximq.net
64 + If not sure leave the default settings alone.
66 +config IMQ_BEHAVIOR_AB
69 + This setting defines how IMQ behaves with respect to its
70 + hooking in PREROUTING and POSTROUTING.
72 + Choosing this option will make IMQ hook like this:
74 + PREROUTING: After NAT
75 + POSTROUTING: Before NAT
77 + More information can be found at: www.linuximq.net
79 + If not sure leave the default settings alone.
81 +config IMQ_BEHAVIOR_BA
84 + This setting defines how IMQ behaves with respect to its
85 + hooking in PREROUTING and POSTROUTING.
87 + Choosing this option will make IMQ hook like this:
89 + PREROUTING: Before NAT
90 + POSTROUTING: After NAT
92 + More information can be found at: www.linuximq.net
94 + If not sure leave the default settings alone.
96 +config IMQ_BEHAVIOR_BB
99 + This setting defines how IMQ behaves with respect to its
100 + hooking in PREROUTING and POSTROUTING.
102 + Choosing this option will make IMQ hook like this:
104 + PREROUTING: Before NAT
105 + POSTROUTING: Before NAT
107 + More information can be found at: www.linuximq.net
109 + If not sure leave the default settings alone.
115 + int "Number of IMQ devices"
121 + This setting defines how many IMQ devices will be
124 + The default value is 16.
126 + More information can be found at: www.linuximq.net
128 + If not sure leave the default settings alone.
131 tristate "Universal TUN/TAP device driver support"
133 --- a/drivers/net/Makefile
134 +++ b/drivers/net/Makefile
135 @@ -144,6 +144,7 @@ obj-$(CONFIG_SLHC) += slhc.o
136 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
138 obj-$(CONFIG_DUMMY) += dummy.o
139 +obj-$(CONFIG_IMQ) += imq.o
140 obj-$(CONFIG_IFB) += ifb.o
141 obj-$(CONFIG_MACVLAN) += macvlan.o
142 obj-$(CONFIG_DE600) += de600.o
144 +++ b/drivers/net/imq.c
147 + * Pseudo-driver for the intermediate queue device.
149 + * This program is free software; you can redistribute it and/or
150 + * modify it under the terms of the GNU General Public License
151 + * as published by the Free Software Foundation; either version
152 + * 2 of the License, or (at your option) any later version.
154 + * Authors: Patrick McHardy, <kaber@trash.net>
156 + * The first version was written by Martin Devera, <devik@cdi.cz>
158 + * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
159 + * - Update patch to 2.4.21
160 + * Sebastian Strollo <sstrollo@nortelnetworks.com>
161 + * - Fix "Dead-loop on netdevice imq"-issue
162 + * Marcel Sebek <sebek64@post.cz>
163 + * - Update to 2.6.2-rc1
165 + * After some time of inactivity there is a group taking care
166 + * of IMQ again: http://www.linuximq.net
169 + * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
170 + * including the following changes:
172 + * - Correction of ipv6 support "+"s issue (Hasso Tepper)
173 + * - Correction of imq_init_devs() issue that resulted in
174 + * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
175 + * - Addition of functionality to choose number of IMQ devices
176 + * during kernel config (Andre Correa)
177 + * - Addition of functionality to choose how IMQ hooks on
178 + * PRE and POSTROUTING (after or before NAT) (Andre Correa)
179 + * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
182 + * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
183 + * released with almost no problems. 2.6.14-x was released
184 + * with some important changes: nfcache was removed; After
185 + * some weeks of trouble we figured out that some IMQ fields
186 + * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
187 + * These functions are correctly patched by this new patch version.
189 + * Thanks for all who helped to figure out all the problems with
190 + * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
191 + * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
192 + * I didn't forget anybody). I apologize again for my lack of time.
195 + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
196 + * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
197 + * recursive locking. New initialization routines to fix 'rmmod' not
198 + * working anymore. Used code from ifb.c. (Jussi Kivilinna)
200 + * 2008/08/06 - 2.6.27 - (JK)
201 + * - Replaced tasklet with 'netif_schedule()'.
202 + * - Cleaned up and added comments for imq_nf_queue().
204 + * Also, many thanks to Pablo Sebastian Greco for making the initial
205 + * patch and to those who helped the testing.
207 + * More info at: http://www.linuximq.net/ (Andre Correa)
210 +#include <linux/module.h>
211 +#include <linux/kernel.h>
212 +#include <linux/moduleparam.h>
213 +#include <linux/skbuff.h>
214 +#include <linux/netdevice.h>
215 +#include <linux/etherdevice.h>
216 +#include <linux/rtnetlink.h>
217 +#include <linux/if_arp.h>
218 +#include <linux/netfilter.h>
219 +#include <linux/netfilter_ipv4.h>
220 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
221 + #include <linux/netfilter_ipv6.h>
223 +#include <linux/imq.h>
224 +#include <net/pkt_sched.h>
225 +#include <net/netfilter/nf_queue.h>
227 +static nf_hookfn imq_nf_hook;
229 +static struct nf_hook_ops imq_ingress_ipv4 = {
230 + .hook = imq_nf_hook,
231 + .owner = THIS_MODULE,
233 + .hooknum = NF_INET_PRE_ROUTING,
234 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
235 + .priority = NF_IP_PRI_MANGLE + 1
237 + .priority = NF_IP_PRI_NAT_DST + 1
241 +static struct nf_hook_ops imq_egress_ipv4 = {
242 + .hook = imq_nf_hook,
243 + .owner = THIS_MODULE,
245 + .hooknum = NF_INET_POST_ROUTING,
246 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
247 + .priority = NF_IP_PRI_LAST
249 + .priority = NF_IP_PRI_NAT_SRC - 1
253 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
254 +static struct nf_hook_ops imq_ingress_ipv6 = {
255 + .hook = imq_nf_hook,
256 + .owner = THIS_MODULE,
258 + .hooknum = NF_INET_PRE_ROUTING,
259 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
260 + .priority = NF_IP6_PRI_MANGLE + 1
262 + .priority = NF_IP6_PRI_NAT_DST + 1
266 +static struct nf_hook_ops imq_egress_ipv6 = {
267 + .hook = imq_nf_hook,
268 + .owner = THIS_MODULE,
270 + .hooknum = NF_INET_POST_ROUTING,
271 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
272 + .priority = NF_IP6_PRI_LAST
274 + .priority = NF_IP6_PRI_NAT_SRC - 1
279 +#if defined(CONFIG_IMQ_NUM_DEVS)
280 +static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
282 +static unsigned int numdevs = IMQ_MAX_DEVS;
285 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
287 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
289 + return &dev->stats;
292 +/* called for packets kfree'd in qdiscs at places other than enqueue */
293 +static void imq_skb_destructor(struct sk_buff *skb)
295 + struct nf_queue_entry *entry = skb->nf_queue_entry;
297 + if (likely(entry)) {
298 + nf_queue_entry_release_refs(entry);
303 +static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
307 + if (!entry->next_outfn) {
308 + nf_reinject(entry, verdict);
312 + status = entry->next_outfn(entry, entry->next_queuenum);
314 + nf_queue_entry_release_refs(entry);
315 + kfree_skb(entry->skb);
320 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
322 + struct nf_queue_entry *entry = skb->nf_queue_entry;
324 + BUG_ON(entry == NULL);
326 + dev->stats.tx_bytes += skb->len;
327 + dev->stats.tx_packets++;
329 + skb->imq_flags = 0;
330 + skb->destructor = NULL;
332 + dev->trans_start = jiffies;
333 + imq_nf_reinject(entry, NF_ACCEPT);
337 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
339 + struct net_device *dev;
340 + struct sk_buff *skb_orig, *skb, *skb_shared;
342 + spinlock_t *root_lock;
343 + struct netdev_queue *txq;
346 + index = entry->skb->imq_flags & IMQ_F_IFMASK;
347 + if (unlikely(index > numdevs - 1)) {
348 + if (net_ratelimit())
349 + printk(KERN_WARNING
350 + "IMQ: invalid device specified, highest is %u\n",
355 + /* check for imq device by index from cache */
356 + dev = imq_devs_cache[index];
357 + if (unlikely(!dev)) {
360 + /* get device by name and cache result */
361 + snprintf(buf, sizeof(buf), "imq%d", index);
362 + dev = dev_get_by_name(&init_net, buf);
369 + imq_devs_cache[index] = dev;
372 + if (unlikely(!(dev->flags & IFF_UP))) {
373 + entry->skb->imq_flags = 0;
374 + imq_nf_reinject(entry, NF_ACCEPT);
377 + dev->last_rx = jiffies;
382 + /* skb has owner? => make clone */
383 + if (unlikely(skb->destructor)) {
385 + skb = skb_clone(skb, GFP_ATOMIC);
391 + skb->nf_queue_entry = entry;
393 + dev->stats.rx_bytes += skb->len;
394 + dev->stats.rx_packets++;
396 + txq = dev_pick_tx(dev, skb);
399 + if (unlikely(!q->enqueue))
400 + goto packet_not_eaten_by_imq_dev;
402 + if (unlikely(!rtnl_trylock()))
403 + goto packet_not_eaten_by_imq_dev;
405 + root_lock = qdisc_root_lock(q);
406 + spin_lock(root_lock);
408 + users = atomic_read(&skb->users);
410 + skb_shared = skb_get(skb); /* increase reference count by one */
411 + qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
413 + if (likely(atomic_read(&skb_shared->users) == users + 1)) {
414 + kfree_skb(skb_shared); /* decrease reference count by one */
416 + /* imq_skb_destructor frees the skb and entry */
417 + skb->destructor = &imq_skb_destructor;
421 + kfree_skb(skb_orig); /* free original */
423 + /* schedule qdisc dequeue */
424 + netif_tx_schedule_all(dev);
426 + spin_unlock(root_lock);
430 + /* qdisc dropped packet and decreased skb reference count of
431 + * skb, so we don't really want to and try refree as that would
432 + * actually destroy the skb. */
433 + spin_unlock(root_lock);
435 + goto packet_not_eaten_by_imq_dev;
438 +packet_not_eaten_by_imq_dev:
439 + /* cloned? restore original */
442 + entry->skb = skb_orig;
447 +static struct nf_queue_handler nfqh = {
449 + .outfn = imq_nf_queue,
452 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
453 + const struct net_device *indev,
454 + const struct net_device *outdev,
455 + int (*okfn)(struct sk_buff *))
457 + if (pskb->imq_flags & IMQ_F_ENQUEUE)
463 +static int imq_close(struct net_device *dev)
465 + netif_stop_queue(dev);
469 +static int imq_open(struct net_device *dev)
471 + netif_start_queue(dev);
475 +static void imq_setup(struct net_device *dev)
477 + dev->hard_start_xmit = imq_dev_xmit;
478 + dev->open = imq_open;
479 + dev->get_stats = imq_get_stats;
480 + dev->stop = imq_close;
481 + dev->type = ARPHRD_VOID;
483 + dev->tx_queue_len = 10000;
484 + dev->flags = IFF_NOARP;
487 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
491 + if (tb[IFLA_ADDRESS]) {
492 + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
496 + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
497 + ret = -EADDRNOTAVAIL;
503 + printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
507 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
510 + .setup = imq_setup,
511 + .validate = imq_validate,
514 +static int __init imq_init_hooks(void)
518 + nf_register_queue_imq_handler(&nfqh);
520 + err = nf_register_hook(&imq_ingress_ipv4);
524 + err = nf_register_hook(&imq_egress_ipv4);
528 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
529 + err = nf_register_hook(&imq_ingress_ipv6);
533 + err = nf_register_hook(&imq_egress_ipv6);
540 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
542 + nf_unregister_hook(&imq_ingress_ipv6);
544 + nf_unregister_hook(&imq_egress_ipv4);
547 + nf_unregister_hook(&imq_ingress_ipv4);
549 + nf_unregister_queue_imq_handler();
553 +static int __init imq_init_one(int index)
555 + struct net_device *dev;
558 + dev = alloc_netdev(0, "imq%d", imq_setup);
562 + ret = dev_alloc_name(dev, dev->name);
566 + dev->rtnl_link_ops = &imq_link_ops;
567 + ret = register_netdevice(dev);
577 +static int __init imq_init_devs(void)
581 + if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
582 + printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
588 + err = __rtnl_link_register(&imq_link_ops);
590 + for (i = 0; i < numdevs && !err; i++)
591 + err = imq_init_one(i);
594 + __rtnl_link_unregister(&imq_link_ops);
595 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
602 +static int __init imq_init_module(void)
606 +#if defined(CONFIG_IMQ_NUM_DEVS)
607 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
608 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
609 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
612 + err = imq_init_devs();
614 + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
618 + err = imq_init_hooks();
620 + printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
621 + rtnl_link_unregister(&imq_link_ops);
622 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
626 + printk(KERN_INFO "IMQ driver loaded successfully.\n");
628 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
629 + printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
631 + printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
633 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
634 + printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
636 + printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
642 +static void __exit imq_unhook(void)
644 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
645 + nf_unregister_hook(&imq_ingress_ipv6);
646 + nf_unregister_hook(&imq_egress_ipv6);
648 + nf_unregister_hook(&imq_ingress_ipv4);
649 + nf_unregister_hook(&imq_egress_ipv4);
651 + nf_unregister_queue_imq_handler();
654 +static void __exit imq_cleanup_devs(void)
656 + rtnl_link_unregister(&imq_link_ops);
657 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
660 +static void __exit imq_exit_module(void)
663 + imq_cleanup_devs();
664 + printk(KERN_INFO "IMQ driver unloaded successfully.\n");
667 +module_init(imq_init_module);
668 +module_exit(imq_exit_module);
670 +module_param(numdevs, int, 0);
671 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
673 +MODULE_AUTHOR("http://www.linuximq.net");
674 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
675 + "http://www.linuximq.net/ for more information.");
676 +MODULE_LICENSE("GPL");
677 +MODULE_ALIAS_RTNL_LINK("imq");
680 +++ b/include/linux/imq.h
685 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
686 +#define IMQ_F_BITS 5
688 +#define IMQ_F_IFMASK 0x0f
689 +#define IMQ_F_ENQUEUE 0x10
691 +#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
696 +++ b/include/linux/netfilter/xt_IMQ.h
701 +struct xt_imq_info {
702 + unsigned int todev; /* target imq device */
705 +#endif /* _XT_IMQ_H */
708 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
713 +/* Backwards compatibility for old userspace */
714 +#include <linux/netfilter/xt_IMQ.h>
716 +#define ip6t_imq_info xt_imq_info
718 +#endif /* _IP6T_IMQ_H */
720 --- a/include/linux/skbuff.h
721 +++ b/include/linux/skbuff.h
723 #include <linux/rcupdate.h>
724 #include <linux/dmaengine.h>
725 #include <linux/hrtimer.h>
726 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
727 +#include <linux/imq.h>
730 #define HAVE_ALLOC_SKB /* For the drivers to know */
731 #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
732 @@ -302,6 +305,13 @@ struct sk_buff {
733 struct nf_conntrack *nfct;
734 struct sk_buff *nfct_reasm;
736 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
737 + struct nf_queue_entry *nf_queue_entry;
738 +/* 2.6.27 started using skb->cb for qdiscs. The problem here is that IMQ runs
739 + * qdisc on the wrong layer and skb->cb is already used by TCP. So we need to
740 + * make qdisc not use skb->cb; this adds 4 extra bytes to skbuff. */
741 + unsigned int qdisc_skb_cb_pkt_len;
743 #ifdef CONFIG_BRIDGE_NETFILTER
744 struct nf_bridge_info *nf_bridge;
746 @@ -321,6 +331,9 @@ struct sk_buff {
747 __u8 do_not_encrypt:1;
749 /* 0/13/14 bit hole */
750 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
751 + __u8 imq_flags:IMQ_F_BITS;
754 #ifdef CONFIG_NET_DMA
755 dma_cookie_t dma_cookie;
756 @@ -1638,6 +1651,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
757 dst->nfct_reasm = src->nfct_reasm;
758 nf_conntrack_get_reasm(src->nfct_reasm);
760 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
761 + dst->imq_flags = src->imq_flags;
762 + dst->nf_queue_entry = src->nf_queue_entry;
764 #ifdef CONFIG_BRIDGE_NETFILTER
765 dst->nf_bridge = src->nf_bridge;
766 nf_bridge_get(src->nf_bridge);
767 --- a/include/net/netfilter/nf_queue.h
768 +++ b/include/net/netfilter/nf_queue.h
769 @@ -13,6 +13,13 @@ struct nf_queue_entry {
770 struct net_device *indev;
771 struct net_device *outdev;
772 int (*okfn)(struct sk_buff *);
774 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
775 + /* the following allow IMQ to work when other nf_queue handlers are set up */
776 + int (*next_outfn)(struct nf_queue_entry *entry,
777 + unsigned int queuenum);
778 + unsigned int next_queuenum;
782 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
783 @@ -30,5 +37,11 @@ extern int nf_unregister_queue_handler(int pf,
784 const struct nf_queue_handler *qh);
785 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
786 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
787 +extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
789 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
790 +extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
791 +extern void nf_unregister_queue_imq_handler(void);
794 #endif /* _NF_QUEUE_H */
795 --- a/include/net/sch_generic.h
796 +++ b/include/net/sch_generic.h
797 @@ -174,7 +174,11 @@ struct tcf_proto
800 struct qdisc_skb_cb {
801 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
802 + unsigned int __imq_do_not_use;
804 unsigned int pkt_len;
809 @@ -275,6 +279,8 @@ extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_clas
810 extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
811 extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
813 +extern struct netdev_queue *dev_pick_tx(struct net_device *dev,
814 + struct sk_buff *skb);
815 extern void dev_init_scheduler(struct net_device *dev);
816 extern void dev_shutdown(struct net_device *dev);
817 extern void dev_activate(struct net_device *dev);
818 @@ -340,7 +346,11 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
820 static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
822 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
823 + return skb->qdisc_skb_cb_pkt_len; /* see skbuff.h why */
825 return qdisc_skb_cb(skb)->pkt_len;
829 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
830 @@ -366,7 +376,11 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
832 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
834 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
835 + skb->qdisc_skb_cb_pkt_len = skb->len; /* see skbuff.h why */
837 qdisc_skb_cb(skb)->pkt_len = skb->len;
839 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
845 #include <net/net_namespace.h>
846 #include <net/sock.h>
847 #include <linux/rtnetlink.h>
848 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
849 +#include <linux/imq.h>
851 #include <linux/proc_fs.h>
852 #include <linux/seq_file.h>
853 #include <linux/stat.h>
854 @@ -1619,7 +1622,11 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
855 struct netdev_queue *txq)
857 if (likely(!skb->next)) {
858 - if (!list_empty(&ptype_all))
859 + if (!list_empty(&ptype_all)
860 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
861 + && !(skb->imq_flags & IMQ_F_ENQUEUE)
864 dev_queue_xmit_nit(skb, dev);
866 if (netif_needs_gso(dev, skb)) {
867 @@ -1709,8 +1716,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
868 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
871 -static struct netdev_queue *dev_pick_tx(struct net_device *dev,
872 - struct sk_buff *skb)
873 +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
877 @@ -1722,6 +1728,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
878 skb_set_queue_mapping(skb, queue_index);
879 return netdev_get_tx_queue(dev, queue_index);
881 +EXPORT_SYMBOL(dev_pick_tx);
884 * dev_queue_xmit - transmit a buffer
885 --- a/net/core/rtnetlink.c
886 +++ b/net/core/rtnetlink.c
887 @@ -1406,6 +1406,7 @@ EXPORT_SYMBOL(rtnetlink_put_metrics);
888 EXPORT_SYMBOL(rtnl_lock);
889 EXPORT_SYMBOL(rtnl_trylock);
890 EXPORT_SYMBOL(rtnl_unlock);
891 +EXPORT_SYMBOL(__rtnl_unlock);
892 EXPORT_SYMBOL(rtnl_is_locked);
893 EXPORT_SYMBOL(rtnl_unicast);
894 EXPORT_SYMBOL(rtnl_notify);
895 --- a/net/core/skbuff.c
896 +++ b/net/core/skbuff.c
897 @@ -436,6 +436,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
898 new->sp = secpath_get(old->sp);
900 memcpy(new->cb, old->cb, sizeof(old->cb));
901 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
902 + new->qdisc_skb_cb_pkt_len = old->qdisc_skb_cb_pkt_len;
904 new->csum_start = old->csum_start;
905 new->csum_offset = old->csum_offset;
906 new->local_df = old->local_df;
907 @@ -2258,7 +2258,9 @@
909 __copy_skb_header(nskb, skb);
910 nskb->mac_len = skb->mac_len;
912 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
913 + nskb->qdisc_skb_cb_pkt_len = skb->qdisc_skb_cb_pkt_len;
915 skb_reserve(nskb, headroom);
916 skb_reset_mac_header(nskb);
917 skb_set_network_header(nskb, skb->mac_len);
918 --- a/net/netfilter/Kconfig
919 +++ b/net/netfilter/Kconfig
920 @@ -342,6 +342,18 @@ config NETFILTER_XT_TARGET_DSCP
922 To compile it as a module, choose M here. If unsure, say N.
924 +config NETFILTER_XT_TARGET_IMQ
925 + tristate '"IMQ" target support'
926 + depends on NETFILTER_XTABLES
927 + depends on IP_NF_MANGLE || IP6_NF_MANGLE
929 + default m if NETFILTER_ADVANCED=n
931 + This option adds a `IMQ' target which is used to specify if and
932 + to which imq device packets should get enqueued/dequeued.
934 + To compile it as a module, choose M here. If unsure, say N.
936 config NETFILTER_XT_TARGET_MARK
937 tristate '"MARK" target support'
938 depends on NETFILTER_XTABLES
939 --- a/net/netfilter/Makefile
940 +++ b/net/netfilter/Makefile
941 @@ -42,6 +42,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
942 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
943 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
944 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
945 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
946 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
947 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
948 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
949 --- a/net/netfilter/nf_queue.c
950 +++ b/net/netfilter/nf_queue.c
951 @@ -20,6 +20,26 @@ static const struct nf_queue_handler *queue_handler[NPROTO];
953 static DEFINE_MUTEX(queue_handler_mutex);
955 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
956 +static const struct nf_queue_handler *queue_imq_handler;
958 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
960 + mutex_lock(&queue_handler_mutex);
961 + rcu_assign_pointer(queue_imq_handler, qh);
962 + mutex_unlock(&queue_handler_mutex);
964 +EXPORT_SYMBOL(nf_register_queue_imq_handler);
966 +void nf_unregister_queue_imq_handler(void)
968 + mutex_lock(&queue_handler_mutex);
969 + rcu_assign_pointer(queue_imq_handler, NULL);
970 + mutex_unlock(&queue_handler_mutex);
972 +EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
975 /* return EBUSY when somebody else is registered, return EEXIST if the
976 * same handler is registered, return 0 in case of success. */
977 int nf_register_queue_handler(int pf, const struct nf_queue_handler *qh)
978 @@ -80,7 +100,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
980 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
982 -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
983 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
985 /* Release those devices we held, or Alexey will kill me. */
987 @@ -100,6 +120,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
988 /* Drop reference to owner of hook which queued us. */
989 module_put(entry->elem->owner);
991 +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
994 * Any packet that leaves via this function must come back
995 @@ -121,12 +142,26 @@ static int __nf_queue(struct sk_buff *skb,
997 const struct nf_afinfo *afinfo;
998 const struct nf_queue_handler *qh;
999 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1000 + const struct nf_queue_handler *qih = NULL;
1003 /* QUEUE == DROP if noone is waiting, to be safe. */
1006 qh = rcu_dereference(queue_handler[pf]);
1007 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1008 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1009 + if (pf == PF_INET || pf == PF_INET6)
1011 + if (pf == PF_INET)
1013 + qih = rcu_dereference(queue_imq_handler);
1021 afinfo = nf_get_afinfo(pf);
1022 @@ -145,6 +180,10 @@ static int __nf_queue(struct sk_buff *skb,
1026 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1027 + .next_outfn = qh ? qh->outfn : NULL,
1028 + .next_queuenum = queuenum,
1032 /* If it's going away, ignore hook. */
1033 @@ -170,8 +209,19 @@ static int __nf_queue(struct sk_buff *skb,
1036 afinfo->saveroute(skb, entry);
1038 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1040 + status = qih->outfn(entry, queuenum);
1041 + goto imq_skip_queue;
1045 status = qh->outfn(entry, queuenum);
1047 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1054 +++ b/net/netfilter/xt_IMQ.c
1057 + * This target marks packets to be enqueued to an imq device
1059 +#include <linux/module.h>
1060 +#include <linux/skbuff.h>
1061 +#include <linux/netfilter/x_tables.h>
1062 +#include <linux/netfilter/xt_IMQ.h>
1063 +#include <linux/imq.h>
1065 +static unsigned int imq_target(struct sk_buff *pskb,
1066 + const struct net_device *in,
1067 + const struct net_device *out,
1068 + unsigned int hooknum,
1069 + const struct xt_target *target,
1070 + const void *targinfo)
1072 + const struct xt_imq_info *mr = targinfo;
1074 + pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1076 + return XT_CONTINUE;
1079 +static bool imq_checkentry(const char *tablename,
1080 + const void *entry,
1081 + const struct xt_target *target,
1083 + unsigned int hook_mask)
1085 + struct xt_imq_info *mr = targinfo;
1087 + if (mr->todev > IMQ_MAX_DEVS - 1) {
1088 + printk(KERN_WARNING
1089 + "IMQ: invalid device specified, highest is %u\n",
1090 + IMQ_MAX_DEVS - 1);
1097 +static struct xt_target xt_imq_reg[] __read_mostly = {
1100 + .family = AF_INET,
1101 + .target = imq_target,
1102 + .targetsize = sizeof(struct xt_imq_info),
1103 + .table = "mangle",
1104 + .checkentry = imq_checkentry,
1109 + .family = AF_INET6,
1110 + .target = imq_target,
1111 + .targetsize = sizeof(struct xt_imq_info),
1112 + .table = "mangle",
1113 + .checkentry = imq_checkentry,
1118 +static int __init imq_init(void)
1120 + return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1123 +static void __exit imq_fini(void)
1125 + xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1128 +module_init(imq_init);
1129 +module_exit(imq_fini);
1131 +MODULE_AUTHOR("http://www.linuximq.net");
1132 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1133 +MODULE_LICENSE("GPL");
1134 +MODULE_ALIAS("ipt_IMQ");
1135 +MODULE_ALIAS("ip6t_IMQ");
1137 --- a/net/sched/sch_api.c
1138 +++ b/net/sched/sch_api.c
1139 @@ -405,7 +405,11 @@ void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
1141 if (unlikely(pkt_len < 1))
1143 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1144 + skb->qdisc_skb_cb_pkt_len = pkt_len; /* see skbuff.h why */
1146 qdisc_skb_cb(skb)->pkt_len = pkt_len;
1149 EXPORT_SYMBOL(qdisc_calculate_pkt_len);