From: Hannes Reinecke <hare@suse.de>
Date: Wed, 17 Sep 2008 16:45:55 +0200
Subject: pkt_sched: Add multiqueue scheduler support
References: FATE#303913

This patch adds a qdisc to support the new TX multiqueue architecture
by providing a band for each hardware queue. This makes it possible to
run a different qdisc on each physical hardware queue.

The qdisc uses skb->queue_mapping to select the band on which to place
the traffic. It then dequeues in round-robin order, checking that a
band's subqueue is not stopped before pulling a packet from that band.
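
As an illustration only (not part of this patch): on this kernel
generation a driver selects the hardware queue, and therefore the
multiq band, through its netdev->select_queue() hook. A trivial,
hypothetical sketch that spreads traffic by submitting CPU:

	static u16 example_select_queue(struct net_device *dev,
					struct sk_buff *skb)
	{
		/* Example policy only: pick a TX queue per skb. */
		return smp_processor_id() % dev->real_num_tx_queues;
	}

The value this returns ends up in skb->queue_mapping, which is the
band index that multiq_classify() below uses.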

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 Documentation/networking/multiqueue.txt |   47 +++-
 include/linux/pkt_sched.h               |    7 +
 net/sched/Kconfig                       |    9 +
 net/sched/Makefile                      |    1 +
 net/sched/sch_multiq.c                  |  469 +++++++++++++++++++++++++++++++
 5 files changed, 532 insertions(+), 1 deletions(-)
 create mode 100644 net/sched/sch_multiq.c

diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
index d391ea6..5787ee6 100644
--- a/Documentation/networking/multiqueue.txt
+++ b/Documentation/networking/multiqueue.txt
@@ -24,4 +24,49 @@ netif_{start|stop|wake}_subqueue() functions to manage each queue while the
 device is still operational. netdev->queue_lock is still used when the device
 comes online or when it's completely shut down (unregister_netdev(), etc.).
 
-Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
+
+
+Section 2: Qdisc support for multiqueue devices
+-----------------------------------------------
+
+Currently two qdiscs support multiqueue devices. The first is the default
+pfifo_fast qdisc, which supports one qdisc per hardware queue. The second,
+sch_multiq, is a new round-robin qdisc that also supports multiple hardware
+queues. It classifies skbs and directs them to bands and queues based on
+the value in skb->queue_mapping; use this field in the base driver to
+determine which queue to send the skb to.
+
+sch_multiq has been added for hardware that wishes to avoid unnecessary
+requeuing. It cycles through the bands and verifies that the hardware
+queue associated with a band is not stopped before dequeuing a packet.
+
+On qdisc load, the number of bands is based on the number of queues on the
+hardware. Once the association is made, any skb with skb->queue_mapping
+set is queued to the band associated with that hardware queue.
+
+
+Section 3: Brief howto using MULTIQ for multiqueue devices
+----------------------------------------------------------
+
+The userspace command 'tc', part of the iproute2 package, is used to
+configure qdiscs. To add the MULTIQ qdisc to your network device, assuming
+the device is called eth0, run the following command:
+
+# tc qdisc add dev eth0 root handle 1: multiq
+
+The qdisc will allocate a number of bands equal to the number of queues
+that the device reports, and bring the qdisc online. Assuming eth0 has 4
+Tx queues, the band mapping would look like:
+
+band 0 => queue 0
+band 1 => queue 1
+band 2 => queue 2
+band 3 => queue 3
+
+Traffic will begin flowing through each queue if your base driver has
+either the default simple_tx_hash or a custom netdev->select_queue().
+
+The behavior of tc filters remains the same.
+
+Author: Alexander Duyck <alexander.h.duyck@intel.com>
+Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
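
To illustrate the class layout described above (example commands, not part
of this patch; they assume the 4-queue eth0 from the howto and an iproute2
recent enough to know the multiq qdisc): each band is exposed as class 1:1
through 1:4, so a per-band child qdisc can be grafted and per-band
statistics read back:

# tc qdisc add dev eth0 parent 1:1 handle 10: pfifo limit 100
# tc -s class show dev eth0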
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index e5de421..5d921fa 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -123,6 +123,13 @@ struct tc_prio_qopt
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
 };
 
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+	__u16	bands;			/* Number of bands */
+	__u16	max_bands;		/* Maximum number of queues */
+};
+
 /* TBF section */
 
 struct tc_tbf_qopt
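
For context (a sketch, not part of this patch): struct tc_multiq_qopt
travels as the TCA_OPTIONS attribute of the netlink request. Using
iproute2's addattr_l() helper, the userspace side amounts to little more
than the following; multiq_tune() below overwrites bands with the device's
real queue count, so the zeroed values are placeholders, and req stands
for the caller's netlink request buffer:

	struct tc_multiq_qopt opt;

	memset(&opt, 0, sizeof(opt));	/* kernel fills in the real counts */
	addattr_l(&req.n, sizeof(req), TCA_OPTIONS, &opt, sizeof(opt));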
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9437b27..efaa7a7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -106,6 +106,15 @@ config NET_SCH_PRIO
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_prio.
 
+config NET_SCH_MULTIQ
+	tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)"
+	---help---
+	  Say Y here if you want to use an n-band queue packet scheduler
+	  to support devices that have multiple hardware transmit queues.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_multiq.
+
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
 	---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 1d2b0f7..3d9b953 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_SFQ)	+= sch_sfq.o
 obj-$(CONFIG_NET_SCH_TBF)	+= sch_tbf.o
 obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
 obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
+obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
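
With the Kconfig and Makefile hooks above, the scheduler builds as
sch_multiq.ko when NET_SCH_MULTIQ=m. The kernel autoloads it via
request_module() when tc first references the qdisc, but it can also be
loaded by hand (illustration, not part of this patch):

# modprobe sch_multiq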
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
new file mode 100644
index 0000000..ce00df4
--- /dev/null
+++ b/net/sched/sch_multiq.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Alexander Duyck <alexander.h.duyck@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+
+struct multiq_sched_data {
+	u16 bands;
+	u16 max_bands;
+	u16 curband;
+	struct tcf_proto *filter_list;
+	struct Qdisc **queues;
+};
+
+
+static struct Qdisc *
+multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	u32 band;
+	struct tcf_result res;
+	int err;
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	err = tc_classify(skb, q->filter_list, &res);
+#ifdef CONFIG_NET_CLS_ACT
+	switch (err) {
+	case TC_ACT_STOLEN:
+	case TC_ACT_QUEUED:
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+	case TC_ACT_SHOT:
+		return NULL;
+	}
+#endif
+	band = skb_get_queue_mapping(skb);
+
+	if (band >= q->bands)
+		return q->queues[0];
+
+	return q->queues[band];
+}
+
+static int
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct Qdisc *qdisc;
+	int ret;
+
+	qdisc = multiq_classify(skb, sch, &ret);
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
+
+	ret = qdisc_enqueue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->bstats.bytes += qdisc_pkt_len(skb);
+		sch->bstats.packets++;
+		sch->q.qlen++;
+		return NET_XMIT_SUCCESS;
+	}
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
+	return ret;
+}
+
+
+static int
+multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct Qdisc *qdisc;
+	int ret;
+
+	qdisc = multiq_classify(skb, sch, &ret);
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
+
+	ret = qdisc->ops->requeue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		sch->qstats.requeues++;
+		return NET_XMIT_SUCCESS;
+	}
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
+	return ret;
+}
+
+
+static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *qdisc;
+	struct sk_buff *skb;
+	int band;
+
+	for (band = 0; band < q->bands; band++) {
+		/* cycle through bands to ensure fairness */
+		q->curband++;
+		if (q->curband >= q->bands)
+			q->curband = 0;
+
+		/* Check that target subqueue is available before
+		 * pulling an skb to avoid excessive requeues
+		 */
+		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+			qdisc = q->queues[q->curband];
+			skb = qdisc->dequeue(qdisc);
+			if (skb) {
+				sch->q.qlen--;
+				return skb;
+			}
+		}
+	}
+	return NULL;
+
+}
+
+static unsigned int multiq_drop(struct Qdisc *sch)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int band;
+	unsigned int len;
+	struct Qdisc *qdisc;
+
+	for (band = q->bands-1; band >= 0; band--) {
+		qdisc = q->queues[band];
+		if (qdisc->ops->drop) {
+			len = qdisc->ops->drop(qdisc);
+			if (len != 0) {
+				sch->q.qlen--;
+				return len;
+			}
+		}
+	}
+	return 0;
+}
+
+
+static void
+multiq_reset(struct Qdisc *sch)
+{
+	u16 band;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	for (band = 0; band < q->bands; band++)
+		qdisc_reset(q->queues[band]);
+	sch->q.qlen = 0;
+	q->curband = 0;
+}
+
+static void
+multiq_destroy(struct Qdisc *sch)
+{
+	int band;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	for (band = 0; band < q->bands; band++)
+		qdisc_destroy(q->queues[band]);
+
+	kfree(q->queues);
+}
+
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct tc_multiq_qopt *qopt;
+	int i;
+
+	if (sch->parent != TC_H_ROOT)
+		return -EINVAL;
+	if (!netif_is_multiqueue(qdisc_dev(sch)))
+		return -EINVAL;
+	if (nla_len(opt) < sizeof(*qopt))
+		return -EINVAL;
+
+	qopt = nla_data(opt);
+
+	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+
+	sch_tree_lock(sch);
+	q->bands = qopt->bands;
+	for (i = q->bands; i < q->max_bands; i++) {
+		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
+		if (child != &noop_qdisc) {
+			qdisc_tree_decrease_qlen(child, child->q.qlen);
+			qdisc_destroy(child);
+		}
+	}
+
+	sch_tree_unlock(sch);
+
+	for (i = 0; i < q->bands; i++) {
+		if (q->queues[i] == &noop_qdisc) {
+			struct Qdisc *child;
+			child = qdisc_create_dflt(qdisc_dev(sch),
+						  sch->dev_queue,
+						  &pfifo_qdisc_ops,
+						  TC_H_MAKE(sch->handle,
+							    i + 1));
+			if (child) {
+				sch_tree_lock(sch);
+				child = xchg(&q->queues[i], child);
+
+				if (child != &noop_qdisc) {
+					qdisc_tree_decrease_qlen(child,
+								 child->q.qlen);
+					qdisc_destroy(child);
+				}
+				sch_tree_unlock(sch);
+			}
+		}
+	}
+	return 0;
+}
+
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	q->queues = NULL;
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	q->max_bands = qdisc_dev(sch)->num_tx_queues;
+
+	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
+	if (!q->queues)
+		return -ENOBUFS;
+	for (i = 0; i < q->max_bands; i++)
+		q->queues[i] = &noop_qdisc;
+
+	return multiq_tune(sch, opt);
+}
+
+static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tc_multiq_qopt opt;
+
+	opt.bands = q->bands;
+	opt.max_bands = q->max_bands;
+
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+	return skb->len;
+
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -1;
+}
+
+static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+			struct Qdisc **old)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	if (band >= q->bands)
+		return -EINVAL;
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = q->queues[band];
+	q->queues[band] = new;
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+
+	return 0;
+}
+
+static struct Qdisc *
+multiq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	if (band >= q->bands)
+		return NULL;
+
+	return q->queues[band];
+}
+
+static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned long band = TC_H_MIN(classid);
+
+	if (band - 1 >= q->bands)
+		return 0;
+	return band;
+}
+
+static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
+				 u32 classid)
+{
+	return multiq_get(sch, classid);
+}
+
+
+static void multiq_put(struct Qdisc *q, unsigned long cl)
+{
+	return;
+}
+
+static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
+			 struct nlattr **tca, unsigned long *arg)
+{
+	unsigned long cl = *arg;
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	return 0;
+}
+
+static int multiq_delete(struct Qdisc *sch, unsigned long cl)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	return 0;
+}
+
+
+static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl - 1 > q->bands)
+		return -ENOENT;
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	if (q->queues[cl-1])
+		tcm->tcm_info = q->queues[cl-1]->handle;
+	return 0;
+}
+
+static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				   struct gnet_dump *d)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *cl_q;
+
+	cl_q = q->queues[cl - 1];
+	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+		return -1;
+
+	return 0;
+}
+
+static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	int band;
+
+	if (arg->stop)
+		return;
+
+	for (band = 0; band < q->bands; band++) {
+		if (arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, band+1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static const struct Qdisc_class_ops multiq_class_ops = {
+	.graft		= multiq_graft,
+	.leaf		= multiq_leaf,
+	.get		= multiq_get,
+	.put		= multiq_put,
+	.change		= multiq_change,
+	.delete		= multiq_delete,
+	.walk		= multiq_walk,
+	.tcf_chain	= multiq_find_tcf,
+	.bind_tcf	= multiq_bind,
+	.unbind_tcf	= multiq_put,
+	.dump		= multiq_dump_class,
+	.dump_stats	= multiq_dump_class_stats,
+};
+
+static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
+	.next		= NULL,
+	.cl_ops		= &multiq_class_ops,
+	.id		= "multiq",
+	.priv_size	= sizeof(struct multiq_sched_data),
+	.enqueue	= multiq_enqueue,
+	.dequeue	= multiq_dequeue,
+	.requeue	= multiq_requeue,
+	.drop		= multiq_drop,
+	.init		= multiq_init,
+	.reset		= multiq_reset,
+	.destroy	= multiq_destroy,
+	.change		= multiq_tune,
+	.dump		= multiq_dump,
+	.owner		= THIS_MODULE,
+};
+
+static int __init multiq_module_init(void)
+{
+	return register_qdisc(&multiq_qdisc_ops);
+}
+
+static void __exit multiq_module_exit(void)
+{
+	unregister_qdisc(&multiq_qdisc_ops);
+}
+
+module_init(multiq_module_init)
+module_exit(multiq_module_exit)
+
+MODULE_LICENSE("GPL");
--
1.5.2.4