1 From: Hannes Reinecke <hare@suse.de>
2 Date: Wed, 17 Sep 2008 16:45:55 +0200
3 Subject: pkt_sched: Add multiqueue scheduler support
4 References: FATE#303913
6 This patch is intended to add a qdisc to support the new tx multiqueue
7 architecture by providing a band for each hardware queue. By doing
8 this it is possible to support a different qdisc per physical hardware
11 This qdisc uses the skb->queue_mapping to select which band to place
12 the traffic onto. It then uses a round robin w/ a check to see if the
13 subqueue is stopped to determine which band to dequeue the packet from.
15 Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
16 Signed-off-by: Hannes Reinecke <hare@suse.de>
18 Documentation/networking/multiqueue.txt | 47 +++-
19 include/linux/pkt_sched.h | 7 +
20 net/sched/Kconfig | 9 +
21 net/sched/Makefile | 1 +
22 net/sched/sch_multiq.c | 469 +++++++++++++++++++++++++++++++
23 5 files changed, 532 insertions(+), 1 deletions(-)
24 create mode 100644 net/sched/sch_multiq.c
26 diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
27 index d391ea6..5787ee6 100644
28 --- a/Documentation/networking/multiqueue.txt
29 +++ b/Documentation/networking/multiqueue.txt
30 @@ -24,4 +24,49 @@ netif_{start|stop|wake}_subqueue() functions to manage each queue while the
31 device is still operational. netdev->queue_lock is still used when the device
32 comes online or when it's completely shut down (unregister_netdev(), etc.).
34 -Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
36 +Section 2: Qdisc support for multiqueue devices
38 +-----------------------------------------------
40 +Currently two qdiscs support multiqueue devices. The first is the default
41 +pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue. A new
42 +round-robin qdisc, sch_multiq also supports multiple hardware queues. The
43 +qdisc is responsible for classifying the skbs and then directing the skbs to
44 +bands and queues based on the value in skb->queue_mapping. Use this field in
45 +the base driver to determine which queue to send the skb to.
47 +sch_multiq has been added for hardware that wishes to avoid unnecessary
48 +requeuing. It will cycle through the bands and verify that the hardware queue
49 +associated with the band is not stopped prior to dequeuing a packet.
51 +On qdisc load, the number of bands is based on the number of queues on the
52 +hardware. Once the association is made, any skb with skb->queue_mapping set
53 +will be queued to the band associated with the hardware queue.
56 +Section 3: Brief howto using MULTIQ for multiqueue devices
57 +---------------------------------------------------------------
59 +The userspace command 'tc', part of the iproute2 package, is used to configure
60 +qdiscs. To add the MULTIQ qdisc to your network device, assuming the device
61 +is called eth0, run the following command:
63 +# tc qdisc add dev eth0 root handle 1: multiq
65 +The qdisc will allocate the number of bands to equal the number of queues that
66 +the device reports, and bring the qdisc online. Assuming eth0 has 4 Tx
67 +queues, the band mapping would look like:
74 +Traffic will begin flowing through each queue if your base device has either
75 +the default simple_tx_hash or a custom netdev->select_queue() defined.
77 +The behavior of tc filters remains the same.
79 +Author: Alexander Duyck <alexander.h.duyck@intel.com>
80 +Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
81 diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
82 index e5de421..5d921fa 100644
83 --- a/include/linux/pkt_sched.h
84 +++ b/include/linux/pkt_sched.h
85 @@ -123,6 +123,13 @@ struct tc_prio_qopt
86 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
91 +struct tc_multiq_qopt {
92 + __u16 bands; /* Number of bands */
93 + __u16 max_bands; /* Maximum number of queues */
99 diff --git a/net/sched/Kconfig b/net/sched/Kconfig
100 index 9437b27..efaa7a7 100644
101 --- a/net/sched/Kconfig
102 +++ b/net/sched/Kconfig
103 @@ -106,6 +106,15 @@ config NET_SCH_PRIO
104 To compile this code as a module, choose M here: the
105 module will be called sch_prio.
107 +config NET_SCH_MULTIQ
108 + tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)"
110 + Say Y here if you want to use an n-band queue packet scheduler
111 + to support devices that have multiple hardware transmit queues.
113 + To compile this code as a module, choose M here: the
114 + module will be called sch_multiq.
117 tristate "Random Early Detection (RED)"
119 diff --git a/net/sched/Makefile b/net/sched/Makefile
120 index 1d2b0f7..3d9b953 100644
121 --- a/net/sched/Makefile
122 +++ b/net/sched/Makefile
123 @@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
124 obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
125 obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
126 obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
127 +obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
128 obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
129 obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
130 obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
131 diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
133 index 0000000..ce00df4
135 +++ b/net/sched/sch_multiq.c
138 + * Copyright (c) 2008, Intel Corporation.
140 + * This program is free software; you can redistribute it and/or modify it
141 + * under the terms and conditions of the GNU General Public License,
142 + * version 2, as published by the Free Software Foundation.
144 + * This program is distributed in the hope it will be useful, but WITHOUT
145 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
146 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
149 + * You should have received a copy of the GNU General Public License along with
150 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
151 + * Place - Suite 330, Boston, MA 02111-1307 USA.
153 + * Author: Alexander Duyck <alexander.h.duyck@intel.com>
156 +#include <linux/module.h>
157 +#include <linux/types.h>
158 +#include <linux/kernel.h>
159 +#include <linux/string.h>
160 +#include <linux/errno.h>
161 +#include <linux/skbuff.h>
162 +#include <net/netlink.h>
163 +#include <net/pkt_sched.h>
166 +struct multiq_sched_data {
170 + struct tcf_proto *filter_list;
171 + struct Qdisc **queues;
175 +static struct Qdisc *
176 +multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
178 + struct multiq_sched_data *q = qdisc_priv(sch);
180 + struct tcf_result res;
183 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
184 + err = tc_classify(skb, q->filter_list, &res);
185 +#ifdef CONFIG_NET_CLS_ACT
187 + case TC_ACT_STOLEN:
188 + case TC_ACT_QUEUED:
189 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
194 + band = skb_get_queue_mapping(skb);
196 + if (band >= q->bands)
197 + return q->queues[0];
199 + return q->queues[band];
203 +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
205 + struct Qdisc *qdisc;
208 + qdisc = multiq_classify(skb, sch, &ret);
209 +#ifdef CONFIG_NET_CLS_ACT
210 + if (qdisc == NULL) {
212 + if (ret & __NET_XMIT_BYPASS)
213 + sch->qstats.drops++;
219 + ret = qdisc_enqueue(skb, qdisc);
220 + if (ret == NET_XMIT_SUCCESS) {
221 + sch->bstats.bytes += qdisc_pkt_len(skb);
222 + sch->bstats.packets++;
224 + return NET_XMIT_SUCCESS;
226 + if (net_xmit_drop_count(ret))
227 + sch->qstats.drops++;
233 +multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
235 + struct Qdisc *qdisc;
238 + qdisc = multiq_classify(skb, sch, &ret);
239 +#ifdef CONFIG_NET_CLS_ACT
240 + if (qdisc == NULL) {
241 + if (ret & __NET_XMIT_BYPASS)
242 + sch->qstats.drops++;
248 + ret = qdisc->ops->requeue(skb, qdisc);
249 + if (ret == NET_XMIT_SUCCESS) {
251 + sch->qstats.requeues++;
252 + return NET_XMIT_SUCCESS;
254 + if (net_xmit_drop_count(ret))
255 + sch->qstats.drops++;
260 +static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
262 + struct multiq_sched_data *q = qdisc_priv(sch);
263 + struct Qdisc *qdisc;
264 + struct sk_buff *skb;
267 + for (band = 0; band < q->bands; band++) {
268 + /* cycle through bands to ensure fairness */
270 + if (q->curband >= q->bands)
273 + /* Check that target subqueue is available before
274 + * pulling an skb to avoid excessive requeues
276 + if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
277 + qdisc = q->queues[q->curband];
278 + skb = qdisc->dequeue(qdisc);
289 +static unsigned int multiq_drop(struct Qdisc *sch)
291 + struct multiq_sched_data *q = qdisc_priv(sch);
294 + struct Qdisc *qdisc;
296 + for (band = q->bands-1; band >= 0; band--) {
297 + qdisc = q->queues[band];
298 + if (qdisc->ops->drop) {
299 + len = qdisc->ops->drop(qdisc);
311 +multiq_reset(struct Qdisc *sch)
314 + struct multiq_sched_data *q = qdisc_priv(sch);
316 + for (band = 0; band < q->bands; band++)
317 + qdisc_reset(q->queues[band]);
323 +multiq_destroy(struct Qdisc *sch)
326 + struct multiq_sched_data *q = qdisc_priv(sch);
328 + tcf_destroy_chain(&q->filter_list);
329 + for (band = 0; band < q->bands; band++)
330 + qdisc_destroy(q->queues[band]);
335 +static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
337 + struct multiq_sched_data *q = qdisc_priv(sch);
338 + struct tc_multiq_qopt *qopt;
341 + if (sch->parent != TC_H_ROOT)
343 + if (!netif_is_multiqueue(qdisc_dev(sch)))
345 + if (nla_len(opt) < sizeof(*qopt))
348 + qopt = nla_data(opt);
350 + qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
352 + sch_tree_lock(sch);
353 + q->bands = qopt->bands;
354 + for (i = q->bands; i < q->max_bands; i++) {
355 + struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
356 + if (child != &noop_qdisc) {
357 + qdisc_tree_decrease_qlen(child, child->q.qlen);
358 + qdisc_destroy(child);
362 + sch_tree_unlock(sch);
364 + for (i = 0; i < q->bands; i++) {
365 + if (q->queues[i] == &noop_qdisc) {
366 + struct Qdisc *child;
367 + child = qdisc_create_dflt(qdisc_dev(sch),
370 + TC_H_MAKE(sch->handle,
373 + sch_tree_lock(sch);
374 + child = xchg(&q->queues[i], child);
376 + if (child != &noop_qdisc) {
377 + qdisc_tree_decrease_qlen(child,
379 + qdisc_destroy(child);
381 + sch_tree_unlock(sch);
388 +static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
390 + struct multiq_sched_data *q = qdisc_priv(sch);
398 + q->max_bands = qdisc_dev(sch)->num_tx_queues;
400 + q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
403 + for (i = 0; i < q->max_bands; i++)
404 + q->queues[i] = &noop_qdisc;
406 + return multiq_tune(sch, opt);
409 +static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
411 + struct multiq_sched_data *q = qdisc_priv(sch);
412 + unsigned char *b = skb_tail_pointer(skb);
413 + struct tc_multiq_qopt opt;
415 + opt.bands = q->bands;
416 + opt.max_bands = q->max_bands;
418 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
423 + nlmsg_trim(skb, b);
427 +static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
428 + struct Qdisc **old)
430 + struct multiq_sched_data *q = qdisc_priv(sch);
431 + unsigned long band = arg - 1;
433 + if (band >= q->bands)
439 + sch_tree_lock(sch);
440 + *old = q->queues[band];
441 + q->queues[band] = new;
442 + qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
444 + sch_tree_unlock(sch);
449 +static struct Qdisc *
450 +multiq_leaf(struct Qdisc *sch, unsigned long arg)
452 + struct multiq_sched_data *q = qdisc_priv(sch);
453 + unsigned long band = arg - 1;
455 + if (band >= q->bands)
458 + return q->queues[band];
461 +static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
463 + struct multiq_sched_data *q = qdisc_priv(sch);
464 + unsigned long band = TC_H_MIN(classid);
466 + if (band - 1 >= q->bands)
471 +static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
474 + return multiq_get(sch, classid);
478 +static void multiq_put(struct Qdisc *q, unsigned long cl)
483 +static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
484 + struct nlattr **tca, unsigned long *arg)
486 + unsigned long cl = *arg;
487 + struct multiq_sched_data *q = qdisc_priv(sch);
489 + if (cl - 1 > q->bands)
494 +static int multiq_delete(struct Qdisc *sch, unsigned long cl)
496 + struct multiq_sched_data *q = qdisc_priv(sch);
497 + if (cl - 1 > q->bands)
503 +static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
504 + struct sk_buff *skb, struct tcmsg *tcm)
506 + struct multiq_sched_data *q = qdisc_priv(sch);
508 + if (cl - 1 > q->bands)
510 + tcm->tcm_handle |= TC_H_MIN(cl);
511 + if (q->queues[cl-1])
512 + tcm->tcm_info = q->queues[cl-1]->handle;
516 +static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
517 + struct gnet_dump *d)
519 + struct multiq_sched_data *q = qdisc_priv(sch);
520 + struct Qdisc *cl_q;
522 + cl_q = q->queues[cl - 1];
523 + if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
524 + gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
530 +static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
532 + struct multiq_sched_data *q = qdisc_priv(sch);
538 + for (band = 0; band < q->bands; band++) {
539 + if (arg->count < arg->skip) {
543 + if (arg->fn(sch, band+1, arg) < 0) {
551 +static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
553 + struct multiq_sched_data *q = qdisc_priv(sch);
557 + return &q->filter_list;
560 +static const struct Qdisc_class_ops multiq_class_ops = {
561 + .graft = multiq_graft,
562 + .leaf = multiq_leaf,
565 + .change = multiq_change,
566 + .delete = multiq_delete,
567 + .walk = multiq_walk,
568 + .tcf_chain = multiq_find_tcf,
569 + .bind_tcf = multiq_bind,
570 + .unbind_tcf = multiq_put,
571 + .dump = multiq_dump_class,
572 + .dump_stats = multiq_dump_class_stats,
575 +static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
577 + .cl_ops = &multiq_class_ops,
579 + .priv_size = sizeof(struct multiq_sched_data),
580 + .enqueue = multiq_enqueue,
581 + .dequeue = multiq_dequeue,
582 + .requeue = multiq_requeue,
583 + .drop = multiq_drop,
584 + .init = multiq_init,
585 + .reset = multiq_reset,
586 + .destroy = multiq_destroy,
587 + .change = multiq_tune,
588 + .dump = multiq_dump,
589 + .owner = THIS_MODULE,
592 +static int __init multiq_module_init(void)
594 + return register_qdisc(&multiq_qdisc_ops);
597 +static void __exit multiq_module_exit(void)
599 + unregister_qdisc(&multiq_qdisc_ops);
602 +module_init(multiq_module_init)
603 +module_exit(multiq_module_exit)
605 +MODULE_LICENSE("GPL");