From: Eric Dumazet <edumazet@google.com>
Date: Fri, 11 May 2012 09:30:50 +0000
Subject: [PATCH 3/7] fq_codel: Fair Queue Codel AQM

commit 4b549a2ef4bef9965d97cbd992ba67930cd3e0fe upstream.

Fair Queue Codel packet scheduler
Principles :

- Packets are classified on flows (by the internal classifier or an
  external one).
- This is a stochastic model (as we use a hash, several flows might be
  hashed to the same slot); see the sketch after this list.
- Each flow has a CoDel managed queue.
- Flows are linked onto two (Round Robin) lists, so that new flows
  have priority over old ones.

- For a given flow, packets are not reordered (CoDel uses a FIFO).
- Head drops only.
- ECN capability is on by default.
- Very low memory footprint (64 bytes per flow).
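As an illustration of the stochastic classification, here is a minimal
userspace sketch (not the kernel code): hash32() is a toy stand-in for
the kernel's jhash_3words(), and flow_bucket() uses the same
multiply-shift mapping as the patch, so distinct flows can legitimately
share a bucket.

#include <stdint.h>
#include <stdio.h>

/* toy mixer standing in for jhash_3words(); any decent 32-bit hash works */
static uint32_t hash32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ b) * 0x9e3779b1u;
	h = (h ^ c) * 0x85ebca6bu;
	return h ^ (h >> 16);
}

/* multiply-shift bucket mapping, as in the patch: no modulo bias */
static unsigned int flow_bucket(uint32_t hash, uint32_t flows_cnt)
{
	return ((uint64_t)hash * flows_cnt) >> 32;
}

int main(void)
{
	uint32_t src = 0x0a000001, dst = 0x0a000002;	/* 10.0.0.1 -> 10.0.0.2 */
	uint32_t ports = (12345u << 16) | 80;		/* sport 12345, dport 80 */
	uint32_t h = hash32(dst, src ^ 6 /* IPPROTO_TCP */, ports, 0xdeadbeef);

	printf("flow -> bucket %u of 1024\n", flow_bucket(h, 1024));
	return 0;
}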
tc qdisc ... fq_codel [ limit PACKETS ] [ flows NUMBER ]
                      [ target TIME ] [ interval TIME ] [ noecn ]
                      [ quantum BYTES ]

defaults : 1024 flows, 10240 packets limit, quantum : device MTU
           target : 5ms (CoDel default)
           interval : 100ms (CoDel default)
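For example, to install it with non-default parameters on a
hypothetical eth0 (parameter names as in the synopsis above; the values
are arbitrary):

  tc qdisc add dev eth0 root fq_codel limit 2000 target 3ms interval 40ms noecn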
Impressive results under load :

class htb 1:1 root leaf 10: prio 0 quantum 1514 rate 200000Kbit ceil 200000Kbit burst 1475b/8 mpu 0b overhead 0b cburst 1475b/8 mpu 0b overhead 0b level 0
 Sent 43304920109 bytes 33063109 pkt (dropped 0, overlimits 0 requeues 0)
 rate 201691Kbit 28595pps backlog 0b 312p requeues 0
 lended: 33063109 borrowed: 0 giants: 0
 tokens: -912 ctokens: -912

class fq_codel 10:1735 parent 10:
 (dropped 1292, overlimits 0 requeues 0)
 backlog 15140b 10p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:4524 parent 10:
 (dropped 1291, overlimits 0 requeues 0)
 backlog 16654b 11p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:4e74 parent 10:
 (dropped 1290, overlimits 0 requeues 0)
 backlog 6056b 4p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 6.4ms dropping drop_next 92.0ms
class fq_codel 10:628a parent 10:
 (dropped 1289, overlimits 0 requeues 0)
 backlog 7570b 5p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 5.4ms dropping drop_next 90.9ms
class fq_codel 10:a4b3 parent 10:
 (dropped 302, overlimits 0 requeues 0)
 backlog 16654b 11p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:c3c2 parent 10:
 (dropped 1284, overlimits 0 requeues 0)
 backlog 13626b 9p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 5.9ms
class fq_codel 10:d331 parent 10:
 (dropped 299, overlimits 0 requeues 0)
 backlog 15140b 10p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.0ms
class fq_codel 10:d526 parent 10:
 (dropped 12160, overlimits 0 requeues 0)
 backlog 35870b 211p requeues 0
 deficit 1508 count 12160 lastcount 1 ldelay 15.3ms dropping drop_next 247us
class fq_codel 10:e2c6 parent 10:
 (dropped 1288, overlimits 0 requeues 0)
 backlog 15140b 10p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:eab5 parent 10:
 (dropped 1285, overlimits 0 requeues 0)
 backlog 16654b 11p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 5.9ms
class fq_codel 10:f220 parent 10:
 (dropped 1289, overlimits 0 requeues 0)
 backlog 15140b 10p requeues 0
 deficit 1514 count 1 lastcount 1 ldelay 7.1ms

qdisc htb 1: root refcnt 6 r2q 10 default 1 direct_packets_stat 0 ver 3.17
 Sent 43331086547 bytes 33092812 pkt (dropped 0, overlimits 66063544 requeues 71)
 rate 201697Kbit 28602pps backlog 0b 260p requeues 71
qdisc fq_codel 10: parent 1:1 limit 10240p flows 65536 target 5.0ms interval 100.0ms ecn
 Sent 43331086547 bytes 33092812 pkt (dropped 949359, overlimits 0 requeues 0)
 rate 201697Kbit 28602pps backlog 189352b 260p requeues 0
 maxpacket 1514 drop_overlimit 0 new_flow_count 5582 ecn_mark 125593
 new_flows_len 0 old_flows_len 11

PING 172.30.42.18 (172.30.42.18) 56(84) bytes of data.
64 bytes from 172.30.42.18: icmp_req=1 ttl=64 time=0.227 ms
64 bytes from 172.30.42.18: icmp_req=2 ttl=64 time=0.165 ms
64 bytes from 172.30.42.18: icmp_req=3 ttl=64 time=0.166 ms
64 bytes from 172.30.42.18: icmp_req=4 ttl=64 time=0.151 ms
64 bytes from 172.30.42.18: icmp_req=5 ttl=64 time=0.164 ms
64 bytes from 172.30.42.18: icmp_req=6 ttl=64 time=0.172 ms
64 bytes from 172.30.42.18: icmp_req=7 ttl=64 time=0.175 ms
64 bytes from 172.30.42.18: icmp_req=8 ttl=64 time=0.183 ms
64 bytes from 172.30.42.18: icmp_req=9 ttl=64 time=0.158 ms
64 bytes from 172.30.42.18: icmp_req=10 ttl=64 time=0.200 ms
--- 172.30.42.18 ping statistics ---
10 packets transmitted, 10 received, 0% packet loss, time 8999ms
rtt min/avg/max/mdev = 0.151/0.176/0.227/0.022 ms
Much better than SFQ, because new flows are given priority and the
fast path dirties fewer cache lines.
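The scheduling that produces this behaviour is deficit round robin over
the two flow lists. Below is a minimal, self-contained userspace sketch
of that loop (illustrative names, fixed-size rings instead of the
kernel's list_head; not the patch's code): a freshly created flow sits
on the new list and is served first; once it empties or spends its
quantum it is rotated onto the old list, which is why the ping above
stays near 0.17 ms while bulk flows keep the link saturated.

#include <stdio.h>

#define QUANTUM 1514

struct flow {
	const char *name;
	int deficit;	/* bytes this flow may still send in this round */
	int pkts;	/* packets queued; each one pktlen bytes */
	int pktlen;
};

/* crude fixed-size FIFOs of flow pointers; stand-ins for list_head */
static struct flow *newq[8], *oldq[8];
static int nh, nt, oh, ot;	/* head/tail indexes of the two rings */

static void push(struct flow **q, int *t, struct flow *f) { q[(*t)++ % 8] = f; }
static void pop(int *h) { (*h)++; }

/* one dequeue decision, echoing fq_codel_dequeue()'s list handling */
static struct flow *dequeue_one(void)
{
	while (nh != nt || oh != ot) {
		int from_new = (nh != nt);	/* new flows are served first */
		struct flow **q = from_new ? newq : oldq;
		int *h = from_new ? &nh : &oh;
		struct flow *f = q[*h % 8];

		if (f->deficit <= 0) {		/* quantum spent: refill, rotate to old list */
			pop(h);
			f->deficit += QUANTUM;
			push(oldq, &ot, f);
			continue;
		}
		if (f->pkts == 0) {		/* flow emptied */
			pop(h);
			if (from_new && oh != ot)	/* force one pass through old_flows */
				push(oldq, &ot, f);
			continue;		/* otherwise it leaves the rounds */
		}
		f->pkts--;			/* send one packet, charge the deficit */
		f->deficit -= f->pktlen;
		return f;
	}
	return NULL;
}

int main(void)
{
	struct flow bulk = { "bulk", QUANTUM, 100, 1514 };
	struct flow ping = { "ping", QUANTUM, 1, 84 };
	struct flow *f;

	push(oldq, &ot, &bulk);		/* long-lived flow, already demoted */
	push(newq, &nt, &ping);		/* freshly arrived flow */

	for (int i = 0; i < 4 && (f = dequeue_one()) != NULL; i++)
		printf("sent 1 packet from %s\n", f->name);	/* ping wins the first slot */
	return 0;
}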
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/pkt_sched.h |  54 ++++
 net/sched/Kconfig         |  11 +
 net/sched/Makefile        |   1 +
 net/sched/sch_fq_codel.c  | 624 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 690 insertions(+)
 create mode 100644 net/sched/sch_fq_codel.c
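One detail worth calling out before the diff: when the qdisc is over
its packet limit, fq_codel_drop() below finds the flow with the largest
byte backlog by a plain linear scan of the backlog table and drops from
its head. A minimal standalone sketch of that scan (the sample data is
illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * Pick the "fat" flow: with 1024 flows this is a 4KB linear scan,
 * cheaper in practice than keeping a heap up to date on the
 * enqueue/dequeue fast path.
 */
static unsigned int fattest_flow(const uint32_t *backlogs, unsigned int flows_cnt)
{
	unsigned int i, idx = 0;
	uint32_t maxbacklog = 0;

	for (i = 0; i < flows_cnt; i++) {
		if (backlogs[i] > maxbacklog) {
			maxbacklog = backlogs[i];
			idx = i;
		}
	}
	return idx;	/* drop one packet from the head of this flow */
}

int main(void)
{
	uint32_t backlogs[8] = { 0, 1514, 0, 35870, 0, 6056, 0, 0 };

	printf("drop from flow %u\n", fattest_flow(backlogs, 8));
	return 0;
}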
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index cde56c2..32aef0a 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -681,4 +681,58 @@ struct tc_codel_xstats {
 	__u32	dropping;  /* are we in dropping state ? */
 };
 
+/* FQ_CODEL */
+
+enum {
+	TCA_FQ_CODEL_UNSPEC,
+	TCA_FQ_CODEL_TARGET,
+	TCA_FQ_CODEL_LIMIT,
+	TCA_FQ_CODEL_INTERVAL,
+	TCA_FQ_CODEL_ECN,
+	TCA_FQ_CODEL_FLOWS,
+	TCA_FQ_CODEL_QUANTUM,
+	__TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+	TCA_FQ_CODEL_XSTATS_QDISC,
+	TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+	__u32	maxpacket;	/* largest packet we've seen so far */
+	__u32	drop_overlimit; /* number of time max qdisc
+				 * packet limit was hit
+				 */
+	__u32	ecn_mark;	/* number of packets we ECN marked
+				 * instead of being dropped
+				 */
+	__u32	new_flow_count; /* number of time packets
+				 * created a 'new flow'
+				 */
+	__u32	new_flows_len;	/* count of flows in new list */
+	__u32	old_flows_len;	/* count of flows in old list */
+};
+
+struct tc_fq_codel_cl_stats {
+	__s32	deficit;
+	__u32	ldelay;		/* in-queue delay seen by most recently
+				 * dequeued packet
+				 */
+	__u32	count;
+	__u32	lastcount;
+	__u32	dropping;
+	__s32	drop_next;
+};
+
+struct tc_fq_codel_xstats {
+	__u32	type;
+	union {
+		struct tc_fq_codel_qd_stats qdisc_stats;
+		struct tc_fq_codel_cl_stats class_stats;
+	};
+};
+
 #endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index fadd252..e7a8976 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -261,6 +261,17 @@ config NET_SCH_CODEL
 	  To compile this driver as a module, choose M here: the module
 	  will be called sch_codel.
 
+config NET_SCH_FQ_CODEL
+	tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
+	help
+	  Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
+	  packet scheduling algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sch_fq_codel.
+
+	  If unsure, say N.
+
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 30fab03..5940a19 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_NET_SCH_MQPRIO)	+= sch_mqprio.o
 obj-$(CONFIG_NET_SCH_CHOKE)	+= sch_choke.o
 obj-$(CONFIG_NET_SCH_QFQ)	+= sch_qfq.o
 obj-$(CONFIG_NET_SCH_CODEL)	+= sch_codel.o
+obj-$(CONFIG_NET_SCH_FQ_CODEL)	+= sch_fq_codel.o
 
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
new file mode 100644
index 0000000..a7b3754
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,624 @@
+/*
+ * Fair Queue CoDel discipline
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
+#include <net/codel.h>
+
+/* Fair Queue CoDel.
+ *
+ * Principles :
+ * Packets are classified (internal classifier or external) on flows.
+ * This is a Stochastic model (as we use a hash, several flows
+ * might be hashed on same slot)
+ * Each flow has a CoDel managed queue.
+ * Flows are linked onto two (Round Robin) lists,
+ * so that new flows have priority on old ones.
+ *
+ * For a given flow, packets are not reordered (CoDel uses a FIFO)
+ * head drops only.
+ * ECN capability is on by default.
+ * Low memory footprint (64 bytes per flow)
+ */
+
+struct fq_codel_flow {
+	struct sk_buff	  *head;
+	struct sk_buff	  *tail;
+	struct list_head  flowchain;
+	int		  deficit;
+	u32		  dropped; /* number of drops (or ECN marks) on this flow */
+	struct codel_vars cvars;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct fq_codel_sched_data {
+	struct tcf_proto *filter_list;	/* optional external classifier */
+	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
+	u32		*backlogs;	/* backlog table [flows_cnt] */
+	u32		flows_cnt;	/* number of flows */
+	u32		perturbation;	/* hash perturbation */
+	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
+	struct codel_params cparams;
+	struct codel_stats cstats;
+	u32		drop_overlimit;
+	u32		new_flow_count;
+
+	struct list_head new_flows;	/* list of new flows */
+	struct list_head old_flows;	/* list of old flows */
+};
+
+static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
+				  const struct sk_buff *skb)
+{
+	struct flow_keys keys;
+	unsigned int hash;
+
+	skb_flow_dissect(skb, &keys);
+	hash = jhash_3words((__force u32)keys.dst,
+			    (__force u32)keys.src ^ keys.ip_proto,
+			    (__force u32)keys.ports, q->perturbation);
+	return ((u64)hash * q->flows_cnt) >> 32;
+}
+
+static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
+				      int *qerr)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct tcf_result res;
+	int result;
+
+	if (TC_H_MAJ(skb->priority) == sch->handle &&
+	    TC_H_MIN(skb->priority) > 0 &&
+	    TC_H_MIN(skb->priority) <= q->flows_cnt)
+		return TC_H_MIN(skb->priority);
+
+	if (!q->filter_list)
+		return fq_codel_hash(q, skb) + 1;
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_STOLEN:
+		case TC_ACT_QUEUED:
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			return 0;
+		}
+#endif
+		if (TC_H_MIN(res.classid) <= q->flows_cnt)
+			return TC_H_MIN(res.classid);
+	}
+	return 0;
+}
+
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
+{
+	struct sk_buff *skb = flow->head;
+
+	flow->head = skb->next;
+	skb->next = NULL;
+	return skb;
+}
+
+/* add skb to flow queue (tail add) */
+static inline void flow_queue_add(struct fq_codel_flow *flow,
+				  struct sk_buff *skb)
+{
+	if (flow->head == NULL)
+		flow->head = skb;
+	else
+		flow->tail->next = skb;
+	flow->tail = skb;
+	skb->next = NULL;
+}
+
+static unsigned int fq_codel_drop(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	unsigned int maxbacklog = 0, idx = 0, i, len;
+	struct fq_codel_flow *flow;
+
+	/* Queue is full! Find the fat flow and drop packet from it.
+	 * This might sound expensive, but with 1024 flows, we scan
+	 * 4KB of memory, and we dont need to handle a complex tree
+	 * in fast path (packet queue/enqueue) with many cache misses.
+	 */
+	for (i = 0; i < q->flows_cnt; i++) {
+		if (q->backlogs[i] > maxbacklog) {
+			maxbacklog = q->backlogs[i];
+			idx = i;
+		}
+	}
+	flow = &q->flows[idx];
+	skb = dequeue_head(flow);
+	len = qdisc_pkt_len(skb);
+	q->backlogs[idx] -= len;
+	kfree_skb(skb);
+	sch->q.qlen--;
+	sch->qstats.drops++;
+	sch->qstats.backlog -= len;
+	flow->dropped++;
+	return idx;
+}
+
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	unsigned int idx;
+	struct fq_codel_flow *flow;
+	int uninitialized_var(ret);
+
+	idx = fq_codel_classify(skb, sch, &ret);
+	if (idx == 0) {
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+	idx--;
+
+	codel_set_enqueue_time(skb);
+	flow = &q->flows[idx];
+	flow_queue_add(flow, skb);
+	q->backlogs[idx] += qdisc_pkt_len(skb);
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
+	if (list_empty(&flow->flowchain)) {
+		list_add_tail(&flow->flowchain, &q->new_flows);
+		codel_vars_init(&flow->cvars);
+		q->new_flow_count++;
+		flow->deficit = q->quantum;
+		flow->dropped = 0;
+	}
+	if (++sch->q.qlen < sch->limit)
+		return NET_XMIT_SUCCESS;
+
+	q->drop_overlimit++;
+	/* Return Congestion Notification only if we dropped a packet
+	 * from this flow.
+	 */
+	if (fq_codel_drop(sch) == idx)
+		return NET_XMIT_CN;
+
+	/* As we dropped a packet, better let upper stack know this */
+	qdisc_tree_decrease_qlen(sch, 1);
+	return NET_XMIT_SUCCESS;
+}
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we dont need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+	struct fq_codel_flow *flow;
+	struct sk_buff *skb = NULL;
+
+	flow = container_of(vars, struct fq_codel_flow, cvars);
+	if (flow->head) {
+		skb = dequeue_head(flow);
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		sch->q.qlen--;
+	}
+	return skb;
+}
+
+static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	struct fq_codel_flow *flow;
+	struct list_head *head;
+	u32 prev_drop_count, prev_ecn_mark;
+
+begin:
+	head = &q->new_flows;
+	if (list_empty(head)) {
+		head = &q->old_flows;
+		if (list_empty(head))
+			return NULL;
+	}
+	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
+
+	if (flow->deficit <= 0) {
+		flow->deficit += q->quantum;
+		list_move_tail(&flow->flowchain, &q->old_flows);
+		goto begin;
+	}
+
+	prev_drop_count = q->cstats.drop_count;
+	prev_ecn_mark = q->cstats.ecn_mark;
+
+	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+			    dequeue, &q->backlogs[flow - q->flows]);
+
+	flow->dropped += q->cstats.drop_count - prev_drop_count;
+	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
+	if (!skb) {
+		/* force a pass through old_flows to prevent starvation */
+		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+			list_move_tail(&flow->flowchain, &q->old_flows);
+		else
+			list_del_init(&flow->flowchain);
+		goto begin;
+	}
+	qdisc_bstats_update(sch, skb);
+	flow->deficit -= qdisc_pkt_len(skb);
+	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	 * or HTB crashes. Defer it for next round.
+	 */
+	if (q->cstats.drop_count && sch->q.qlen) {
+		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+		q->cstats.drop_count = 0;
+	}
+	return skb;
+}
+
+static void fq_codel_reset(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	while ((skb = fq_codel_dequeue(sch)) != NULL)
+		kfree_skb(skb);
+}
+
+static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
+};
+
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+	int err;
+
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
+	if (err < 0)
+		return err;
+	if (tb[TCA_FQ_CODEL_FLOWS]) {
+		if (q->flows)
+			return -EINVAL;
+		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
+		if (!q->flows_cnt ||
+		    q->flows_cnt > 65536)
+			return -EINVAL;
+	}
+	sch_tree_lock(sch);
+
+	if (tb[TCA_FQ_CODEL_TARGET]) {
+		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
+
+		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_FQ_CODEL_INTERVAL]) {
+		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
+
+		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_FQ_CODEL_LIMIT])
+		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+
+	if (tb[TCA_FQ_CODEL_ECN])
+		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+
+	if (tb[TCA_FQ_CODEL_QUANTUM])
+		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
+
+	while (sch->q.qlen > sch->limit) {
+		struct sk_buff *skb = fq_codel_dequeue(sch);
+
+		kfree_skb(skb);
+		q->cstats.drop_count++;
+	}
+	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+	q->cstats.drop_count = 0;
+
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static void *fq_codel_zalloc(size_t sz)
+{
+	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+	if (!ptr)
+		ptr = vzalloc(sz);
+	return ptr;
+}
+
+static void fq_codel_free(void *addr)
+{
+	if (addr) {
+		if (is_vmalloc_addr(addr))
+			vfree(addr);
+		else
+			kfree(addr);
+	}
+}
+
+static void fq_codel_destroy(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	fq_codel_free(q->backlogs);
+	fq_codel_free(q->flows);
+}
+
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	sch->limit = 10*1024;
+	q->flows_cnt = 1024;
+	q->quantum = psched_mtu(qdisc_dev(sch));
+	q->perturbation = net_random();
+	INIT_LIST_HEAD(&q->new_flows);
+	INIT_LIST_HEAD(&q->old_flows);
+	codel_params_init(&q->cparams);
+	codel_stats_init(&q->cstats);
+	q->cparams.ecn = true;
+
+	if (opt) {
+		int err = fq_codel_change(sch, opt);
+		if (err)
+			return err;
+	}
+
+	if (!q->flows) {
+		q->flows = fq_codel_zalloc(q->flows_cnt *
+					   sizeof(struct fq_codel_flow));
+		if (!q->flows)
+			return -ENOMEM;
+		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+		if (!q->backlogs) {
+			fq_codel_free(q->flows);
+			return -ENOMEM;
+		}
+		for (i = 0; i < q->flows_cnt; i++) {
+			struct fq_codel_flow *flow = q->flows + i;
+
+			INIT_LIST_HEAD(&flow->flowchain);
+		}
+	}
+	if (sch->limit >= 1)
+		sch->flags |= TCQ_F_CAN_BYPASS;
+	else
+		sch->flags &= ~TCQ_F_CAN_BYPASS;
+	return 0;
+}
+
+static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts;
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
+			codel_time_to_us(q->cparams.target)) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
+			sch->limit) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
+			codel_time_to_us(q->cparams.interval)) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
+			q->cparams.ecn) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
+			q->quantum) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
+			q->flows_cnt))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, opts);
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct tc_fq_codel_xstats st = {
+		.type = TCA_FQ_CODEL_XSTATS_QDISC,
+		.qdisc_stats.maxpacket = q->cstats.maxpacket,
+		.qdisc_stats.drop_overlimit = q->drop_overlimit,
+		.qdisc_stats.ecn_mark = q->cstats.ecn_mark,
+		.qdisc_stats.new_flow_count = q->new_flow_count,
+	};
+	struct list_head *pos;
+
+	list_for_each(pos, &q->new_flows)
+		st.qdisc_stats.new_flows_len++;
+
+	list_for_each(pos, &q->old_flows)
+		st.qdisc_stats.old_flows_len++;
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return NULL;
+}
+
+static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
+{
+	return 0;
+}
+
+static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+				   u32 classid)
+{
+	/* we cannot bypass queue discipline anymore */
+	sch->flags &= ~TCQ_F_CAN_BYPASS;
+	return 0;
+}
+
+static void fq_codel_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
+			       struct sk_buff *skb, struct tcmsg *tcm)
+{
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	return 0;
+}
+
+static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				     struct gnet_dump *d)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	u32 idx = cl - 1;
+	struct gnet_stats_queue qs = { 0 };
+	struct tc_fq_codel_xstats xstats;
+
+	if (idx < q->flows_cnt) {
+		const struct fq_codel_flow *flow = &q->flows[idx];
+		const struct sk_buff *skb = flow->head;
+
+		memset(&xstats, 0, sizeof(xstats));
+		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
+		xstats.class_stats.deficit = flow->deficit;
+		xstats.class_stats.ldelay =
+			codel_time_to_us(flow->cvars.ldelay);
+		xstats.class_stats.count = flow->cvars.count;
+		xstats.class_stats.lastcount = flow->cvars.lastcount;
+		xstats.class_stats.dropping = flow->cvars.dropping;
+		if (flow->cvars.dropping) {
+			codel_tdiff_t delta = flow->cvars.drop_next -
+					      codel_get_time();
+
+			xstats.class_stats.drop_next = (delta >= 0) ?
+				codel_time_to_us(delta) :
+				-codel_time_to_us(-delta);
+		}
+		while (skb) {
+			qs.qlen++;
+			skb = skb->next;
+		}
+		qs.backlog = q->backlogs[idx];
+		qs.drops = flow->dropped;
+	}
+	if (gnet_stats_copy_queue(d, &qs) < 0)
+		return -1;
+	if (idx < q->flows_cnt)
+		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+	return 0;
+}
+
+static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	unsigned int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < q->flows_cnt; i++) {
+		if (list_empty(&q->flows[i].flowchain) ||
+		    arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, i + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static const struct Qdisc_class_ops fq_codel_class_ops = {
+	.leaf		=	fq_codel_leaf,
+	.get		=	fq_codel_get,
+	.put		=	fq_codel_put,
+	.tcf_chain	=	fq_codel_find_tcf,
+	.bind_tcf	=	fq_codel_bind,
+	.unbind_tcf	=	fq_codel_put,
+	.dump		=	fq_codel_dump_class,
+	.dump_stats	=	fq_codel_dump_class_stats,
+	.walk		=	fq_codel_walk,
+};
+
+static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+	.cl_ops		=	&fq_codel_class_ops,
+	.id		=	"fq_codel",
+	.priv_size	=	sizeof(struct fq_codel_sched_data),
+	.enqueue	=	fq_codel_enqueue,
+	.dequeue	=	fq_codel_dequeue,
+	.peek		=	qdisc_peek_dequeued,
+	.drop		=	fq_codel_drop,
+	.init		=	fq_codel_init,
+	.reset		=	fq_codel_reset,
+	.destroy	=	fq_codel_destroy,
+	.change		=	fq_codel_change,
+	.dump		=	fq_codel_dump,
+	.dump_stats	=	fq_codel_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init fq_codel_module_init(void)
+{
+	return register_qdisc(&fq_codel_qdisc_ops);
+}
+
+static void __exit fq_codel_module_exit(void)
+{
+	unregister_qdisc(&fq_codel_qdisc_ops);
+}
+
+module_init(fq_codel_module_init)
+module_exit(fq_codel_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");