From: Eric Dumazet <edumazet@google.com>
Date: Wed, 16 May 2012 04:39:09 +0000
Subject: [PATCH 7/7] fq_codel: should use qdisc backlog as threshold

commit 865ec5523dadbedefbc5710a68969f686a28d928 upstream.

codel_should_drop() logic allows a packet being not dropped if queue
size is under max packet size.

In fq_codel, we have two possible backlogs : The qdisc global one, and
the flow local one.

The meaningful one for codel_should_drop() should be the global backlog,
not the per flow one, so that thin flows can have a non zero drop/mark
probability.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/codel.h      |   15 +++++++--------
 net/sched/sch_codel.c    |    4 ++--
 net/sched/sch_fq_codel.c |    5 +++--
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/net/codel.h b/include/net/codel.h
index 7546517..550debf 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
 
 
 static bool codel_should_drop(const struct sk_buff *skb,
-			      unsigned int *backlog,
+			      struct Qdisc *sch,
 			      struct codel_vars *vars,
 			      struct codel_params *params,
 			      struct codel_stats *stats,
@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
 	}
 
 	vars->ldelay = now - codel_get_enqueue_time(skb);
-	*backlog -= qdisc_pkt_len(skb);
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
 		stats->maxpacket = qdisc_pkt_len(skb);
 
 	if (codel_time_before(vars->ldelay, params->target) ||
-	    *backlog <= stats->maxpacket) {
+	    sch->qstats.backlog <= stats->maxpacket) {
 		/* went below - stay below for at least interval */
 		vars->first_above_time = 0;
 		return false;
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
-				     codel_skb_dequeue_t dequeue_func,
-				     u32 *backlog)
+				     codel_skb_dequeue_t dequeue_func)
 {
 	struct sk_buff *skb = dequeue_func(vars, sch);
 	codel_time_t now;
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 		return skb;
 	}
 	now = codel_get_time();
-	drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+	drop = codel_should_drop(skb, sch, vars, params, stats, now);
 	if (vars->dropping) {
 		if (!drop) {
 			/* sojourn time below target - leave dropping state */
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 			qdisc_drop(skb, sch);
 			stats->drop_count++;
 			skb = dequeue_func(vars, sch);
-			if (!codel_should_drop(skb, backlog,
+			if (!codel_should_drop(skb, sch,
					       vars, params, stats, now)) {
 				/* leave dropping state */
 				vars->dropping = false;
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 			stats->drop_count++;
 
 			skb = dequeue_func(vars, sch);
-			drop = codel_should_drop(skb, backlog, vars, params,
+			drop = codel_should_drop(skb, sch, vars, params,
						 stats, now);
 		}
 		vars->dropping = true;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 213ef60..2f9ab17 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
-			    dequeue, &sch->qstats.backlog);
+	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
 	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
 	 */
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 337ff20..9fc1c62 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  */
 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 {
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct fq_codel_flow *flow;
 	struct sk_buff *skb = NULL;
 
 	flow = container_of(vars, struct fq_codel_flow, cvars);
 	if (flow->head) {
 		skb = dequeue_head(flow);
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
 		sch->q.qlen--;
 	}
 	return skb;
@@ -256,7 +257,7 @@ begin:
 	prev_ecn_mark = q->cstats.ecn_mark;
 
 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-			    dequeue, &q->backlogs[flow - q->flows]);
+			    dequeue);
 
 	flow->dropped += q->cstats.drop_count - prev_drop_count;
 	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
-- 
1.7.10
