From foo@baz Wed May 16 16:57:32 CEST 2018
From: Eric Dumazet <edumazet@google.com>
Date: Wed, 2 May 2018 10:03:30 -0700
Subject: net_sched: fq: take care of throttled flows before reuse

From: Eric Dumazet <edumazet@google.com>

[ Upstream commit 7df40c2673a1307c3260aab6f9d4b9bf97ca8fd7 ]

Normally, a socket can not be freed/reused unless all its TX packets
left qdisc and were TX-completed. However connect(AF_UNSPEC) allows
this to happen.
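
For illustration only, a minimal userspace sketch of that trigger,
assuming "fd" is an already-connected socket supplied by the caller
(this snippet is not part of the kernel change):

	#include <string.h>
	#include <sys/socket.h>

	/* connect() with an AF_UNSPEC address dissociates the socket;
	 * its flow state may then be reused while packets it queued
	 * earlier are still sitting in the qdisc.
	 */
	static int disconnect_socket(int fd)
	{
		struct sockaddr sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_family = AF_UNSPEC;
		return connect(fd, &sa, sizeof(sa));
	}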

With commit fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for
reused flows") we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches,
blindly clearing it might break rb-tree integrity. We need to make
sure the flow is no longer in the rb-tree to avoid this problem.
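
As a minimal self-contained illustration of that failure mode (a plain
sorted array and bsearch() standing in for the kernel rbtree; the
struct and values below are made up for the example):

	#include <stdio.h>
	#include <stdlib.h>

	struct flow { unsigned long long time_next_packet; };

	static int cmp(const void *a, const void *b)
	{
		const struct flow *fa = a, *fb = b;

		if (fa->time_next_packet < fb->time_next_packet)
			return -1;
		return fa->time_next_packet > fb->time_next_packet;
	}

	int main(void)
	{
		struct flow flows[] = { { 100 }, { 200 }, { 300 } };
		struct flow key = { 300 };

		/* Reuse the last flow and clear its key without removing
		 * it from the ordered structure first: the ordering
		 * invariant is gone, so searches can now miss nodes.
		 */
		flows[2].time_next_packet = 0;

		if (!bsearch(&key, flows, 3, sizeof(flows[0]), cmp))
			puts("lookup missed: ordering invariant broken");
		return 0;
	}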

Fixes: fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/sched/sch_fq.c |   37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -126,6 +126,28 @@ static bool fq_flow_is_detached(const st
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -153,15 +175,6 @@ static void fq_flow_set_throttled(struct
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -265,6 +278,8 @@ static struct fq_flow *fq_classify(struc
 				     f->socket_hash != sk->sk_hash)) {
 				f->credit = q->initial_quantum;
 				f->socket_hash = sk->sk_hash;
+				if (fq_flow_is_throttled(f))
+					fq_flow_unset_throttled(q, f);
 				f->time_next_packet = 0ULL;
 			}
 			return f;
@@ -419,9 +434,7 @@ static void fq_check_throttled(struct fq
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 