From 35536f38006d0995e3c14246c7b56f01e8f9f047 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Tue, 15 Oct 2013 11:54:30 -0700
Subject: tcp: must unclone packets before mangling them

From: Eric Dumazet <edumazet@google.com>

[ Upstream commit c52e2421f7368fd36cbe330d2cf41b10452e39a9 ]

The TCP stack should make sure it owns skbs before mangling them.

We had various crashes using bnx2x, and it turned out that gso_size
was cleared right before the bnx2x driver populated the TX descriptor
of the _previous_ packet sent. The TCP stack can sometimes retransmit
packets that are still in a Qdisc.
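
The underlying hazard: skb_clone() duplicates only the struct sk_buff
header, while the data buffer and the struct skb_shared_info at its
tail (which holds gso_size/gso_segs) stay shared between the original
and the clone. A minimal userspace sketch of that aliasing, using
hypothetical stand-in types rather than the real kernel structures:

#include <stdio.h>

/* Stand-ins for skb_shared_info / sk_buff; illustrative layout only. */
struct shinfo { unsigned short gso_size, gso_segs; };
struct skb    { struct shinfo *shinfo; /* tail of the shared buffer */ };

int main(void)
{
	struct shinfo shared = { .gso_size = 1448, .gso_segs = 4 };
	struct skb orig  = { .shinfo = &shared };
	struct skb clone = orig;	/* skb_clone(): new header only */

	/* The shared info stays aliased, so a TCP-side write is seen
	 * by a driver still transmitting through the clone: */
	orig.shinfo->gso_size = 0;	/* what tcp_set_skb_tso_segs() does */
	printf("clone sees gso_size=%u\n", clone.shinfo->gso_size);	/* 0 */
	return 0;
}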

Of course we could make the bnx2x driver more robust (using
ACCESS_ONCE(shinfo->gso_size), for example), but the bug is in the
TCP stack.
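
That driver-side hardening would only tolerate the racing write rather
than fix the ownership bug; the idea, sketched here with the 3.10-era
ACCESS_ONCE() macro and not taken from actual bnx2x code, is to
snapshot the field once instead of re-reading it in the TX path:

	/* sketch only: read gso_size a single time so a concurrent
	 * writer cannot change it between uses */
	unsigned short gso_size = ACCESS_ONCE(skb_shinfo(skb)->gso_size);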

We have identified two points where skb_unclone() was needed.
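
For reference, skb_unclone() behaves as a copy-on-write helper: a
no-op when the skb is exclusively owned, otherwise it makes a private
copy of the data before the shared info is written. Its approximate
shape (see include/linux/skbuff.h for the real definition):

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri); /* may return -ENOMEM */
	return 0;
}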

This patch adds a WARN_ON_ONCE() to warn us if we missed another
fix of this kind.

Kudos to Neal for finding the root cause of this bug. It's visible
when using a small MSS.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/ipv4/tcp_output.c |    9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -976,6 +976,9 @@ static void tcp_queue_skb(struct sock *s
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
+	/* Make sure we own this skb before messing gso_size/gso_segs */
+	WARN_ON_ONCE(skb_cloned(skb));
+
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 	    skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
@@ -1057,9 +1060,7 @@ int tcp_fragment(struct sock *sk, struct
 	if (nsize < 0)
 		nsize = 0;
 
-	if (skb_cloned(skb) &&
-	    skb_is_nonlinear(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
@@ -2334,6 +2335,8 @@ int __tcp_retransmit_skb(struct sock *sk
 		int oldpcount = tcp_skb_pcount(skb);
 
 		if (unlikely(oldpcount > 1)) {
+			if (skb_unclone(skb, GFP_ATOMIC))
+				return -ENOMEM;
 			tcp_init_tso_segs(sk, skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}