From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: netvm: prevent a stream specific deadlock
Patch-mainline: No
References: FATE#303834

It could happen that all !SOCK_MEMALLOC sockets have buffered so much data
that we're over the global rmem limit. This will prevent SOCK_MEMALLOC
sockets from receiving data, which will prevent userspace from running,
even though running userspace is needed to reduce the buffered data.

Fix this by exempting SOCK_MEMALLOC sockets from the rmem limit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Suresh Jayaraman <sjayaraman@suse.de>

---
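Note: skb_emergency() is not introduced by this patch; it comes from
earlier in the netvm series. A minimal sketch of the assumed helper,
keying off a flag set by the emergency skb allocation path (the exact
field name is an assumption here):

	/* Sketch only: true when this skb was allocated from the
	 * memalloc/emergency reserves, so receive-side accounting
	 * must not reject it.
	 */
	static inline int skb_emergency(const struct sk_buff *skb)
	{
		return unlikely(skb->emergency);
	}

With that, the sk_rmem_schedule() change below admits an emergency skb
even when __sk_mem_schedule() fails, so a SOCK_MEMALLOC socket keeps
receiving while the global rmem limit is exhausted.
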
 include/net/sock.h   |    7 ++++---
 net/core/sock.c      |    2 +-
 net/ipv4/tcp_input.c |   12 ++++++------
 net/sctp/ulpevent.c  |    2 +-
 4 files changed, 12 insertions(+), 11 deletions(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -788,12 +788,13 @@ static inline int sk_wmem_schedule(struc
 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk_has_account(sk))
 		return 1;
-	return size <= sk->sk_forward_alloc ||
-		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+	return skb->truesize <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV) ||
+		skb_emergency(skb);
 }
 
 static inline void sk_mem_reclaim(struct sock *sk)
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -381,7 +381,7 @@ int sock_queue_rcv_skb(struct sock *sk,
 	if (err)
 		goto out;
 
-	if (!sk_rmem_schedule(sk, skb->truesize)) {
+	if (!sk_rmem_schedule(sk, skb)) {
 		err = -ENOBUFS;
 		goto out;
 	}
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3939,19 +3939,19 @@ static void tcp_ofo_queue(struct sock *s
 static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static inline int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb))
 				return -1;
 		}
 	}
@@ -4006,7 +4006,7 @@ static void tcp_data_queue(struct sock *
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb))
 			goto drop;
 
 		skb_set_owner_r(skb, sk);
@@ -4077,7 +4077,7 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (tcp_try_rmem_schedule(sk, skb->truesize))
+	if (tcp_try_rmem_schedule(sk, skb))
 		goto drop;
 
 	/* Disable header prediction. */
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -701,7 +701,7 @@ struct sctp_ulpevent *sctp_ulpevent_make
 	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
 		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
+		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb)))
 			goto fail;
 	}
 