diff --git a/src/patches/suse-2.6.27.31/patches.suse/SoN-23-netvm-tcp-deadlock.patch b/src/patches/suse-2.6.27.31/patches.suse/SoN-23-netvm-tcp-deadlock.patch
new file mode 100644
index 0000000..c28b75a
--- /dev/null
+++ b/src/patches/suse-2.6.27.31/patches.suse/SoN-23-netvm-tcp-deadlock.patch
@@ -0,0 +1,108 @@
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Subject: netvm: prevent a stream specific deadlock
+Patch-mainline: No
+References: FATE#303834
+
+It can happen that all !SOCK_MEMALLOC sockets have buffered so much data
+that we are over the global rmem limit. This would prevent SOCK_MEMALLOC
+sockets from receiving data, which in turn would prevent userspace from
+running, even though userspace must run to drain the buffered data.
+
+Fix this by exempting SOCK_MEMALLOC sockets from the rmem limit.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Acked-by: Neil Brown <neilb@suse.de>
+Acked-by: Suresh Jayaraman <sjayaraman@suse.de>
+
+---
+ include/net/sock.h   |    7 ++++---
+ net/core/sock.c      |    2 +-
+ net/ipv4/tcp_input.c |   12 ++++++------
+ net/sctp/ulpevent.c  |    2 +-
+ 4 files changed, 12 insertions(+), 11 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -788,12 +788,13 @@ static inline int sk_wmem_schedule(struct sock *sk, int size)
+               __sk_mem_schedule(sk, size, SK_MEM_SEND);
+ }
+ 
+-static inline int sk_rmem_schedule(struct sock *sk, int size)
++static inline int sk_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+ {
+       if (!sk_has_account(sk))
+               return 1;
+-      return size <= sk->sk_forward_alloc ||
+-              __sk_mem_schedule(sk, size, SK_MEM_RECV);
++      return skb->truesize <= sk->sk_forward_alloc ||
++              __sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV) ||
++              skb_emergency(skb);
+ }
+ 
+ static inline void sk_mem_reclaim(struct sock *sk)
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -381,7 +381,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       if (err)
+               goto out;
+ 
+-      if (!sk_rmem_schedule(sk, skb->truesize)) {
++      if (!sk_rmem_schedule(sk, skb)) {
+               err = -ENOBUFS;
+               goto out;
+       }
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3939,19 +3939,19 @@ static void tcp_ofo_queue(struct sock *sk)
+ static int tcp_prune_ofo_queue(struct sock *sk);
+ static int tcp_prune_queue(struct sock *sk);
+ 
+-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
++static inline int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+ {
+       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+-          !sk_rmem_schedule(sk, size)) {
++          !sk_rmem_schedule(sk, skb)) {
+ 
+               if (tcp_prune_queue(sk) < 0)
+                       return -1;
+ 
+-              if (!sk_rmem_schedule(sk, size)) {
++              if (!sk_rmem_schedule(sk, skb)) {
+                       if (!tcp_prune_ofo_queue(sk))
+                               return -1;
+ 
+-                      if (!sk_rmem_schedule(sk, size))
++                      if (!sk_rmem_schedule(sk, skb))
+                               return -1;
+               }
+       }
+@@ -4006,7 +4006,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+               if (eaten <= 0) {
+ queue_and_out:
+                       if (eaten < 0 &&
+-                          tcp_try_rmem_schedule(sk, skb->truesize))
++                          tcp_try_rmem_schedule(sk, skb))
+                               goto drop;
+ 
+                       skb_set_owner_r(skb, sk);
+@@ -4077,7 +4077,7 @@ drop:
+ 
+       TCP_ECN_check_ce(tp, skb);
+ 
+-      if (tcp_try_rmem_schedule(sk, skb->truesize))
++      if (tcp_try_rmem_schedule(sk, skb))
+               goto drop;
+ 
+       /* Disable header prediction. */
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -701,7 +701,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+       if (rx_count >= asoc->base.sk->sk_rcvbuf) {
+ 
+               if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
+-                  (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
++                  (!sk_rmem_schedule(asoc->base.sk, chunk->skb)))
+                       goto fail;
+       }
+ 
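
For reference, the admission rule introduced by this patch is a three-step short-circuit: a receive buffer is accepted if it fits the socket's existing reservation, else if the reservation can be extended, else unconditionally when it is emergency (SOCK_MEMALLOC) traffic. Because skb_emergency() is evaluated last, emergency skbs are still charged against the socket when room exists but are never refused. The following is a minimal, self-contained C sketch of that rule; the struct definitions, the mem_schedule() helper, and the fixed pool budget are mocked up for illustration and are not the kernel's API.

    #include <stdio.h>

    /* Mock stand-ins for struct sock / struct sk_buff (illustrative only). */
    struct sock    { int forward_alloc; };            /* bytes already reserved  */
    struct sk_buff { int truesize; int emergency; };  /* emergency ~ SOCK_MEMALLOC */

    static int pool_left = 100;   /* mock global rmem budget */

    /* Try to extend the socket's reservation from the global pool. */
    static int mem_schedule(struct sock *sk, int size)
    {
            if (size > pool_left)
                    return 0;
            pool_left -= size;
            sk->forward_alloc += size;
            return 1;
    }

    /* Same shape as the patched sk_rmem_schedule(): the emergency flag is
     * only consulted after normal accounting fails, so emergency buffers
     * are charged when room exists but are never refused. */
    static int rmem_schedule(struct sock *sk, struct sk_buff *skb)
    {
            return skb->truesize <= sk->forward_alloc ||
                   mem_schedule(sk, skb->truesize) ||
                   skb->emergency;
    }

    int main(void)
    {
            struct sock sk = { 0 };
            struct sk_buff normal = { 150, 0 };   /* over budget, ordinary  */
            struct sk_buff urgent = { 150, 1 };   /* over budget, emergency */

            printf("normal admitted: %d\n", rmem_schedule(&sk, &normal));  /* 0 */
            printf("urgent admitted: %d\n", rmem_schedule(&sk, &urgent));  /* 1 */
            return 0;
    }

Running this prints 0 for the ordinary buffer and 1 for the emergency one: once the mock pool is exhausted, only emergency traffic gets through, which mirrors how the patch lets SOCK_MEMALLOC sockets keep receiving after !SOCK_MEMALLOC sockets have pinned the entire global rmem budget.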