--- /dev/null
+From foo@baz Wed Apr 29 12:00:42 CEST 2015
+From: =?UTF-8?q?Sebastian=20P=C3=B6hn?= <sebastian.poehn@gmail.com>
+Date: Mon, 20 Apr 2015 09:19:20 +0200
+Subject: ip_forward: Drop frames with attached skb->sk
+
+From: =?UTF-8?q?Sebastian=20P=C3=B6hn?= <sebastian.poehn@gmail.com>
+
+[ Upstream commit 2ab957492d13bb819400ac29ae55911d50a82a13 ]
+
+Initial discussion was:
+[FYI] xfrm: Don't lookup sk_policy for timewait sockets
+
+Forwarded frames should not have a socket attached. Time-wait
+sockets in particular will lead to panics later on in the stack.
+
+This was observed with TPROXY assigning a time-wait socket while
+policy routing was broken (misconfigured). As a result, the frame
+enters the forwarding path instead of the input path. We cannot
+solve this in TPROXY, as it cannot know that policy routing is
+broken.
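+
+A minimal sketch of the misconfiguration described above (an assumed
+TPROXY setup; port numbers and mark values are illustrative and not
+part of this patch):
+
+  # intercept traffic and attach a socket in PREROUTING
+  iptables -t mangle -A PREROUTING -p tcp --dport 80 \
+    -j TPROXY --tproxy-mark 0x1/0x1 --on-port 3128
+  # policy routing that must deliver the marked frames locally
+  ip rule add fwmark 1 lookup 100
+  ip route add local 0.0.0.0/0 dev lo table 100
+
+If the ip rule or the local route is missing or wrong, the frame
+reaches ip_forward() with skb->sk still attached, which is exactly
+the case rejected by the check below.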
+
+v2:
+Remove useless comment
+
+Signed-off-by: Sebastian Poehn <sebastian.poehn@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_forward.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -126,6 +126,9 @@ int ip_forward(struct sk_buff *skb)
+ struct rtable *rt; /* Route we use */
+ struct ip_options *opt = &(IPCB(skb)->opt);
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
--- /dev/null
+ip_forward-drop-frames-with-attached-skb-sk.patch
+tcp-fix-possible-deadlock-in-tcp_send_fin.patch
+tcp-avoid-looping-in-tcp_send_fin.patch
--- /dev/null
+From foo@baz Wed Apr 29 12:00:42 CEST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 23 Apr 2015 10:42:39 -0700
+Subject: tcp: avoid looping in tcp_send_fin()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 845704a535e9b3c76448f52af1b70e4422ea03fd ]
+
+The presence of an unbounded loop in tcp_send_fin() has always been hard
+to explain when analyzing crash dumps involving gigantic dying processes
+with millions of sockets.
+
+Let's try a different strategy:
+
+In case of memory pressure, try to add the FIN flag to the last packet
+in the write queue, even if that packet has already been sent. The TCP
+stack will be able to deliver this FIN after a timeout event. Note that
+since this FIN is delivered by a retransmit, it also carries a PSH flag
+in our current implementation.
+
+By checking sk_under_memory_pressure(), we anticipate that cooking
+many FIN packets might deplete TCP memory.
+
+In the case where we cannot allocate a packet, even with a __GFP_WAIT
+allocation, not sending a FIN seems quite reasonable if it allows us
+to get rid of this socket, free memory, and not block the process from
+eventually doing other useful work.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 50 +++++++++++++++++++++++++++++---------------------
+ 1 file changed, 29 insertions(+), 21 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2573,7 +2573,8 @@ begin_fwd:
+
+ /* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+- * Otherwise tcp_send_fin() could loop forever.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
+ */
+ static void sk_forced_wmem_schedule(struct sock *sk, int size)
+ {
+@@ -2586,33 +2587,40 @@ static void sk_forced_wmem_schedule(stru
+ sk_memory_allocated_add(sk, amt, &status);
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = alloc_skb_fclone(MAX_TCP_HEADER,
+- sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
+@@ -2621,7 +2629,7 @@ void tcp_send_fin(struct sock *sk)
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
--- /dev/null
+From foo@baz Wed Apr 29 12:00:42 CEST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 21 Apr 2015 18:32:24 -0700
+Subject: tcp: fix possible deadlock in tcp_send_fin()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d83769a580f1132ac26439f50068a29b02be535e ]
+
+Using sk_stream_alloc_skb() in tcp_send_fin() is dangerous in
+case a huge process is killed by OOM and tcp_mem[2] is hit.
+
+To be able to free memory we need to make progress, so this
+patch allows FIN packets to ignore tcp_mem[2], provided the
+skb allocation succeeded.
+
+In a follow-up patch, we might abort the tcp_send_fin() infinite
+loop in case TIF_MEMDIE is set on this thread, as the memory
+allocator has already done its best to get extra memory.
+
+This patch reverts d22e15371811 ("tcp: fix tcp fin memory accounting")
+
+Fixes: d22e15371811 ("tcp: fix tcp fin memory accounting")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2571,6 +2571,21 @@ begin_fwd:
+ }
+ }
+
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could loop forever.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
+ /* Send a fin. The caller locks the socket for us. This cannot be
+ * allowed to fail queueing a FIN frame under any circumstances.
+ */
+@@ -2593,11 +2608,14 @@ void tcp_send_fin(struct sock *sk)
+ } else {
+ /* Socket is locked, keep trying until memory is available. */
+ for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
++ skb = alloc_skb_fclone(MAX_TCP_HEADER,
++ sk->sk_allocation);
+ if (skb)
+ break;
+ yield();
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
--- /dev/null
+ip_forward-drop-frames-with-attached-skb-sk.patch
+tcp-fix-possible-deadlock-in-tcp_send_fin.patch
+tcp-avoid-looping-in-tcp_send_fin.patch
+net-do-not-deplete-pfmemalloc-reserve.patch
+net-fix-crash-in-build_skb.patch
--- /dev/null
+ip_forward-drop-frames-with-attached-skb-sk.patch
+net-add-skb_checksum_complete_unset.patch
+ppp-call-skb_checksum_complete_unset-in-ppp_receive_frame.patch
+tcp-fix-possible-deadlock-in-tcp_send_fin.patch
+tcp-avoid-looping-in-tcp_send_fin.patch
+net-do-not-deplete-pfmemalloc-reserve.patch
+net-fix-crash-in-build_skb.patch
+pxa168-fix-double-deallocation-of-managed-resources.patch
+net-mlx4_en-prevent-setting-invalid-rss-hash-function.patch
--- /dev/null
+ip_forward-drop-frames-with-attached-skb-sk.patch
+net-add-skb_checksum_complete_unset.patch
+ppp-call-skb_checksum_complete_unset-in-ppp_receive_frame.patch
+tcp-fix-possible-deadlock-in-tcp_send_fin.patch
+tcp-avoid-looping-in-tcp_send_fin.patch
+net-do-not-deplete-pfmemalloc-reserve.patch
+net-fix-crash-in-build_skb.patch
+pxa168-fix-double-deallocation-of-managed-resources.patch
+net-rfs-fix-crash-in-get_rps_cpus.patch
+net-mlx4_en-prevent-setting-invalid-rss-hash-function.patch