--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 4 Aug 2017 14:20:54 +0200
+Subject: bpf, s390: fix jit branch offset related to ldimm64
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+
+[ Upstream commit b0a0c2566f28e71e5e32121992ac8060cec75510 ]
+
+While testing some other work that required JIT modifications, I
+ran into test_bpf causing a hang when the JIT is enabled on s390.
+The problematic test case was the one from ddc665a4bb4b (bpf, arm64:
+fix jit branch offset related to ldimm64), and it turns out that we
+have a similar issue on s390 as well. In bpf_jit_prog() we update
+the next instruction address after returning from bpf_jit_insn()
+with an insn_count. bpf_jit_insn() returns either -1 in case of an
+error (e.g. unsupported insn), 1 or 2. The latter is only the case
+for ldimm64, which spans 2 insns; however, the next address is only
+set to i + 1, not taking the actual insn_count into account, so the
+fix is to use insn_count instead of 1. bpf_jit_enable in mode 2
+also provides a disassembly on s390:
+
+Before fix:
+
+ 000003ff800349b6: a7f40003 brc 15,3ff800349bc ; target
+ 000003ff800349ba: 0000 unknown
+ 000003ff800349bc: e3b0f0700024 stg %r11,112(%r15)
+ 000003ff800349c2: e3e0f0880024 stg %r14,136(%r15)
+ 000003ff800349c8: 0db0 basr %r11,%r0
+ 000003ff800349ca: c0ef00000000 llilf %r14,0
+ 000003ff800349d0: e320b0360004 lg %r2,54(%r11)
+ 000003ff800349d6: e330b03e0004 lg %r3,62(%r11)
+ 000003ff800349dc: ec23ffeda065 clgrj %r2,%r3,10,3ff800349b6 ; jmp
+ 000003ff800349e2: e3e0b0460004 lg %r14,70(%r11)
+ 000003ff800349e8: e3e0b04e0004 lg %r14,78(%r11)
+ 000003ff800349ee: b904002e lgr %r2,%r14
+ 000003ff800349f2: e3b0f0700004 lg %r11,112(%r15)
+ 000003ff800349f8: e3e0f0880004 lg %r14,136(%r15)
+ 000003ff800349fe: 07fe bcr 15,%r14
+
+After fix:
+
+ 000003ff80ef3db4: a7f40003 brc 15,3ff80ef3dba
+ 000003ff80ef3db8: 0000 unknown
+ 000003ff80ef3dba: e3b0f0700024 stg %r11,112(%r15)
+ 000003ff80ef3dc0: e3e0f0880024 stg %r14,136(%r15)
+ 000003ff80ef3dc6: 0db0 basr %r11,%r0
+ 000003ff80ef3dc8: c0ef00000000 llilf %r14,0
+ 000003ff80ef3dce: e320b0360004 lg %r2,54(%r11)
+ 000003ff80ef3dd4: e330b03e0004 lg %r3,62(%r11)
+ 000003ff80ef3dda: ec230006a065 clgrj %r2,%r3,10,3ff80ef3de6 ; jmp
+ 000003ff80ef3de0: e3e0b0460004 lg %r14,70(%r11)
+ 000003ff80ef3de6: e3e0b04e0004 lg %r14,78(%r11) ; target
+ 000003ff80ef3dec: b904002e lgr %r2,%r14
+ 000003ff80ef3df0: e3b0f0700004 lg %r11,112(%r15)
+ 000003ff80ef3df6: e3e0f0880004 lg %r14,136(%r15)
+ 000003ff80ef3dfc: 07fe bcr 15,%r14
+
+The test_bpf.ko suite runs fine after the fix.
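+
+For illustration, a condensed view of the loop being fixed (simplified
+from the hunk below, not a verbatim copy of the s390 JIT); addrs[]
+maps each eBPF insn index to its JITed offset, and always advancing
+that mapping by one leaves the slot following a 2-insn ldimm64 stale:
+
+	for (i = 0; i < fp->len; i += insn_count) {
+		insn_count = bpf_jit_insn(jit, fp, i); /* 1, 2 or -1 */
+		if (insn_count < 0)
+			return -1;
+		/* ldimm64 spans two eBPF insns, so the next address
+		 * belongs to slot i + insn_count, not always i + 1.
+		 */
+		jit->addrs[i + insn_count] = jit->prg;
+	}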
+
+Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/net/bpf_jit_comp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *
+ insn_count = bpf_jit_insn(jit, fp, i);
+ if (insn_count < 0)
+ return -1;
+- jit->addrs[i + 1] = jit->prg; /* Next instruction address */
++ /* Next instruction address */
++ jit->addrs[i + insn_count] = jit->prg;
+ }
+ bpf_jit_epilogue(jit);
+
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Willem de Bruijn <willemb@google.com>
+Date: Tue, 8 Aug 2017 14:22:55 -0400
+Subject: net: avoid skb_warn_bad_offload false positives on UFO
+
+From: Willem de Bruijn <willemb@google.com>
+
+
+[ Upstream commit 8d63bee643f1fb53e472f0e135cae4eb99d62d19 ]
+
+skb_warn_bad_offload triggers a warning when an skb that does not
+have CHECKSUM_PARTIAL checksum offload set enters the GSO stack at
+__skb_gso_segment.
+
+Commit b2504a5dbef3 ("net: reduce skb_warn_bad_offload() noise")
+observed that SKB_GSO_DODGY producers can trigger the check and
+that passing those packets through the GSO handlers will fix it
+up. But the software UFO handler will set ip_summed to
+CHECKSUM_NONE.
+
+When __skb_gso_segment is called from the receive path, this
+triggers the warning again.
+
+Make UFO set CHECKSUM_UNNECESSARY instead of CHECKSUM_NONE. On
+Tx these two are equivalent. On Rx, this better matches the
+skb state (checksum computed), as CHECKSUM_NONE here means no
+checksum computed.
+
+See also this thread for context:
+http://patchwork.ozlabs.org/patch/799015/
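+
+As a rough reminder of the ip_summed states involved here (simplified;
+see the comments in include/linux/skbuff.h for the authoritative
+semantics):
+
+	/* CHECKSUM_NONE        - checksum not (yet) computed/verified
+	 * CHECKSUM_UNNECESSARY - checksum already known to be valid
+	 * CHECKSUM_PARTIAL     - checksum still to be finished, e.g.
+	 *                        by the device (csum_start/csum_offset)
+	 *
+	 * The software UFO path fills in a correct UDP checksum, so
+	 * CHECKSUM_UNNECESSARY describes the skb on both tx and rx,
+	 * while CHECKSUM_NONE on rx claims nothing was computed.
+	 */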
+
+Fixes: b2504a5dbef3 ("net: reduce skb_warn_bad_offload() noise")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 2 +-
+ net/ipv4/udp_offload.c | 2 +-
+ net/ipv6/udp_offload.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struc
+ {
+ if (tx_path)
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+- skb->ip_summed != CHECKSUM_NONE;
++ skb->ip_summed != CHECKSUM_UNNECESSARY;
+
+ return skb->ip_summed == CHECKSUM_NONE;
+ }
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+- skb->ip_summed = CHECKSUM_NONE;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Fragment the skb. IP headers of the fragments are updated in
+ * inet_gso_segment()
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+- skb->ip_summed = CHECKSUM_NONE;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Aug 2017 23:10:46 -0700
+Subject: net: fix keepalive code vs TCP_FASTOPEN_CONNECT
+
+From: Eric Dumazet <edumazet@google.com>
+
+
+[ Upstream commit 2dda640040876cd8ae646408b69eea40c24f9ae9 ]
+
+syzkaller was able to trigger a divide by 0 in the TCP stack [1].
+
+The issue here is that the keepalive timer needs to be updated to not
+attempt to send a probe if the connection setup was deferred using the
+TCP_FASTOPEN_CONNECT socket option added in linux-4.11.
+
+[1]
+ divide error: 0000 [#1] SMP
+ CPU: 18 PID: 0 Comm: swapper/18 Not tainted
+ task: ffff986f62f4b040 ti: ffff986f62fa2000 task.ti: ffff986f62fa2000
+ RIP: 0010:[<ffffffff8409cc0d>] [<ffffffff8409cc0d>] __tcp_select_window+0x8d/0x160
+ Call Trace:
+ <IRQ>
+ [<ffffffff8409d951>] tcp_transmit_skb+0x11/0x20
+ [<ffffffff8409da21>] tcp_xmit_probe_skb+0xc1/0xe0
+ [<ffffffff840a0ee8>] tcp_write_wakeup+0x68/0x160
+ [<ffffffff840a151b>] tcp_keepalive_timer+0x17b/0x230
+ [<ffffffff83b3f799>] call_timer_fn+0x39/0xf0
+ [<ffffffff83b40797>] run_timer_softirq+0x1d7/0x280
+ [<ffffffff83a04ddb>] __do_softirq+0xcb/0x257
+ [<ffffffff83ae03ac>] irq_exit+0x9c/0xb0
+ [<ffffffff83a04c1a>] smp_apic_timer_interrupt+0x6a/0x80
+ [<ffffffff83a03eaf>] apic_timer_interrupt+0x7f/0x90
+ <EOI>
+ [<ffffffff83fed2ea>] ? cpuidle_enter_state+0x13a/0x3b0
+ [<ffffffff83fed2cd>] ? cpuidle_enter_state+0x11d/0x3b0
+
+Tested:
+
+The following packetdrill script no longer crashes the kernel:
+
+`echo 0 >/proc/sys/net/ipv4/tcp_timestamps`
+
+// Cache warmup: send a Fast Open cookie request
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN_CONNECT, [1], 4) = 0
+ +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation is now in progress)
+ +0 > S 0:0(0) <mss 1460,nop,nop,sackOK,nop,wscale 8,FO,nop,nop>
+ +.01 < S. 123:123(0) ack 1 win 14600 <mss 1460,nop,nop,sackOK,nop,wscale 6,FO abcd1234,nop,nop>
+ +0 > . 1:1(0) ack 1
+ +0 close(3) = 0
+ +0 > F. 1:1(0) ack 1
+ +0 < F. 1:1(0) ack 2 win 92
+ +0 > . 2:2(0) ack 2
+
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 setsockopt(4, SOL_TCP, TCP_FASTOPEN_CONNECT, [1], 4) = 0
+ +0 setsockopt(4, SOL_SOCKET, SO_KEEPALIVE, [1], 4) = 0
+ +.01 connect(4, ..., ...) = 0
+ +0 setsockopt(4, SOL_TCP, TCP_KEEPIDLE, [5], 4) = 0
+ +10 close(4) = 0
+
+`echo 1 >/proc/sys/net/ipv4/tcp_timestamps`
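+
+The new test uses the usual TCPF_* bitmask idiom, where
+TCPF_<STATE> == (1 << TCP_<STATE>), so a single mask test covers
+several states at once; roughly (matching the hunk below):
+
+	/* A deferred TCP_FASTOPEN_CONNECT leaves the socket in
+	 * TCP_SYN_SENT, where sending a keepalive probe makes no sense.
+	 */
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
+		goto out;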
+
+Fixes: 19f6d3f3c842 ("net/tcp-fastopen: Add new API support")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Wei Wang <weiwan@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_timer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigne
+ goto death;
+ }
+
+- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
++ if (!sock_flag(sk, SOCK_KEEPOPEN) ||
++ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
+ goto out;
+
+ elapsed = keepalive_time_when(tp);
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Xin Long <lucien.xin@gmail.com>
+Date: Wed, 9 Aug 2017 18:15:19 +0800
+Subject: net: sched: set xt_tgchk_param par.nft_compat as 0 in ipt_init_target
+
+From: Xin Long <lucien.xin@gmail.com>
+
+
+[ Upstream commit 96d9703050a0036a3360ec98bb41e107c90664fe ]
+
+Commit 55917a21d0cc ("netfilter: x_tables: add context to know if
+extension runs from nft_compat") introduced a member nft_compat in
+the xt_tgchk_param structure.
+
+But it didn't set its value in ipt_init_target. With an unexpected
+value in par.nft_compat, some target's checkentry may return an
+unexpected result.
+
+This patch sets all of par's fields to 0 and only initializes the
+non-zero fields in ipt_init_target.
+
+v1->v2:
+  As per Wang Cong's suggestion, fix it by setting all of par's
+  fields to 0 and only initializing the non-zero fields.
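+
+The resulting pattern is the usual one for on-stack parameter structs;
+a condensed view of ipt_init_target() after the change, showing only
+the fields visible in the hunk below:
+
+	struct xt_tgchk_param par;
+
+	memset(&par, 0, sizeof(par));	/* nft_compat (and anything
+					 * added later) starts out 0 */
+	par.table     = table;
+	par.target    = target;
+	par.targinfo  = t->data;
+	par.hook_mask = hook;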
+
+Fixes: 55917a21d0cc ("netfilter: x_tables: add context to know if extension runs from nft_compat")
+Suggested-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_ipt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_ent
+ return PTR_ERR(target);
+
+ t->u.kernel.target = target;
++ memset(&par, 0, sizeof(par));
+ par.table = table;
+- par.entryinfo = NULL;
+ par.target = target;
+ par.targinfo = t->data;
+ par.hook_mask = hook;
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Willem de Bruijn <willemb@google.com>
+Date: Thu, 10 Aug 2017 12:41:58 -0400
+Subject: packet: fix tp_reserve race in packet_set_ring
+
+From: Willem de Bruijn <willemb@google.com>
+
+
+[ Upstream commit c27927e372f0785f3303e8fad94b85945e2c97b7 ]
+
+Updates to tp_reserve can race with reads of the field in
+packet_set_ring. Avoid this by holding the socket lock during
+updates in setsockopt PACKET_RESERVE.
+
+This bug was discovered by syzkaller.
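+
+A condensed view of the setsockopt side after the change (matching the
+hunk below); the ring-setup path reads tp_reserve under the same
+socket lock, so the check and the update can no longer interleave:
+
+	lock_sock(sk);
+	if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+		ret = -EBUSY;		/* ring already set up */
+	} else {
+		po->tp_reserve = val;
+		ret = 0;
+	}
+	release_sock(sk);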
+
+Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt")
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, i
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+- return -EBUSY;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
+- po->tp_reserve = val;
+- return 0;
++ lock_sock(sk);
++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++ ret = -EBUSY;
++ } else {
++ po->tp_reserve = val;
++ ret = 0;
++ }
++ release_sock(sk);
++ return ret;
+ }
+ case PACKET_LOSS:
+ {
--- /dev/null
+From foo@baz Fri Aug 11 09:19:02 PDT 2017
+Date: Fri, 11 Aug 2017 09:19:02 -0700
+To: Greg KH <gregkh@linuxfoundation.org>
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Subject: revert "ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output"
+
+This reverts commit f102bb7164c9020e12662998f0fd99c3be72d4f6, which is
+commit 0a28cfd51e17f4f0a056bcf66bfbe492c3b99f38 upstream, as there is
+another patch that needs to be applied instead of this one.
+
+Cc: Zheng Li <james.z.li@ericsson.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/ip_output.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -922,7 +922,7 @@ static int __ip_append_data(struct sock
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
++ if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
--- /dev/null
+From foo@baz Fri Aug 11 09:14:09 PDT 2017
+Date: Fri, 11 Aug 2017 09:14:09 -0700
+To: Greg KH <gregkh@linuxfoundation.org>
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Subject: revert "net: account for current skb length when deciding about UFO"
+
+This reverts commit ef09c9ff343122a0b245416066992d096416ff19, which is
+commit a5cb659bbc1c8644efa0c3138a757a1e432a4880 upstream, as it causes
+merge issues with later patches that are much more important...
+
+Cc: Michal Kubecek <mkubecek@suse.cz>
+Cc: Vlad Yasevich <vyasevic@redhat.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/ip_output.c | 3 +--
+ net/ipv6/ip6_output.c | 2 +-
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -922,8 +922,7 @@ static int __ip_append_data(struct sock
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1357,7 +1357,7 @@ emsgsize:
+ */
+
+ cork->length += length;
+- if ((((length + (skb ? skb->len : headersize)) > mtu) ||
++ if ((((length + fragheaderlen) > mtu) ||
+ (skb && skb_is_gso(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) &&
--- /dev/null
+tcp-avoid-setting-cwnd-to-invalid-ssthresh-after-cwnd-reduction-states.patch
+net-fix-keepalive-code-vs-tcp_fastopen_connect.patch
+bpf-s390-fix-jit-branch-offset-related-to-ldimm64.patch
+net-sched-set-xt_tgchk_param-par.nft_compat-as-0-in-ipt_init_target.patch
+tcp-fastopen-tcp_connect-must-refresh-the-route.patch
+net-avoid-skb_warn_bad_offload-false-positives-on-ufo.patch
+packet-fix-tp_reserve-race-in-packet_set_ring.patch
+revert-net-account-for-current-skb-length-when-deciding-about-ufo.patch
+revert-ipv4-should-use-consistent-conditional-judgement-for-ip-fragment-in-__ip_append_data-and-ip_finish_output.patch
+udp-consistently-apply-ufo-or-fragmentation.patch
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Yuchung Cheng <ycheng@google.com>
+Date: Tue, 1 Aug 2017 13:22:32 -0700
+Subject: tcp: avoid setting cwnd to invalid ssthresh after cwnd reduction states
+
+From: Yuchung Cheng <ycheng@google.com>
+
+
+[ Upstream commit ed254971edea92c3ac5c67c6a05247a92aa6075e ]
+
+If the sender switches the congestion control during the ECN-triggered
+cwnd-reduction state (CA_CWR), upon exiting recovery cwnd is set to
+the ssthresh value calculated by the previous congestion control. If
+the previous congestion control is BBR, which always keeps ssthresh
+at TCP_INFINITE_SSTHRESH, cwnd ends up being infinite. The safe step
+is to avoid assigning an invalid ssthresh value when recovery ends.
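+
+For scale: TCP_INFINITE_SSTHRESH is 0x7fffffff (include/net/tcp.h), so
+without the extra check the cwnd reset would amount to:
+
+	/* previous CC was BBR, so ssthresh was never lowered */
+	tp->snd_cwnd = tp->snd_ssthresh;	/* = 0x7fffffff packets,
+						 * an effectively
+						 * unbounded cwnd */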
+
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2503,8 +2503,8 @@ static inline void tcp_end_cwnd_reductio
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 8 Aug 2017 01:41:58 -0700
+Subject: tcp: fastopen: tcp_connect() must refresh the route
+
+From: Eric Dumazet <edumazet@google.com>
+
+
+[ Upstream commit 8ba60924710cde564a3905588b6219741d6356d0 ]
+
+With the new TCP_FASTOPEN_CONNECT socket option, it is possible to
+call tcp_connect() while the socket's sk_dst_cache is either NULL
+or invalid.
+
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 setsockopt(4, SOL_TCP, TCP_FASTOPEN_CONNECT, [1], 4) = 0
+ +0 connect(4, ..., ...) = 0
+
+<< sk->sk_dst_cache becomes obsolete, or even set to NULL >>
+
+ +1 sendto(4, ..., 1000, MSG_FASTOPEN, ..., ...) = 1000
+
+We need to refresh the route, otherwise bad things can happen,
+especially when syzkaller is running on the host :/
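+
+For reference, icsk_af_ops->rebuild_header resolves to
+inet_sk_rebuild_header() (or inet6_sk_rebuild_header() for IPv6),
+which revalidates sk_dst_cache and looks up a fresh route when the
+cached one has gone stale; the fix simply reuses that path before
+the SYN is built:
+
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */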
+
+Fixes: 19f6d3f3c8422 ("net/tcp-fastopen: Add new API support")
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Wei Wang <weiwan@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Acked-by: Wei Wang <weiwan@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk)
+ struct sk_buff *buff;
+ int err;
+
++ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
++ return -EHOSTUNREACH; /* Routing failure or similar. */
++
+ tcp_connect_init(sk);
+
+ if (unlikely(tp->repair)) {
--- /dev/null
+From foo@baz Fri Aug 11 09:41:52 PDT 2017
+From: Willem de Bruijn <willemb@google.com>
+Date: Thu, 10 Aug 2017 12:29:19 -0400
+Subject: udp: consistently apply ufo or fragmentation
+
+From: Willem de Bruijn <willemb@google.com>
+
+
+[ Upstream commit 85f1bd9a7b5a79d5baa8bf44af19658f7bf77bfa ]
+
+When iteratively building a UDP datagram with MSG_MORE and that
+datagram exceeds MTU, consistently choose UFO or fragmentation.
+
+Once skb_is_gso, always apply ufo. Conversely, once a datagram is
+split across multiple skbs, do not consider ufo.
+
+Sendpage already maintains the first invariant, only add the second.
+IPv6 does not have a sendpage implementation to modify.
+
+A GSO skb must have a partial checksum, so do not follow sk_no_check_tx
+in udp_send_skb.
+
+Found by syzkaller.
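+
+The two invariants map onto the new condition in __ip_append_data()
+roughly as follows (condensed from the hunks below; some of the
+existing UDP/UFO capability checks are left out here):
+
+	if ((skb && skb_is_gso(skb)) ||		/* already UFO: stay UFO */
+	    ((length > mtu) &&
+	     (skb_queue_len(queue) <= 1) &&	/* not yet fragmented    */
+	     (sk->sk_protocol == IPPROTO_UDP) &&
+	     (rt->dst.dev->features & NETIF_F_UFO)))
+		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+					 hh_len, fragheaderlen, transhdrlen,
+					 maxfraglen, flags);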
+
+Fixes: e89e9cf539a2 ("[IPv4/IPv6]: UFO Scatter-gather approach")
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_output.c | 7 +++++--
+ net/ipv4/udp.c | 2 +-
+ net/ipv6/ip6_output.c | 7 ++++---
+ 3 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -922,10 +922,12 @@ static int __ip_append_data(struct sock
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++ if ((skb && skb_is_gso(skb)) ||
++ ((length > mtu) &&
++ (skb_queue_len(queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+- (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
++ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, transhdrlen,
+ maxfraglen, flags);
+@@ -1241,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk,
+ return -EINVAL;
+
+ if ((size + skb->len > mtu) &&
++ (skb_queue_len(&sk->sk_write_queue) == 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *
+ if (is_udplite) /* UDP-Lite */
+ csum = udplite_csum(skb);
+
+- else if (sk->sk_no_check_tx) { /* UDP csum disabled */
++ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */
+
+ skb->ip_summed = CHECKSUM_NONE;
+ goto send;
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1357,11 +1357,12 @@ emsgsize:
+ */
+
+ cork->length += length;
+- if ((((length + fragheaderlen) > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ if ((skb && skb_is_gso(skb)) ||
++ (((length + fragheaderlen) > mtu) &&
++ (skb_queue_len(queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) &&
+- (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
++ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
+ err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, exthdrlen,
+ transhdrlen, mtu, flags, fl6);