--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Date: Fri, 27 Jul 2018 18:15:46 +0200
+Subject: ipv4: remove BUG_ON() from fib_compute_spec_dst
+
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+
+[ Upstream commit 9fc12023d6f51551d6ca9ed7e02ecc19d79caf17 ]
+
+Remove the BUG_ON() from the fib_compute_spec_dst routine and check
+the in_dev pointer during flowi4 data structure initialization.
+fib_compute_spec_dst can run concurrently with device removal, where
+the ip_ptr net_device pointer is set to NULL. This can happen if
+userspace enables pkt info on a UDP rx socket and the device is
+removed while traffic is flowing.
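+
+A minimal user-space sketch of the NULL-tolerant pattern (hypothetical
+names and types, not the kernel's):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct in_device { bool src_vmark; };
+
+    /* Stand-in for __in_dev_get_rcu(): under RCU this may observe
+     * NULL once device removal has cleared dev->ip_ptr. */
+    static struct in_device *in_dev_get(struct in_device *ip_ptr)
+    {
+            return ip_ptr;
+    }
+
+    static unsigned int compute_mark(struct in_device *ip_ptr,
+                                     unsigned int skb_mark)
+    {
+            struct in_device *in_dev = in_dev_get(ip_ptr);
+            /* Old code: BUG_ON(!in_dev) crashed on this race.
+             * New code: a vanished in_dev means "no src vmark". */
+            bool vmark = in_dev && in_dev->src_vmark;
+
+            return vmark ? skb_mark : 0;
+    }
+
+    int main(void)
+    {
+            struct in_device alive = { .src_vmark = true };
+
+            printf("%u\n", compute_mark(&alive, 42)); /* 42 */
+            printf("%u\n", compute_mark(NULL, 42));   /* 0  */
+            return 0;
+    }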
+
+Fixes: 35ebf65e851c ("ipv4: Create and use fib_compute_spec_dst() helper")
+Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/fib_frontend.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -209,19 +209,19 @@ __be32 fib_compute_spec_dst(struct sk_bu
+ return ip_hdr(skb)->daddr;
+
+ in_dev = __in_dev_get_rcu(dev);
+- BUG_ON(!in_dev);
+
+ net = dev_net(dev);
+
+ scope = RT_SCOPE_UNIVERSE;
+ if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
++ bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
+ fl4.flowi4_oif = 0;
+ fl4.flowi4_iif = LOOPBACK_IFINDEX;
+ fl4.daddr = ip_hdr(skb)->saddr;
+ fl4.saddr = 0;
+ fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.flowi4_scope = scope;
+- fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
++ fl4.flowi4_mark = vmark ? skb->mark : 0;
+ if (!fib_lookup(net, &fl4, &res))
+ return FIB_RES_PREFSRC(net, res);
+ } else {
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Date: Thu, 26 Jul 2018 15:05:37 +0300
+Subject: NET: stmmac: align DMA stuff to largest cache line length
+
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+
+[ Upstream commit 9939a46d90c6c76f4533d534dbadfa7b39dc6acc ]
+
+As of today, the STMMAC_ALIGN macro (which is used to align DMA
+buffers and descriptors) relies on the L1 cache line length
+(L1_CACHE_BYTES).
+This isn't correct on systems with several cache levels, which may
+have an L1 cache line shorter than the L2 line. One cache line can
+then be shared between a DMA buffer and other data, and that data can
+be lost when the DMA buffer is invalidated before a DMA transaction.
+
+Fix that by using SMP_CACHE_BYTES instead of L1_CACHE_BYTES for
+alignment.
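+
+A worked example of the align-up arithmetic, as a user-space sketch
+(ALIGN_UP mirrors the kernel's __ALIGN_KERNEL(); the line sizes are
+illustrative, not tied to any particular CPU):
+
+    #include <stdio.h>
+
+    #define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
+
+    int main(void)
+    {
+            unsigned int l1_line = 32; /* L1_CACHE_BYTES           */
+            unsigned int l2_line = 64; /* SMP_CACHE_BYTES, largest */
+            unsigned int len = 72;     /* DMA buffer length        */
+
+            /* L1 alignment rounds 72 up to 96, so the buffer ends in
+             * the middle of the 64-byte L2 line [64, 128): whatever
+             * is placed at 96..127 shares that line and is lost when
+             * the buffer is invalidated around a DMA transfer. */
+            printf("%u\n", ALIGN_UP(len, l1_line)); /* 96 */
+
+            /* Aligning to the largest line keeps whole L2 lines
+             * exclusive to the buffer. */
+            printf("%u\n", ALIGN_UP(len, l2_line)); /* 128 */
+            return 0;
+    }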
+
+Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -53,7 +53,7 @@
+ #include "stmmac.h"
+ #include <linux/reset.h>
+
+-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
++#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+
+ /* Module parameters */
+ #define TX_TIMEO 5000
media-si470x-fix-__be16-annotations.patch
random-mix-rdrand-with-entropy-sent-in-from-userspace.patch
squashfs-be-more-careful-about-metadata-corruption.patch
+net-stmmac-align-dma-stuff-to-largest-cache-line-length.patch
+xen-netfront-wait-xenbus-state-change-when-load-module-manually.patch
+tcp-do-not-force-quickack-when-receiving-out-of-order-packets.patch
+tcp-add-max_quickacks-param-to-tcp_incr_quickack-and-tcp_enter_quickack_mode.patch
+tcp-do-not-aggressively-quick-ack-after-ecn-events.patch
+tcp-refactor-tcp_ecn_check_ce-to-remove-sk-type-cast.patch
+tcp-add-one-more-quick-ack-after-after-ecn-events.patch
+ipv4-remove-bug_on-from-fib_compute_spec_dst.patch
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 21 May 2018 15:08:56 -0700
+Subject: tcp: add max_quickacks param to tcp_incr_quickack and tcp_enter_quickack_mode
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9a9c9b51e54618861420093ae6e9b50a961914c5 ]
+
+We want to add finer control of the number of ACK packets sent after
+ECN events.
+
+This patch does not change current behavior; it only enables the
+following change.
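+
+A user-space sketch of the new clamp (kernel state is reduced to plain
+integers): with max_quickacks == TCP_MAX_QUICKACKS the result matches
+the old code, which is why this patch is behavior-neutral.
+
+    #include <stdio.h>
+
+    #define TCP_MAX_QUICKACKS 16
+
+    static unsigned int min_u(unsigned int a, unsigned int b)
+    {
+            return a < b ? a : b;
+    }
+
+    /* Models tcp_incr_quickack() after this patch: the caller now
+     * chooses the cap instead of a hard TCP_MAX_QUICKACKS. */
+    static unsigned int incr_quickack(unsigned int cur_quick,
+                                      unsigned int wnd_quickacks,
+                                      unsigned int max_quickacks)
+    {
+            unsigned int quickacks = wnd_quickacks ? wnd_quickacks : 2;
+
+            quickacks = min_u(quickacks, max_quickacks);
+            return quickacks > cur_quick ? quickacks : cur_quick;
+    }
+
+    int main(void)
+    {
+            /* Old behavior preserved when callers pass the old cap. */
+            printf("%u\n", incr_quickack(0, 40, TCP_MAX_QUICKACKS)); /* 16 */
+            /* Finer control enabled for later patches. */
+            printf("%u\n", incr_quickack(0, 40, 1));                 /* 1  */
+            return 0;
+    }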
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 2 +-
+ net/ipv4/tcp_dctcp.c | 4 ++--
+ net/ipv4/tcp_input.c | 24 +++++++++++++-----------
+ 3 files changed, 16 insertions(+), 14 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -372,7 +372,7 @@ ssize_t tcp_splice_read(struct socket *s
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
+-void tcp_enter_quickack_mode(struct sock *sk);
++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
+ static inline void tcp_dec_quickack_mode(struct sock *sk,
+ const unsigned int pkts)
+ {
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -135,7 +135,7 @@ static void dctcp_ce_state_0_to_1(struct
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+- tcp_enter_quickack_mode(sk);
++ tcp_enter_quickack_mode(sk, 1);
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+@@ -156,7 +156,7 @@ static void dctcp_ce_state_1_to_0(struct
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+- tcp_enter_quickack_mode(sk);
++ tcp_enter_quickack_mode(sk, 1);
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -171,21 +171,23 @@ static void tcp_measure_rcv_mss(struct s
+ }
+ }
+
+-static void tcp_incr_quickack(struct sock *sk)
++static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
+
+ if (quickacks == 0)
+ quickacks = 2;
++ quickacks = min(quickacks, max_quickacks);
+ if (quickacks > icsk->icsk_ack.quick)
+- icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
++ icsk->icsk_ack.quick = quickacks;
+ }
+
+-void tcp_enter_quickack_mode(struct sock *sk)
++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+- tcp_incr_quickack(sk);
++
++ tcp_incr_quickack(sk, max_quickacks);
+ icsk->icsk_ack.pingpong = 0;
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ }
+@@ -228,7 +230,7 @@ static void __tcp_ecn_check_ce(struct tc
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+- tcp_enter_quickack_mode((struct sock *)tp);
++ tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
+ break;
+ case INET_ECN_CE:
+ if (tcp_ca_needs_ecn((struct sock *)tp))
+@@ -236,7 +238,7 @@ static void __tcp_ecn_check_ce(struct tc
+
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ /* Better not delay acks, sender can have a very low cwnd */
+- tcp_enter_quickack_mode((struct sock *)tp);
++ tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ }
+ tp->ecn_flags |= TCP_ECN_SEEN;
+@@ -644,7 +646,7 @@ static void tcp_event_data_recv(struct s
+ /* The _first_ data packet received, initialize
+ * delayed ACK engine.
+ */
+- tcp_incr_quickack(sk);
++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ } else {
+ int m = now - icsk->icsk_ack.lrcvtime;
+@@ -660,7 +662,7 @@ static void tcp_event_data_recv(struct s
+ /* Too long gap. Apparently sender failed to
+ * restart window, so that we send ACKs quickly.
+ */
+- tcp_incr_quickack(sk);
++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
+ sk_mem_reclaim(sk);
+ }
+ }
+@@ -4001,7 +4003,7 @@ static void tcp_send_dupack(struct sock
+ if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+ before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+- tcp_enter_quickack_mode(sk);
++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
+
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+@@ -4501,7 +4503,7 @@ queue_and_out:
+ tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+
+ out_of_window:
+- tcp_enter_quickack_mode(sk);
++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
+ inet_csk_schedule_ack(sk);
+ drop:
+ __kfree_skb(skb);
+@@ -5521,7 +5523,7 @@ static int tcp_rcv_synsent_state_process
+ * to stand against the temptation 8) --ANK
+ */
+ inet_csk_schedule_ack(sk);
+- tcp_enter_quickack_mode(sk);
++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ TCP_DELACK_MAX, TCP_RTO_MAX);
+
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 27 Jun 2018 08:47:21 -0700
+Subject: tcp: add one more quick ack after after ECN events
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 15ecbe94a45ef88491ca459b26efdd02f91edb6d ]
+
+Larry Brakmo's proposal ( https://patchwork.ozlabs.org/patch/935233/
+tcp: force cwnd at least 2 in tcp_cwnd_reduction) made us reconsider
+our recent patch removing ~16 quick acks after ECN events.
+
+tcp_enter_quickack_mode(sk, 1) makes sure one immediate ack is sent,
+but if the sender's cwnd was lowered to 1, we do not want a delayed
+ack for the next packet we receive.
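+
+A toy model of the quick-ack budget (icsk_ack.quick reduced to a plain
+counter; the real decrement happens in tcp_dec_quickack_mode()): with a
+budget of 1, the packet after the ECN event gets a delayed ack, which a
+cwnd==1 sender cannot afford.
+
+    #include <stdio.h>
+
+    static unsigned int quick; /* remaining immediate acks */
+
+    static const char *ack_for_packet(void)
+    {
+            if (quick) {
+                    quick--;
+                    return "immediate ack";
+            }
+            return "delayed ack";
+    }
+
+    int main(void)
+    {
+            quick = 1; /* after 522040ea5fdd */
+            printf("pkt1: %s\n", ack_for_packet()); /* immediate */
+            printf("pkt2: %s\n", ack_for_packet()); /* delayed   */
+
+            quick = 2; /* after this patch */
+            printf("pkt1: %s\n", ack_for_packet()); /* immediate */
+            printf("pkt2: %s\n", ack_for_packet()); /* immediate */
+            return 0;
+    }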
+
+Fixes: 522040ea5fdd ("tcp: do not aggressively quick ack after ECN events")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Neal Cardwell <ncardwell@google.com>
+Cc: Lawrence Brakmo <brakmo@fb.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -232,7 +232,7 @@ static void __tcp_ecn_check_ce(struct so
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+- tcp_enter_quickack_mode(sk, 1);
++ tcp_enter_quickack_mode(sk, 2);
+ break;
+ case INET_ECN_CE:
+ if (tcp_ca_needs_ecn(sk))
+@@ -240,7 +240,7 @@ static void __tcp_ecn_check_ce(struct so
+
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ /* Better not delay acks, sender can have a very low cwnd */
+- tcp_enter_quickack_mode(sk, 1);
++ tcp_enter_quickack_mode(sk, 2);
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ }
+ tp->ecn_flags |= TCP_ECN_SEEN;
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 21 May 2018 15:08:57 -0700
+Subject: tcp: do not aggressively quick ack after ECN events
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 522040ea5fdd1c33bbf75e1d7c7c0422b96a94ef ]
+
+ECN signals currently force TCP to enter quickack mode for
+up to 16 (TCP_MAX_QUICKACKS) following incoming packets.
+
+We believe this is not needed, and only sending one immediate ack
+for the current packet should be enough.
+
+This should reduce the extra load noticed in DCTCP environments,
+after congestion events.
+
+This is part 2 of our effort to reduce pure ACK packets.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -230,7 +230,7 @@ static void __tcp_ecn_check_ce(struct tc
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+- tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
++ tcp_enter_quickack_mode((struct sock *)tp, 1);
+ break;
+ case INET_ECN_CE:
+ if (tcp_ca_needs_ecn((struct sock *)tp))
+@@ -238,7 +238,7 @@ static void __tcp_ecn_check_ce(struct tc
+
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ /* Better not delay acks, sender can have a very low cwnd */
+- tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
++ tcp_enter_quickack_mode((struct sock *)tp, 1);
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ }
+ tp->ecn_flags |= TCP_ECN_SEEN;
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 17 May 2018 14:47:25 -0700
+Subject: tcp: do not force quickack when receiving out-of-order packets
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a3893637e1eb0ef5eb1bbc52b3a8d2dfa317a35d ]
+
+As explained in commit 9f9843a751d0 ("tcp: properly handle stretch
+acks in slow start"), TCP stacks have to consider how many packets
+are acknowledged in one single ACK, because of GRO, but also
+because of ACK compression or losses.
+
+We plan to add SACK compression in the following patch; we must
+therefore not call tcp_enter_quickack_mode() for out-of-order
+packets.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4512,8 +4512,6 @@ drop:
+ if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
+ goto out_of_window;
+
+- tcp_enter_quickack_mode(sk);
+-
+ if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+ /* Partial packet, seq < rcv_next < end_seq */
+ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Yousuk Seung <ysseung@google.com>
+Date: Mon, 4 Jun 2018 15:29:51 -0700
+Subject: tcp: refactor tcp_ecn_check_ce to remove sk type cast
+
+From: Yousuk Seung <ysseung@google.com>
+
+[ Upstream commit f4c9f85f3b2cb7669830cd04d0be61192a4d2436 ]
+
+Refactor tcp_ecn_check_ce and __tcp_ecn_check_ce to accept struct sock*
+instead of tcp_sock* to clean up type casts. This is a pure refactor
+patch.
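+
+Why the old casts worked at all, as a minimal user-space model: struct
+sock sits at offset 0 of the connection-sock embedding chain, so
+(struct sock *)tp and tcp_sk(sk) are inverse first-member casts. The
+layouts below are illustrative, not the kernel's real definitions.
+
+    #include <stdio.h>
+
+    struct sock { int dummy; };
+    struct inet_connection_sock { struct sock sk; /* first member */ };
+    struct tcp_sock { struct inet_connection_sock inet_conn; /* first */ };
+
+    static struct tcp_sock *tcp_sk(struct sock *sk)
+    {
+            return (struct tcp_sock *)sk; /* valid: offset 0 */
+    }
+
+    int main(void)
+    {
+            struct tcp_sock tp;
+            struct sock *sk = (struct sock *)&tp;
+
+            /* Passing sk and deriving tp is the cleaner direction. */
+            printf("%d\n", tcp_sk(sk) == &tp); /* 1 */
+            return 0;
+    }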
+
+Signed-off-by: Yousuk Seung <ysseung@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -221,8 +221,10 @@ static void tcp_ecn_withdraw_cwr(struct
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ }
+
+-static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
++static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
++
+ switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ /* Funny extension: if ECT is not set on a segment,
+@@ -230,31 +232,31 @@ static void __tcp_ecn_check_ce(struct tc
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+- tcp_enter_quickack_mode((struct sock *)tp, 1);
++ tcp_enter_quickack_mode(sk, 1);
+ break;
+ case INET_ECN_CE:
+- if (tcp_ca_needs_ecn((struct sock *)tp))
+- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
++ if (tcp_ca_needs_ecn(sk))
++ tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
+
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ /* Better not delay acks, sender can have a very low cwnd */
+- tcp_enter_quickack_mode((struct sock *)tp, 1);
++ tcp_enter_quickack_mode(sk, 1);
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ }
+ tp->ecn_flags |= TCP_ECN_SEEN;
+ break;
+ default:
+- if (tcp_ca_needs_ecn((struct sock *)tp))
+- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
++ if (tcp_ca_needs_ecn(sk))
++ tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
+ tp->ecn_flags |= TCP_ECN_SEEN;
+ break;
+ }
+ }
+
+-static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
++static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+ {
+- if (tp->ecn_flags & TCP_ECN_OK)
+- __tcp_ecn_check_ce(tp, skb);
++ if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
++ __tcp_ecn_check_ce(sk, skb);
+ }
+
+ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+@@ -668,7 +670,7 @@ static void tcp_event_data_recv(struct s
+ }
+ icsk->icsk_ack.lrcvtime = now;
+
+- tcp_ecn_check_ce(tp, skb);
++ tcp_ecn_check_ce(sk, skb);
+
+ if (skb->len >= 128)
+ tcp_grow_window(sk, skb);
+@@ -4231,7 +4233,7 @@ static void tcp_data_queue_ofo(struct so
+ struct sk_buff *skb1;
+ u32 seq, end_seq;
+
+- tcp_ecn_check_ce(tp, skb);
++ tcp_ecn_check_ce(sk, skb);
+
+ if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
--- /dev/null
+From foo@baz Fri Aug 3 21:30:27 CEST 2018
+From: Xiao Liang <xiliang@redhat.com>
+Date: Fri, 27 Jul 2018 17:56:08 +0800
+Subject: xen-netfront: wait xenbus state change when load module manually
+
+From: Xiao Liang <xiliang@redhat.com>
+
+[ Upstream commit 822fb18a82abaf4ee7058793d95d340f5dab7bfc ]
+
+When loading the module manually, after xenbus_switch_state is called
+to initialize the state of the netfront device, the driver state may
+not change fast enough, which can result in no device being created on
+recent kernels. This patch adds a wait to make sure xenbus knows the
+driver is not in the closed/unknown state.
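+
+A user-space sketch of the predicate the new wait_event() uses (enum
+values and sequencing are illustrative stand-ins for the xenbus ones):
+the probe path must not return while the backend still reads as Closed
+(left over from the unload) or Unknown.
+
+    #include <stdio.h>
+
+    enum xenbus_state {
+            XenbusStateUnknown,
+            XenbusStateInitialising,
+            XenbusStateClosed,
+    };
+
+    static int backend_reacted(enum xenbus_state s)
+    {
+            return s != XenbusStateClosed && s != XenbusStateUnknown;
+    }
+
+    int main(void)
+    {
+            /* States a backend may report across a manual reload. */
+            enum xenbus_state seq[] = {
+                    XenbusStateClosed,       /* from the old instance */
+                    XenbusStateUnknown,
+                    XenbusStateInitialising, /* wait completes here   */
+            };
+            for (unsigned int i = 0; i < 3; i++)
+                    printf("state %d: ready=%d\n", seq[i],
+                           backend_reacted(seq[i]));
+            return 0;
+    }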
+
+Current state:
+[vm]# ethtool eth0
+Settings for eth0:
+ Link detected: yes
+[vm]# modprobe -r xen_netfront
+[vm]# modprobe xen_netfront
+[vm]# ethtool eth0
+Settings for eth0:
+Cannot get device settings: No such device
+Cannot get wake-on-lan settings: No such device
+Cannot get message level: No such device
+Cannot get link status: No such device
+No data available
+
+With the patch installed:
+[vm]# ethtool eth0
+Settings for eth0:
+ Link detected: yes
+[vm]# modprobe -r xen_netfront
+[vm]# modprobe xen_netfront
+[vm]# ethtool eth0
+Settings for eth0:
+ Link detected: yes
+
+Signed-off-by: Xiao Liang <xiliang@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -85,6 +85,7 @@ struct netfront_cb {
+ /* IRQ name is queue name with "-tx" or "-rx" appended */
+ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
++static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
+ static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
+ struct netfront_stats {
+@@ -1359,6 +1360,11 @@ static struct net_device *xennet_create_
+ netif_carrier_off(netdev);
+
+ xenbus_switch_state(dev, XenbusStateInitialising);
++ wait_event(module_load_q,
++ xenbus_read_driver_state(dev->otherend) !=
++ XenbusStateClosed &&
++ xenbus_read_driver_state(dev->otherend) !=
++ XenbusStateUnknown);
+ return netdev;
+
+ exit:
--- /dev/null
+ipv4-remove-bug_on-from-fib_compute_spec_dst.patch
+net-ena-fix-use-of-uninitialized-dma-address-bits-field.patch
+net-fix-amd-xgbe-flow-control-issue.patch
+net-lan78xx-fix-rx-handling-before-first-packet-is-send.patch
+net-mdio-mux-bcm-iproc-fix-wrong-getter-and-setter-pair.patch
+net-stmmac-align-dma-stuff-to-largest-cache-line-length.patch
+tcp_bbr-fix-bw-probing-to-raise-in-flight-data-for-very-small-bdps.patch
+xen-netfront-wait-xenbus-state-change-when-load-module-manually.patch
+netlink-do-not-subscribe-to-non-existent-groups.patch
+netlink-don-t-shift-with-ub-on-nlk-ngroups.patch
+tcp-do-not-force-quickack-when-receiving-out-of-order-packets.patch
+tcp-add-max_quickacks-param-to-tcp_incr_quickack-and-tcp_enter_quickack_mode.patch
+tcp-do-not-aggressively-quick-ack-after-ecn-events.patch
+tcp-refactor-tcp_ecn_check_ce-to-remove-sk-type-cast.patch
+tcp-add-one-more-quick-ack-after-after-ecn-events.patch