--- /dev/null
+From foo@baz Thu Feb 21 09:43:21 CET 2019
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sat, 16 Feb 2019 13:44:39 -0800
+Subject: net: Add header for usage of fls64()
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 8681ef1f3d295bd3600315325f3b3396d76d02f6 ]
+
+Fixes: 3b89ea9c5902 ("net: Fix for_each_netdev_feature on Big endian")
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netdev_features.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <linux/bitops.h>
+ #include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
--- /dev/null
+From foo@baz Thu Feb 21 07:26:37 CET 2019
+From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Date: Fri, 15 Feb 2019 14:44:18 -0800
+Subject: net: Do not allocate page fragments that are not skb aligned
+
+From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+
+[ Upstream commit 3bed3cc4156eedf652b4df72bdb35d4f1a2a739d ]
+
+This patch addresses the fact that there are drivers, specifically tun,
+that will call into the network page fragment allocators with buffer sizes
+that are not cache aligned. Doing this could result in data alignment
+and DMA performance issues as these fragment pools are also shared with the
+skb allocator and any other devices that will use napi_alloc_frags or
+netdev_alloc_frags.
+
+Fixes: ffde7328a36d ("net: Split netdev_alloc_frag into __alloc_page_frag and add __napi_alloc_frag")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -374,6 +374,8 @@ static void *__netdev_alloc_frag(unsigne
+ */
+ void *netdev_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ }
+ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -387,6 +389,8 @@ static void *__napi_alloc_frag(unsigned
+
+ void *napi_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ }
+ EXPORT_SYMBOL(napi_alloc_frag);
--- /dev/null
+From foo@baz Thu Feb 21 09:43:21 CET 2019
+From: Hauke Mehrtens <hauke.mehrtens@intel.com>
+Date: Fri, 15 Feb 2019 17:58:54 +0100
+Subject: net: Fix for_each_netdev_feature on Big endian
+
+From: Hauke Mehrtens <hauke.mehrtens@intel.com>
+
+[ Upstream commit 3b89ea9c5902acccdbbdec307c85edd1bf52515e ]
+
+The features attribute is of type u64 and stored in the native endianness on
+the system. The for_each_set_bit() macro takes a pointer to a 32 bit array
+and goes over the bits in this area. On little endian systems this also
+works with a u64 as the most significant bit is on the highest address,
+but on big endian the words are swapped. When we expect bit 15 here we get
+bit 47 (15 + 32).
+
+This patch converts it more or less to its own for_each_set_bit()
+implementation which works on 64 bit integers directly. This is then
+completely in host endianness and should work as expected.
+
+Fixes: fd867d51f ("net/core: generic support for disabling netdev features down stack")
+Signed-off-by: Hauke Mehrtens <hauke.mehrtens@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netdev_features.h | 23 +++++++++++++++++++++--
+ net/core/dev.c | 4 ++--
+ 2 files changed, 23 insertions(+), 4 deletions(-)
+
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
+
+@@ -125,8 +126,26 @@ enum {
+ #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
+ #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+
+-#define for_each_netdev_feature(mask_addr, bit) \
+- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
++/* Finds the next feature with the highest number of the range of start till 0.
++ */
++static inline int find_next_netdev_feature(u64 feature, unsigned long start)
++{
++ /* like BITMAP_LAST_WORD_MASK() for u64
++ * this sets the most significant 64 - start to 0.
++ */
++ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
++
++ return fls64(feature) - 1;
++}
++
++/* This goes for the MSB to the LSB through the set feature bits,
++ * mask_addr should be a u64 and bit an int
++ */
++#define for_each_netdev_feature(mask_addr, bit) \
++ for ((bit) = find_next_netdev_feature((mask_addr), \
++ NETDEV_FEATURE_COUNT); \
++ (bit) >= 0; \
++ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6421,7 +6421,7 @@ static netdev_features_t netdev_sync_upp
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(upper->wanted_features & feature)
+ && (features & feature)) {
+@@ -6441,7 +6441,7 @@ static void netdev_sync_lower_features(s
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(features & feature) && (lower->features & feature)) {
+ netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
--- /dev/null
+From foo@baz Thu Feb 21 08:41:54 CET 2019
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Date: Wed, 6 Feb 2019 19:18:04 +0100
+Subject: net: ipv4: use a dedicated counter for icmp_v4 redirect packets
+
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+
+[ Upstream commit c09551c6ff7fe16a79a42133bcecba5fc2fc3291 ]
+
+According to the algorithm described in the comment block at the
+beginning of ip_rt_send_redirect, the host should try to send
+'ip_rt_redirect_number' ICMP redirect packets with an exponential
+backoff and then stop sending them at all assuming that the destination
+ignores redirects.
+If the device has previously sent some ICMP error packets that are
+rate-limited (e.g. TTL expired) and continues to receive traffic,
+the redirect packets will never be transmitted. This happens since
+peer->rate_tokens will be typically greater than 'ip_rt_redirect_number'
+and so it will never be reset even if the redirect silence timeout
+(ip_rt_redirect_silence) has elapsed without receiving any packet
+requiring redirects.
+
+Fix it by using a dedicated counter for the number of ICMP redirect
+packets that have been sent by the host.
+
+I have not been able to identify a given commit that introduced the
+issue since ip_rt_send_redirect implements the same rate-limiting
+algorithm from commit 1da177e4c3f4 ("Linux-2.6.12-rc2")
+
+Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inetpeer.h | 1 +
+ net/ipv4/inetpeer.c | 1 +
+ net/ipv4/route.c | 7 +++++--
+ 3 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -40,6 +40,7 @@ struct inet_peer {
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
++ u32 n_redirects;
+ unsigned long rate_last;
+ union {
+ struct list_head gc_list;
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -448,6 +448,7 @@ relookup:
+ atomic_set(&p->rid, 0);
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
++ p->n_redirects = 0;
+ /* 60*HZ is arbitrary, but chosen enough high so that the first
+ * calculation of tokens is at its maximum.
+ */
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -876,13 +876,15 @@ void ip_rt_send_redirect(struct sk_buff
+ /* No redirected packets during ip_rt_redirect_silence;
+ * reset the algorithm.
+ */
+- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
++ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
+ peer->rate_tokens = 0;
++ peer->n_redirects = 0;
++ }
+
+ /* Too many ignored redirects; do not send anything
+ * set dst.rate_last to the last seen redirected packet.
+ */
+- if (peer->rate_tokens >= ip_rt_redirect_number) {
++ if (peer->n_redirects >= ip_rt_redirect_number) {
+ peer->rate_last = jiffies;
+ goto out_put_peer;
+ }
+@@ -899,6 +901,7 @@ void ip_rt_send_redirect(struct sk_buff
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->rate_tokens;
++ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+ peer->rate_tokens == ip_rt_redirect_number)
--- /dev/null
+From foo@baz Thu Feb 21 08:41:54 CET 2019
+From: Jose Abreu <jose.abreu@synopsys.com>
+Date: Mon, 18 Feb 2019 14:35:03 +0100
+Subject: net: stmmac: Fix a race in EEE enable callback
+
+From: Jose Abreu <jose.abreu@synopsys.com>
+
+[ Upstream commit 8a7493e58ad688eb23b81e45461c5d314f4402f1 ]
+
+We are saving the status of EEE even before we try to enable it. This
+leads to a race with XMIT function that tries to arm EEE timer before we
+set it up.
+
+Fix this by only saving the EEE parameters after all operations are
+performed with success.
+
+Signed-off-by: Jose Abreu <joabreu@synopsys.com>
+Fixes: d765955d2ae0 ("stmmac: add the Energy Efficient Ethernet support")
+Cc: Joao Pinto <jpinto@synopsys.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+Cc: Alexandre Torgue <alexandre.torgue@st.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 22 ++++++++++---------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -614,25 +614,27 @@ static int stmmac_ethtool_op_set_eee(str
+ struct ethtool_eee *edata)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
++ int ret;
+
+- priv->eee_enabled = edata->eee_enabled;
+-
+- if (!priv->eee_enabled)
++ if (!edata->eee_enabled) {
+ stmmac_disable_eee_mode(priv);
+- else {
++ } else {
+ /* We are asking for enabling the EEE but it is safe
+ * to verify all by invoking the eee_init function.
+ * In case of failure it will return an error.
+ */
+- priv->eee_enabled = stmmac_eee_init(priv);
+- if (!priv->eee_enabled)
++ edata->eee_enabled = stmmac_eee_init(priv);
++ if (!edata->eee_enabled)
+ return -EOPNOTSUPP;
+-
+- /* Do not change tx_lpi_timer in case of failure */
+- priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+- return phy_ethtool_set_eee(priv->phydev, edata);
++ ret = phy_ethtool_set_eee(dev->phydev, edata);
++ if (ret)
++ return ret;
++
++ priv->eee_enabled = edata->eee_enabled;
++ priv->tx_lpi_timer = edata->tx_lpi_timer;
++ return 0;
+ }
+
+ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
net-fix-ipv6-prefix-route-residue.patch
vsock-cope-with-memory-allocation-failure-at-socket-.patch
hwmon-lm80-fix-missing-unlock-on-error-in-set_fan_di.patch
+net-fix-for_each_netdev_feature-on-big-endian.patch
+sky2-increase-d3-delay-again.patch
+net-add-header-for-usage-of-fls64.patch
+tcp-tcp_v4_err-should-be-more-careful.patch
+net-do-not-allocate-page-fragments-that-are-not-skb-aligned.patch
+tcp-clear-icsk_backoff-in-tcp_write_queue_purge.patch
+vxlan-test-dev-flags-iff_up-before-calling-netif_rx.patch
+net-stmmac-fix-a-race-in-eee-enable-callback.patch
+net-ipv4-use-a-dedicated-counter-for-icmp_v4-redirect-packets.patch
--- /dev/null
+From foo@baz Thu Feb 21 09:43:21 CET 2019
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Tue, 19 Feb 2019 23:45:29 +0800
+Subject: sky2: Increase D3 delay again
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+[ Upstream commit 1765f5dcd00963e33f1b8a4e0f34061fbc0e2f7f ]
+
+Another platform requires even longer delay to make the device work
+correctly after S3.
+
+So increase the delay to 300ms.
+
+BugLink: https://bugs.launchpad.net/bugs/1798921
+
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/sky2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pd
+ INIT_WORK(&hw->restart_work, sky2_restart);
+
+ pci_set_drvdata(pdev, hw);
+- pdev->d3_delay = 200;
++ pdev->d3_delay = 300;
+
+ return 0;
+
--- /dev/null
+From foo@baz Thu Feb 21 07:26:37 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 15 Feb 2019 13:36:20 -0800
+Subject: tcp: clear icsk_backoff in tcp_write_queue_purge()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 04c03114be82194d4a4858d41dba8e286ad1787c ]
+
+soukjin bae reported a crash in tcp_v4_err() handling
+ICMP_DEST_UNREACH after tcp_write_queue_head(sk)
+returned a NULL pointer.
+
+Current logic should have prevented this :
+
+ if (seq != tp->snd_una || !icsk->icsk_retransmits ||
+ !icsk->icsk_backoff || fastopen)
+ break;
+
+Problem is the write queue might have been purged
+and icsk_backoff has not been cleared.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: soukjin bae <soukjin.bae@samsung.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 1 +
+ net/ipv4/tcp.c | 1 -
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1457,6 +1457,7 @@ static inline void tcp_write_queue_purge
+ sk_wmem_free_skb(sk, skb);
+ sk_mem_reclaim(sk);
+ tcp_clear_all_retrans_hints(tcp_sk(sk));
++ inet_csk(sk)->icsk_backoff = 0;
+ }
+
+ static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2253,7 +2253,6 @@ int tcp_disconnect(struct sock *sk, int
+ tp->write_seq += tp->max_window + 2;
+ if (tp->write_seq == 0)
+ tp->write_seq = 1;
+- icsk->icsk_backoff = 0;
+ tp->snd_cwnd = 2;
+ icsk->icsk_probes_out = 0;
+ tp->packets_out = 0;
--- /dev/null
+From foo@baz Thu Feb 21 08:41:54 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 15 Feb 2019 13:36:21 -0800
+Subject: tcp: tcp_v4_err() should be more careful
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2c4cc9712364c051b1de2d175d5fbea6be948ebf ]
+
+ICMP handlers are not very often stressed, we should
+make them more resilient to bugs that might surface in
+the future.
+
+If there is no packet in retransmit queue, we should
+avoid a NULL deref.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: soukjin bae <soukjin.bae@samsung.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -466,14 +466,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb
+ if (sock_owned_by_user(sk))
+ break;
+
++ skb = tcp_write_queue_head(sk);
++ if (WARN_ON_ONCE(!skb))
++ break;
++
+ icsk->icsk_backoff--;
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+ TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+- skb = tcp_write_queue_head(sk);
+- BUG_ON(!skb);
+-
+ remaining = icsk->icsk_rto -
+ min(icsk->icsk_rto,
+ tcp_time_stamp - tcp_skb_timestamp(skb));
--- /dev/null
+From foo@baz Thu Feb 21 08:41:54 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Feb 2019 12:27:38 -0800
+Subject: vxlan: test dev->flags & IFF_UP before calling netif_rx()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4179cb5a4c924cd233eaadd081882425bc98f44e ]
+
+netif_rx() must be called under a strict contract.
+
+At device dismantle phase, core networking clears IFF_UP
+and flush_all_backlogs() is called after rcu grace period
+to make sure no incoming packet might be in a cpu backlog
+and still referencing the device.
+
+Most drivers call netif_rx() from their interrupt handler,
+and since the interrupts are disabled at device dismantle,
+netif_rx() does not have to check dev->flags & IFF_UP
+
+Virtual drivers do not have this guarantee, and must
+therefore make the check themselves.
+
+Otherwise we risk use-after-free and/or crashes.
+
+Note this patch also fixes a small issue that came
+with commit ce6502a8f957 ("vxlan: fix a use after free
+in vxlan_encap_bypass"), since the dev->stats.rx_dropped
+change was done on the wrong device.
+
+Fixes: d342894c5d2f ("vxlan: virtual extensible lan")
+Fixes: ce6502a8f957 ("vxlan: fix a use after free in vxlan_encap_bypass")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Petr Machata <petrm@mellanox.com>
+Cc: Ido Schimmel <idosch@mellanox.com>
+Cc: Roopa Prabhu <roopa@cumulusnetworks.com>
+Cc: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1881,7 +1881,7 @@ static void vxlan_encap_bypass(struct sk
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+- struct net_device *dev = skb->dev;
++ struct net_device *dev;
+ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+@@ -1901,8 +1901,15 @@ static void vxlan_encap_bypass(struct sk
+ #endif
+ }
+
++ rcu_read_lock();
++ dev = skb->dev;
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ kfree_skb(skb);
++ goto drop;
++ }
++
+ if (dst_vxlan->flags & VXLAN_F_LEARN)
+- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
++ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source);
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+@@ -1915,8 +1922,10 @@ static void vxlan_encap_bypass(struct sk
+ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
++drop:
+ dev->stats.rx_dropped++;
+ }
++ rcu_read_unlock();
+ }
+
+ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,