--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 14 Apr 2015 18:45:00 -0700
+Subject: bnx2x: Fix busy_poll vs netpoll
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 074975d0374333f656c48487aa046a21a9b9d7a1 ]
+
+Commit 9a2620c877454 ("bnx2x: prevent WARN during driver unload")
+switched the napi/busy_lock locking mechanism from spin_lock() to
+spin_lock_bh(), breaking interoperability with netconsole, as netpoll
+disables interrupts prior to calling our napi mechanism.
+
+This switches the driver to using atomic assignments instead of the
+spinlock mechanisms previously employed.
+
+Based on an initial patch from Yuval Mintz & Ariel Elior.
+
+I basically added softirq starvation avoidance and a mixture of
+atomic operations, plain writes and barriers.
+
+Note that this slightly reduces the overhead for this driver when no
+busy_poll sockets are in use.
+
+Fixes: 9a2620c877454 ("bnx2x: prevent WARN during driver unload")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
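+
+For illustration, a minimal user-space sketch of the cmpxchg
+ownership protocol this patch introduces (C11 atomics and made-up
+names, not the kernel's cmpxchg()/set_bit() primitives). The request
+bit is the softirq starvation avoidance: a pending NAPI request keeps
+the busy-poll side from immediately re-taking the queue.
+
+	#include <stdatomic.h>
+	#include <stdbool.h>
+
+	#define ST_NAPI     (1UL << 0)	/* NAPI handler owns the queue */
+	#define ST_NAPI_REQ (1UL << 1)	/* NAPI wants the queue */
+	#define ST_POLL     (1UL << 2)	/* busy_poll owns the queue */
+
+	static _Atomic unsigned long state;
+
+	/* device-poll side: take the queue, or leave a request bit so
+	 * the busy poller cannot keep re-grabbing it (no starvation)
+	 */
+	static bool lock_napi(void)
+	{
+		unsigned long old = atomic_load(&state);
+
+		for (;;) {
+			if (old & ST_POLL) {
+				atomic_fetch_or(&state, ST_NAPI_REQ);
+				return false;	/* poller owns it, retry later */
+			}
+			/* on failure, old is reloaded with the current value */
+			if (atomic_compare_exchange_weak(&state, &old, ST_NAPI))
+				return true;
+		}
+	}
+
+	static void unlock_napi(void)
+	{
+		atomic_store(&state, 0);	/* also drops any request bit */
+	}
+
+	/* busy_poll side: only an idle queue may be taken, so a pending
+	 * ST_NAPI_REQ makes this fail and NAPI wins the next round
+	 */
+	static bool lock_poll(void)
+	{
+		unsigned long idle = 0;
+
+		return atomic_compare_exchange_strong(&state, &idle, ST_POLL);
+	}
+
+	static void unlock_poll(void)
+	{
+		atomic_fetch_and(&state, ~ST_POLL);	/* keep ST_NAPI_REQ */
+	}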
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 135 ++++++++----------------
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9 -
+ 2 files changed, 55 insertions(+), 89 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
+ struct napi_struct napi;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- unsigned int state;
+-#define BNX2X_FP_STATE_IDLE 0
+-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
+-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
+-#define BNX2X_FP_STATE_DISABLED (1 << 2)
+-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+- /* protect state */
+- spinlock_t lock;
+-#endif /* CONFIG_NET_RX_BUSY_POLL */
++ unsigned long busy_poll_state;
++#endif
+
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
+ #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++
++enum bnx2x_fp_state {
++ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
++
++ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
++ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
++
++ BNX2X_STATE_FP_POLL_BIT = 2,
++ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
++
++ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
++};
++
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+- spin_lock_init(&fp->lock);
+- fp->state = BNX2X_FP_STATE_IDLE;
++ WRITE_ONCE(fp->busy_poll_state, 0);
+ }
+
+ /* called from the device poll routine to get ownership of a FP */
+ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
++ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_LOCKED) {
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+- rc = false;
+- } else {
+- /* we don't care if someone yielded */
+- fp->state = BNX2X_FP_STATE_NAPI;
++ while (1) {
++ switch (old) {
++ case BNX2X_STATE_FP_POLL:
++			/* make sure bnx2x_fp_lock_poll() won't starve us */
++ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
++ &fp->busy_poll_state);
++ /* fallthrough */
++ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
++ return false;
++ default:
++ break;
++ }
++ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
++ if (unlikely(prev != old)) {
++ old = prev;
++ continue;
++ }
++ return true;
+ }
+- spin_unlock_bh(&fp->lock);
+- return rc;
+ }
+
+-/* returns true is someone tried to get the FP while napi had it */
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state &
+- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_wmb();
++ fp->busy_poll_state = 0;
+ }
+
+ /* called from bnx2x_low_latency_poll() */
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if ((fp->state & BNX2X_FP_LOCKED)) {
+- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+- rc = false;
+- } else {
+- /* preserve yield marks */
+- fp->state |= BNX2X_FP_STATE_POLL;
+- }
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
+ }
+
+-/* returns true if someone tried to get the FP while it was locked */
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_mb__before_atomic();
++ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
+ }
+
+-/* true if a socket is polling, even if it did not get the lock */
++/* true if a socket is polling */
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+ {
+- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+- return fp->state & BNX2X_FP_USER_PEND;
++ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
+ }
+
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+- int rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_OWNED)
+- rc = false;
+- fp->state |= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
++ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
++ return !bnx2x_fp_ll_polling(fp);
+
+- return rc;
+ }
+ #else
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+ }
+
+@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(st
+ return true;
+ }
+
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(st
+ return false;
+ }
+
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struc
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx
+ int i;
+
+ for_each_eth_queue(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct
+ }
+ }
+
++ bnx2x_fp_unlock_napi(fp);
++
+ /* Fall out from the NAPI loop if needed */
+- if (!bnx2x_fp_unlock_napi(fp) &&
+- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
++ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Alexei Starovoitov <ast@plumgrid.com>
+Date: Tue, 14 Apr 2015 15:57:13 -0700
+Subject: bpf: fix verifier memory corruption
+
+From: Alexei Starovoitov <ast@plumgrid.com>
+
+[ Upstream commit c3de6317d748e23b9e46ba36e10483728d00d144 ]
+
+Due to a missing bounds check, the DAG pass of the BPF verifier can
+corrupt memory, which can cause random crashes during program loading:
+
+[8.449451] BUG: unable to handle kernel paging request at ffffffffffffffff
+[8.451293] IP: [<ffffffff811de33d>] kmem_cache_alloc_trace+0x8d/0x2f0
+[8.452329] Oops: 0000 [#1] SMP
+[8.452329] Call Trace:
+[8.452329] [<ffffffff8116cc82>] bpf_check+0x852/0x2000
+[8.452329] [<ffffffff8116b7e4>] bpf_prog_load+0x1e4/0x310
+[8.452329] [<ffffffff811b190f>] ? might_fault+0x5f/0xb0
+[8.452329] [<ffffffff8116c206>] SyS_bpf+0x806/0xa30
+
+Fixes: f1bca824dabb ("bpf: add search pruning optimization to verifier")
+Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
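+
+The corruption is a one-element out-of-bounds write: explored_states
+is allocated with one slot per instruction, and when the last
+instruction is a call or unconditional jump, t + 1 equals insn_cnt.
+A reduced sketch of the guarded write (standalone, hypothetical
+function name):
+
+	#define STATE_LIST_MARK ((void *)-1L)
+
+	/* explored_states has insn_cnt entries, indexed 0..insn_cnt-1 */
+	static void mark_next_insn(void **explored_states, int insn_cnt,
+				   int t)
+	{
+		/* without this test, t == insn_cnt - 1 writes one element
+		 * past the end of the array and corrupts adjacent memory
+		 */
+		if (t + 1 < insn_cnt)
+			explored_states[t + 1] = STATE_LIST_MARK;
+	}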
+ kernel/bpf/verifier.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1380,7 +1380,8 @@ peek_stack:
+ /* tell verifier to check for equivalent states
+ * after every call and jump
+ */
+- env->explored_states[t + 1] = STATE_LIST_MARK;
++ if (t + 1 < insn_cnt)
++ env->explored_states[t + 1] = STATE_LIST_MARK;
+ } else {
+ /* conditional jump with two edges */
+ ret = push_insn(t, t + 1, FALLTHROUGH, env);
--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 16 Apr 2015 16:12:53 +0800
+Subject: Revert "net: Reset secmark when scrubbing packet"
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 4c0ee414e877b899f7fc80aafb98d9425c02797f ]
+
+This patch reverts commit b8fb4e0648a2ab3734140342002f68fb0c7d1602
+because the secmark must be preserved even when a packet crosses
+namespace boundaries. The reason is that security labels apply to
+the system as a whole and are not per-namespace.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4178,7 +4178,6 @@ void skb_scrub_packet(struct sk_buff *sk
+ skb_dst_drop(skb);
+ skb->mark = 0;
+ skb_sender_cpu_clear(skb);
+- skb_init_secmark(skb);
+ secpath_reset(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 16 Apr 2015 09:03:27 +0800
+Subject: skbuff: Do not scrub skb mark within the same name space
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 213dd74aee765d4e5f3f4b9607fef0cf97faa2af ]
+
+On Wed, Apr 15, 2015 at 05:41:26PM +0200, Nicolas Dichtel wrote:
+> On 15/04/2015 15:57, Herbert Xu wrote:
+> >On Wed, Apr 15, 2015 at 06:22:29PM +0800, Herbert Xu wrote:
+> [snip]
+> >Subject: skbuff: Do not scrub skb mark within the same name space
+> >
+> >The commit ea23192e8e577dfc51e0f4fc5ca113af334edff9 ("tunnels:
+> Maybe add a Fixes tag?
+> Fixes: ea23192e8e57 ("tunnels: harmonize cleanup done on skb on rx path")
+>
+> >harmonize cleanup done on skb on rx path") broke anyone trying to
+> >use netfilter marking across IPv4 tunnels. While most of the
+> >fields that are cleared by skb_scrub_packet don't matter, the
+> >netfilter mark must be preserved.
+> >
+> >This patch rearranges skb_scurb_packet to preserve the mark field.
+> nit: s/scurb/scrub
+>
+> Else it's fine for me.
+
+Sure.
+
+PS I used the wrong email for James the first time around. So
+let me repeat the question here. Should secmark be preserved
+or cleared across tunnels within the same name space? In fact,
+do our security models even support name spaces?
+
+---8<---
+The commit ea23192e8e577dfc51e0f4fc5ca113af334edff9 ("tunnels:
+harmonize cleanup done on skb on rx path") broke anyone trying to
+use netfilter marking across IPv4 tunnels. While most of the
+fields that are cleared by skb_scrub_packet don't matter, the
+netfilter mark must be preserved.
+
+This patch rearranges skb_scrub_packet to preserve the mark field.
+
+Fixes: ea23192e8e57 ("tunnels: harmonize cleanup done on skb on rx path")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
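+
+The restructure splits the scrub in two: state that is always stale
+after a tunnel hop, and identity that only has to go when the packet
+leaves its namespace. A condensed sketch of the resulting control
+flow (clear_path_state() is a made-up stand-in for the field resets):
+
+	static void scrub(struct sk_buff *skb, bool xnet)
+	{
+		clear_path_state(skb);	/* tstamp, dst, nf state, ... */
+
+		if (!xnet)
+			return;	/* same namespace: keep owner and mark */
+
+		/* crossing namespaces: drop identity */
+		skb_orphan(skb);
+		skb->mark = 0;
+	}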
+ net/core/skbuff.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4169,18 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
+ */
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ {
+- if (xnet)
+- skb_orphan(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
+ skb->ignore_df = 0;
+ skb_dst_drop(skb);
+- skb->mark = 0;
+ skb_sender_cpu_clear(skb);
+ secpath_reset(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
++
++ if (!xnet)
++ return;
++
++ skb_orphan(skb);
++ skb->mark = 0;
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 9 Apr 2015 13:31:56 -0700
+Subject: tcp: tcp_make_synack() should clear skb->tstamp
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b50edd7812852d989f2ef09dcfc729690f54a42d ]
+
+I noticed tcpdump was giving funky timestamps for locally
+generated SYNACK messages on loopback interface.
+
+11:42:46.938990 IP 127.0.0.1.48245 > 127.0.0.2.23850: S
+945476042:945476042(0) win 43690 <mss 65495,nop,nop,sackOK,nop,wscale 7>
+
+20:28:58.502209 IP 127.0.0.2.23850 > 127.0.0.1.48245: S
+3160535375:3160535375(0) ack 945476043 win 43690 <mss
+65495,nop,nop,sackOK,nop,wscale 7>
+
+This is because we need to clear skb->tstamp before
+entering the lower stack; otherwise net_timestamp_check()
+does not set skb->tstamp.
+
+Fixes: 7faee5c0d514 ("tcp: remove TCP_SKB_CB(skb)->when")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
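+
+The interaction being fixed: the RX timestamp hook only stamps a
+packet whose tstamp is still zero, so a stale value left in the
+SYNACK skb passes through to tcpdump as-is. A simplified model of
+the check (the real net_timestamp_check() is a macro behind a
+static key):
+
+	static void timestamp_check(struct sk_buff *skb)
+	{
+		if (!skb->tstamp.tv64)		/* only if not already set */
+			__net_timestamp(skb);	/* stamp with current time */
+	}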
+ net/ipv4/tcp_output.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct s
+ }
+ #endif
+
++ /* Do not fool tcpdump (if any), clean our debris */
++ skb->tstamp.tv64 = 0;
+ return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
--- /dev/null
+From foo@baz Tue Apr 21 23:10:35 CEST 2015
+From: Jesse Gross <jesse@nicira.com>
+Date: Thu, 9 Apr 2015 11:19:14 -0700
+Subject: udptunnels: Call handle_offloads after inserting vlan tag.
+
+From: Jesse Gross <jesse@nicira.com>
+
+[ Upstream commit b736a623bd099cdf5521ca9bd03559f3bc7fa31c ]
+
+handle_offloads() calls skb_reset_inner_headers() to store
+the layer pointers to the encapsulated packet. However, we
+currently push the vlan tag (if there is one) onto the packet
+afterwards. This changes the MAC header for the encapsulated
+packet but it is not reflected in skb->inner_mac_header, which
+breaks GSO and drivers which attempt to use this for encapsulation
+offloads.
+
+Fixes: 1eaa8178 ("vxlan: Add tx-vlan offload support.")
+Signed-off-by: Jesse Gross <jesse@nicira.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
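+
+The fix is purely an ordering change, applied the same way in all
+three call sites: skb_reset_inner_headers() (run from the offload
+helpers) snapshots the current header offsets, so the vlan push has
+to happen first. Schematically (vlan push shown as the helper the
+vxlan paths use):
+
+	/* before: offsets recorded, then the vlan push shifts skb->data */
+	skb = iptunnel_handle_offloads(skb, udp_sum, type);
+	skb = vlan_hwaccel_push_inside(skb);	/* moves the MAC header */
+
+	/* after: tag inserted first, recorded inner offsets stay valid */
+	skb = vlan_hwaccel_push_inside(skb);
+	skb = iptunnel_handle_offloads(skb, udp_sum, type);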
+ drivers/net/vxlan.c | 20 ++++++++++----------
+ net/ipv4/geneve.c | 8 ++++----
+ 2 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_en
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb)) {
+- err = -EINVAL;
+- goto err;
+- }
+-
+ skb_scrub_packet(skb, xnet);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_en
+ goto err;
+ }
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb)) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, st
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, st
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+--- a/net/ipv4/geneve.c
++++ b/net/ipv4/geneve.c
+@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *
+ int min_headroom;
+ int err;
+
+- skb = udp_tunnel_handle_offloads(skb, csum);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *
+ if (unlikely(!skb))
+ return -ENOMEM;
+
++ skb = udp_tunnel_handle_offloads(skb, csum);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+