git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Oct 2016 15:57:43 +0000 (11:57 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Oct 2016 15:57:43 +0000 (11:57 -0400)
added patches:
ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch
perf-stat-fix-interval-output-values.patch
powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch
tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch
tunnels-remove-encapsulation-offloads-on-decap.patch

queue-4.4/ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch [new file with mode: 0644]
queue-4.4/perf-stat-fix-interval-output-values.patch [new file with mode: 0644]
queue-4.4/powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch [new file with mode: 0644]
queue-4.4/tunnels-remove-encapsulation-offloads-on-decap.patch [new file with mode: 0644]

diff --git a/queue-4.4/ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch b/queue-4.4/ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch
new file mode 100644 (file)
index 0000000..195a425
--- /dev/null
+++ b/queue-4.4/ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch
@@ -0,0 +1,54 @@
+From b8cba75bdf6a48ea4811bbefb11a94a5c7281b68 Mon Sep 17 00:00:00 2001
+From: Jesse Gross <jesse@kernel.org>
+Date: Sat, 19 Mar 2016 09:32:00 -0700
+Subject: ipip: Properly mark ipip GRO packets as encapsulated.
+
+From: Jesse Gross <jesse@kernel.org>
+
+commit b8cba75bdf6a48ea4811bbefb11a94a5c7281b68 upstream.
+
+ipip encapsulated packets can be merged together by GRO but the result
+does not have the proper GSO type set or even marked as being
+encapsulated at all. Later retransmission of these packets will likely
+fail if the device does not support ipip offloads. This is similar to
+the issue resolved in IPv6 sit in feec0cb3
+("ipv6: gro: support sit protocol").
+
+Reported-by: Patrick Boutilier <boutilpj@ednet.ns.ca>
+Fixes: 9667e9bb ("ipip: Add gro callbacks to ipip offload")
+Tested-by: Patrick Boutilier <boutilpj@ednet.ns.ca>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jesse Gross <jesse@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/af_inet.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1425,6 +1425,13 @@ out_unlock:
+       return err;
+ }
++static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
++{
++      skb->encapsulation = 1;
++      skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP;
++      return inet_gro_complete(skb, nhoff);
++}
++
+ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+                        unsigned short type, unsigned char protocol,
+                        struct net *net)
+@@ -1653,7 +1660,7 @@ static const struct net_offload ipip_off
+       .callbacks = {
+               .gso_segment    = inet_gso_segment,
+               .gro_receive    = inet_gro_receive,
+-              .gro_complete   = inet_gro_complete,
++              .gro_complete   = ipip_gro_complete,
+       },
+ };
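
The fix above is a thin wrapper callback: rather than changing inet_gro_complete() itself, the ipip offload table points at a wrapper that records the encapsulation on the merged skb before delegating. A minimal standalone C sketch of that pattern follows; the struct, flag value and function names are simplified stand-ins for illustration, not the kernel's definitions.

/* Simplified userspace model of the wrapper gro_complete callback above;
 * all types and the flag value are illustrative stand-ins. */
#include <stdio.h>

#define MODEL_GSO_IPIP 0x1u

struct model_skb {
    unsigned int encapsulation:1;
    unsigned int gso_type;
};

typedef int (*gro_complete_fn)(struct model_skb *skb, int nhoff);

/* Stands in for inet_gro_complete(): finishes the merged packet. */
static int inner_gro_complete(struct model_skb *skb, int nhoff)
{
    (void)skb;
    (void)nhoff;
    return 0;
}

/* The wrapper marks the aggregated packet as ipip traffic first, so a
 * later GSO pass knows the tunnel header must be regenerated per segment. */
static int model_ipip_gro_complete(struct model_skb *skb, int nhoff)
{
    skb->encapsulation = 1;
    skb->gso_type |= MODEL_GSO_IPIP;
    return inner_gro_complete(skb, nhoff);
}

int main(void)
{
    struct model_skb skb = { 0, 0 };
    gro_complete_fn complete = model_ipip_gro_complete;

    complete(&skb, 0);
    printf("encapsulation=%u gso_type=%#x\n",
           (unsigned int)skb.encapsulation, skb.gso_type);
    return 0;
}

Keeping the logic in a wrapper means the shared inet_gro_complete() path stays untouched for plain IPv4 traffic; only the ipip offload entry pays for the extra marking.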
diff --git a/queue-4.4/perf-stat-fix-interval-output-values.patch b/queue-4.4/perf-stat-fix-interval-output-values.patch
new file mode 100644 (file)
index 0000000..b39360c
--- /dev/null
+++ b/queue-4.4/perf-stat-fix-interval-output-values.patch
@@ -0,0 +1,94 @@
+From 51fd2df1e882a3c2a3f4b6c9ff243a93c9046dba Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 3 Feb 2016 08:43:56 +0100
+Subject: perf stat: Fix interval output values
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 51fd2df1e882a3c2a3f4b6c9ff243a93c9046dba upstream.
+
+We broke interval data displays with commit:
+
+  3f416f22d1e2 ("perf stat: Do not clean event's private stats")
+
+This commit removed stats cleaning, which is important for '-r' option
+to carry counters data over the whole run. But it's necessary to clean
+it for interval mode, otherwise the displayed value is avg of all
+previous values.
+
+Before:
+  $ perf stat -e cycles -a -I 1000 record
+  #           time             counts unit events
+       1.000240796         75,216,287      cycles
+       2.000512791        107,823,524      cycles
+
+  $ perf stat report
+  #           time             counts unit events
+       1.000240796         75,216,287      cycles
+       2.000512791         91,519,906      cycles
+
+Now:
+  $ perf stat report
+  #           time             counts unit events
+       1.000240796         75,216,287      cycles
+       2.000512791        107,823,524      cycles
+
+Notice the second value being bigger (91,.. < 107,..).
+
+This could be easily verified by using perf script which displays raw
+stat data:
+
+  $ perf script
+  CPU  THREAD       VAL         ENA         RUN        TIME EVENT
+    0      -1  23855779  1000209530  1000209530  1000240796 cycles
+    1      -1  33340397  1000224964  1000224964  1000240796 cycles
+    2      -1  15835415  1000226695  1000226695  1000240796 cycles
+    3      -1   2184696  1000228245  1000228245  1000240796 cycles
+    0      -1  97014312  2000514533  2000514533  2000512791 cycles
+    1      -1  46121497  2000543795  2000543795  2000512791 cycles
+    2      -1  32269530  2000543566  2000543566  2000512791 cycles
+    3      -1   7634472  2000544108  2000544108  2000512791 cycles
+
+The sum of the first 4 values is the first interval aggregated value:
+
+  23855779 + 33340397 + 15835415 + 2184696 = 75,216,287
+
+The sum of the second 4 values minus first value is the second interval
+aggregated value:
+
+  97014312 + 46121497 + 32269530 + 7634472 - 75216287 = 107,823,524
+
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Stephane Eranian <eranian@google.com>
+Link: http://lkml.kernel.org/r/1454485436-20639-1-git-send-email-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jeremy Linton <jeremy.linton@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/stat.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -311,6 +311,16 @@ int perf_stat_process_counter(struct per
+       aggr->val = aggr->ena = aggr->run = 0;
++      /*
++       * We calculate counter's data every interval,
++       * and the display code shows ps->res_stats
++       * avg value. We need to zero the stats for
++       * interval mode, otherwise overall avg running
++       * averages will be shown for each interval.
++       */
++      if (config->interval)
++              init_stats(ps->res_stats);
++
+       if (counter->per_pkg)
+               zero_per_pkg(counter);
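
The worked sums above are the whole story: ps->res_stats is a running average that lives for the entire perf stat run, so without re-initialising it each interval the printed counts drift toward the overall mean. Below is a self-contained C model of that running average; the struct and function names only mirror the idea and are not perf's internal API.

/* Toy running-average model; names are illustrative, not from tools/perf. */
#include <stdio.h>

struct run_stats {
    double mean;
    unsigned long n;
};

static void init_stats(struct run_stats *s)
{
    s->mean = 0.0;
    s->n = 0;
}

static void update_stats(struct run_stats *s, double val)
{
    s->n++;
    s->mean += (val - s->mean) / (double)s->n;  /* incremental mean */
}

int main(void)
{
    struct run_stats s;
    const double intervals[] = { 75216287.0, 107823524.0 };

    /* Without init_stats() per interval, the second report is the mean of
     * both intervals: (75216287 + 107823524) / 2 = 91519905.5, i.e. the
     * 91,519,906 shown in the broken "Before" output above. */
    init_stats(&s);
    for (int i = 0; i < 2; i++) {
        update_stats(&s, intervals[i]);
        printf("interval %d: %.0f\n", i + 1, s.mean);
    }
    return 0;
}

Calling init_stats() at the top of each iteration, as the patch does whenever config->interval is set, makes the second line print 107823524 instead of the averaged value.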
diff --git a/queue-4.4/powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch b/queue-4.4/powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch
new file mode 100644 (file)
index 0000000..6c67205
--- /dev/null
+++ b/queue-4.4/powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch
@@ -0,0 +1,58 @@
+From 04fec21c06e35b169a83e75a84a015ab4606bf5e Mon Sep 17 00:00:00 2001
+From: Russell Currey <ruscur@russell.cc>
+Date: Mon, 12 Sep 2016 14:17:22 +1000
+Subject: powerpc/eeh: Null check uses of eeh_pe_bus_get
+
+From: Russell Currey <ruscur@russell.cc>
+
+commit 04fec21c06e35b169a83e75a84a015ab4606bf5e upstream.
+
+eeh_pe_bus_get() can return NULL if a PCI bus isn't found for a given PE.
+Some callers don't check this, and can cause a null pointer dereference
+under certain circumstances.
+
+Fix this by checking NULL everywhere eeh_pe_bus_get() is called.
+
+Fixes: 8a6b1bc70dbb ("powerpc/eeh: EEH core to handle special event")
+Cc: stable@vger.kernel.org # v3.11+
+Signed-off-by: Russell Currey <ruscur@russell.cc>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/eeh_driver.c             |    8 ++++++++
+ arch/powerpc/platforms/powernv/eeh-powernv.c |    5 +++++
+ 2 files changed, 13 insertions(+)
+
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -909,6 +909,14 @@ static void eeh_handle_special_event(voi
+                               /* Notify all devices to be down */
+                               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
+                               bus = eeh_pe_bus_get(phb_pe);
++                              if (!bus) {
++                                      pr_err("%s: Cannot find PCI bus for "
++                                             "PHB#%d-PE#%x\n",
++                                             __func__,
++                                             pe->phb->global_number,
++                                             pe->addr);
++                                      break;
++                              }
+                               eeh_pe_dev_traverse(pe,
+                                       eeh_report_failure, NULL);
+                               pcibios_remove_pci_devices(bus);
+--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
+@@ -956,6 +956,11 @@ static int pnv_eeh_reset(struct eeh_pe *
+               }
+               bus = eeh_pe_bus_get(pe);
++              if (!bus) {
++                      pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
++                             __func__, pe->phb->global_number, pe->addr);
++                      return -EIO;
++              }
+               if (pci_is_root_bus(bus) ||
+                       pci_is_root_bus(bus->parent))
+                       ret = pnv_eeh_root_reset(hose, option);
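
Both hunks apply the same check-and-bail shape: a lookup that can legitimately fail is verified before the bus pointer is used, with a diagnostic naming the PHB and PE. A compact standalone C sketch of that shape follows; the lookup and its callers are placeholders, not the real eeh_pe_bus_get() path.

/* Minimal model of the NULL-check-and-bail pattern; the lookup is a
 * placeholder, not the real eeh_pe_bus_get(). */
#include <errno.h>
#include <stdio.h>

struct model_bus { int number; };

static struct model_bus *lookup_bus(unsigned int pe_addr)
{
    (void)pe_addr;
    return NULL;        /* simulate the lookup failing */
}

static int model_reset_pe(int phb, unsigned int pe_addr)
{
    struct model_bus *bus = lookup_bus(pe_addr);

    if (!bus) {
        /* Report and bail out instead of dereferencing NULL. */
        fprintf(stderr, "%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
                __func__, phb, pe_addr);
        return -EIO;
    }
    return bus->number;
}

int main(void)
{
    return model_reset_pe(0, 0xfdu) == -EIO ? 0 : 1;
}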
diff --git a/queue-4.4/series b/queue-4.4/series
index ee0c9e8efb2f3ccc4ddc52aae9d65c89b617044e..6ad7807ee76ccba81a03eeb47159295b7c3d1d92 100644 (file)
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -17,3 +17,8 @@ drm-i915-unalias-obj-phys_handle-and-obj-userptr.patch
 mm-hugetlb-fix-memory-offline-with-hugepage-size-memory-block-size.patch
 brcmfmac-avoid-potential-stack-overflow-in-brcmf_cfg80211_start_ap.patch
 posix_acl-clear-sgid-bit-when-setting-file-permissions.patch
+ipip-properly-mark-ipip-gro-packets-as-encapsulated.patch
+tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch
+tunnels-remove-encapsulation-offloads-on-decap.patch
+powerpc-eeh-null-check-uses-of-eeh_pe_bus_get.patch
+perf-stat-fix-interval-output-values.patch
diff --git a/queue-4.4/tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch b/queue-4.4/tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch
new file mode 100644 (file)
index 0000000..4c5869f
--- /dev/null
+++ b/queue-4.4/tunnels-don-t-apply-gro-to-multiple-layers-of-encapsulation.patch
@@ -0,0 +1,159 @@
+From fac8e0f579695a3ecbc4d3cac369139d7f819971 Mon Sep 17 00:00:00 2001
+From: Jesse Gross <jesse@kernel.org>
+Date: Sat, 19 Mar 2016 09:32:01 -0700
+Subject: tunnels: Don't apply GRO to multiple layers of encapsulation.
+
+From: Jesse Gross <jesse@kernel.org>
+
+commit fac8e0f579695a3ecbc4d3cac369139d7f819971 upstream.
+
+When drivers express support for TSO of encapsulated packets, they
+only mean that they can do it for one layer of encapsulation.
+Supporting additional levels would mean updating, at a minimum,
+more IP length fields and they are unaware of this.
+
+No encapsulation device expresses support for handling offloaded
+encapsulated packets, so we won't generate these types of frames
+in the transmit path. However, GRO doesn't have a check for
+multiple levels of encapsulation and will attempt to build them.
+
+UDP tunnel GRO actually does prevent this situation but it only
+handles multiple UDP tunnels stacked on top of each other. This
+generalizes that solution to prevent any kind of tunnel stacking
+that would cause problems.
+
+Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
+Signed-off-by: Jesse Gross <jesse@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/netdevice.h |    4 ++--
+ net/core/dev.c            |    2 +-
+ net/ipv4/af_inet.c        |   15 ++++++++++++++-
+ net/ipv4/gre_offload.c    |    5 +++++
+ net/ipv4/udp_offload.c    |    6 +++---
+ net/ipv6/ip6_offload.c    |   15 ++++++++++++++-
+ 6 files changed, 39 insertions(+), 8 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1986,8 +1986,8 @@ struct napi_gro_cb {
+       /* This is non-zero if the packet may be of the same flow. */
+       u8      same_flow:1;
+-      /* Used in udp_gro_receive */
+-      u8      udp_mark:1;
++      /* Used in tunnel GRO receive */
++      u8      encap_mark:1;
+       /* GRO checksum is valid */
+       u8      csum_valid:1;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4239,7 +4239,7 @@ static enum gro_result dev_gro_receive(s
+               NAPI_GRO_CB(skb)->same_flow = 0;
+               NAPI_GRO_CB(skb)->flush = 0;
+               NAPI_GRO_CB(skb)->free = 0;
+-              NAPI_GRO_CB(skb)->udp_mark = 0;
++              NAPI_GRO_CB(skb)->encap_mark = 0;
+               NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+               /* Setup for GRO checksum validation */
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1383,6 +1383,19 @@ out:
+       return pp;
+ }
++static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
++                                       struct sk_buff *skb)
++{
++      if (NAPI_GRO_CB(skb)->encap_mark) {
++              NAPI_GRO_CB(skb)->flush = 1;
++              return NULL;
++      }
++
++      NAPI_GRO_CB(skb)->encap_mark = 1;
++
++      return inet_gro_receive(head, skb);
++}
++
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+       if (sk->sk_family == AF_INET)
+@@ -1659,7 +1672,7 @@ static struct packet_offload ip_packet_o
+ static const struct net_offload ipip_offload = {
+       .callbacks = {
+               .gso_segment    = inet_gso_segment,
+-              .gro_receive    = inet_gro_receive,
++              .gro_receive    = ipip_gro_receive,
+               .gro_complete   = ipip_gro_complete,
+       },
+ };
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(
+       struct packet_offload *ptype;
+       __be16 type;
++      if (NAPI_GRO_CB(skb)->encap_mark)
++              goto out;
++
++      NAPI_GRO_CB(skb)->encap_mark = 1;
++
+       off = skb_gro_offset(skb);
+       hlen = off + sizeof(*greh);
+       greh = skb_gro_header_fast(skb, off);
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct
+       unsigned int off = skb_gro_offset(skb);
+       int flush = 1;
+-      if (NAPI_GRO_CB(skb)->udp_mark ||
++      if (NAPI_GRO_CB(skb)->encap_mark ||
+           (skb->ip_summed != CHECKSUM_PARTIAL &&
+            NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+            !NAPI_GRO_CB(skb)->csum_valid))
+               goto out;
+-      /* mark that this skb passed once through the udp gro layer */
+-      NAPI_GRO_CB(skb)->udp_mark = 1;
++      /* mark that this skb passed once through the tunnel gro layer */
++      NAPI_GRO_CB(skb)->encap_mark = 1;
+       rcu_read_lock();
+       uo_priv = rcu_dereference(udp_offload_base);
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -258,6 +258,19 @@ out:
+       return pp;
+ }
++static struct sk_buff **sit_gro_receive(struct sk_buff **head,
++                                      struct sk_buff *skb)
++{
++      if (NAPI_GRO_CB(skb)->encap_mark) {
++              NAPI_GRO_CB(skb)->flush = 1;
++              return NULL;
++      }
++
++      NAPI_GRO_CB(skb)->encap_mark = 1;
++
++      return ipv6_gro_receive(head, skb);
++}
++
+ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+       const struct net_offload *ops;
+@@ -302,7 +315,7 @@ static struct packet_offload ipv6_packet
+ static const struct net_offload sit_offload = {
+       .callbacks = {
+               .gso_segment    = ipv6_gso_segment,
+-              .gro_receive    = ipv6_gro_receive,
++              .gro_receive    = sit_gro_receive,
+               .gro_complete   = sit_gro_complete,
+       },
+ };
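
The guard added across ipip, SIT, GRE and UDP GRO above is the same two-step idea everywhere: refuse to aggregate if a tunnel layer has already been peeled in this GRO pass, otherwise set the mark and continue. A standalone C model of that single-shot guard follows; the struct and field names are stand-ins for napi_gro_cb, not the kernel's definitions.

/* Simplified model of the encap_mark guard; struct and fields are
 * stand-ins for napi_gro_cb, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

struct model_gro_cb {
    unsigned int encap_mark:1;
    unsigned int flush:1;
};

/* Returns true if this tunnel layer may run GRO; a second layer in the
 * same pass sets flush and is handed up without aggregation. */
static bool tunnel_gro_allowed(struct model_gro_cb *cb)
{
    if (cb->encap_mark) {
        cb->flush = 1;          /* second encapsulation layer: give up */
        return false;
    }
    cb->encap_mark = 1;         /* first layer: claim the single GRO slot */
    return true;
}

int main(void)
{
    struct model_gro_cb cb = { 0, 0 };

    printf("outer tunnel: %s\n", tunnel_gro_allowed(&cb) ? "GRO" : "flush");
    printf("inner tunnel: %s\n", tunnel_gro_allowed(&cb) ? "GRO" : "flush");
    return 0;
}

The same helper shape backs all four wrappers in the patch: the first tunnel layer in a GRO pass claims the mark, and any further layer is flushed so it reaches the stack unmerged.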
diff --git a/queue-4.4/tunnels-remove-encapsulation-offloads-on-decap.patch b/queue-4.4/tunnels-remove-encapsulation-offloads-on-decap.patch
new file mode 100644 (file)
index 0000000..dd17227
--- /dev/null
+++ b/queue-4.4/tunnels-remove-encapsulation-offloads-on-decap.patch
@@ -0,0 +1,143 @@
+From a09a4c8dd1ec7f830e1fb9e59eb72bddc965d168 Mon Sep 17 00:00:00 2001
+From: Jesse Gross <jesse@kernel.org>
+Date: Sat, 19 Mar 2016 09:32:02 -0700
+Subject: tunnels: Remove encapsulation offloads on decap.
+
+From: Jesse Gross <jesse@kernel.org>
+
+commit a09a4c8dd1ec7f830e1fb9e59eb72bddc965d168 upstream.
+
+If a packet is either locally encapsulated or processed through GRO
+it is marked with the offloads that it requires. However, when it is
+decapsulated these tunnel offload indications are not removed. This
+means that if we receive an encapsulated TCP packet, aggregate it with
+GRO, decapsulate, and retransmit the resulting frame on a NIC that does
+not support encapsulation, we won't be able to take advantage of hardware
+offloads even though it is just a simple TCP packet at this point.
+
+This fixes the problem by stripping off encapsulation offload indications
+when packets are decapsulated.
+
+The performance impacts of this bug are significant. In a test where a
+Geneve encapsulated TCP stream is sent to a hypervisor, GRO'ed, decapsulated,
+and bridged to a VM performance is improved by 60% (5Gbps->8Gbps) as a
+result of avoiding unnecessary segmentation at the VM tap interface.
+
+Reported-by: Ramu Ramamurthy <sramamur@linux.vnet.ibm.com>
+Fixes: 68c33163 ("v4 GRE: Add TCP segmentation offload for GRE")
+Signed-off-by: Jesse Gross <jesse@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(backported from commit a09a4c8dd1ec7f830e1fb9e59eb72bddc965d168)
+[adapt iptunnel_pull_header arguments, avoid 7f290c9]
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip_tunnels.h  |   16 ++++++++++++++++
+ net/ipv4/fou.c            |   13 +++++++++++--
+ net/ipv4/ip_tunnel_core.c |    3 ++-
+ net/ipv6/sit.c            |    5 +++--
+ 4 files changed, 32 insertions(+), 5 deletions(-)
+
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -283,6 +283,22 @@ struct metadata_dst *iptunnel_metadata_r
+ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+                                        int gso_type_mask);
++static inline int iptunnel_pull_offloads(struct sk_buff *skb)
++{
++      if (skb_is_gso(skb)) {
++              int err;
++
++              err = skb_unclone(skb, GFP_ATOMIC);
++              if (unlikely(err))
++                      return err;
++              skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
++                                             NETIF_F_GSO_SHIFT);
++      }
++
++      skb->encapsulation = 0;
++      return 0;
++}
++
+ static inline void iptunnel_xmit_stats(int err,
+                                      struct net_device_stats *err_stats,
+                                      struct pcpu_sw_netstats __percpu *stats)
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(
+       return sk->sk_user_data;
+ }
+-static void fou_recv_pull(struct sk_buff *skb, size_t len)
++static int fou_recv_pull(struct sk_buff *skb, size_t len)
+ {
+       struct iphdr *iph = ip_hdr(skb);
+@@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff
+       __skb_pull(skb, len);
+       skb_postpull_rcsum(skb, udp_hdr(skb), len);
+       skb_reset_transport_header(skb);
++      return iptunnel_pull_offloads(skb);
+ }
+ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
+@@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk,
+       if (!fou)
+               return 1;
+-      fou_recv_pull(skb, sizeof(struct udphdr));
++      if (fou_recv_pull(skb, sizeof(struct udphdr)))
++              goto drop;
+       return -fou->protocol;
++
++drop:
++      kfree_skb(skb);
++      return 0;
+ }
+ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
+@@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk,
+       __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
+       skb_reset_transport_header(skb);
++      if (iptunnel_pull_offloads(skb))
++              goto drop;
++
+       return -guehdr->proto_ctype;
+ drop:
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -116,7 +116,8 @@ int iptunnel_pull_header(struct sk_buff
+       skb->vlan_tci = 0;
+       skb_set_queue_mapping(skb, 0);
+       skb->pkt_type = PACKET_HOST;
+-      return 0;
++
++      return iptunnel_pull_offloads(skb);
+ }
+ EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -681,14 +681,15 @@ static int ipip6_rcv(struct sk_buff *skb
+               skb->mac_header = skb->network_header;
+               skb_reset_network_header(skb);
+               IPCB(skb)->flags = 0;
+-              skb->protocol = htons(ETH_P_IPV6);
++              skb->dev = tunnel->dev;
+               if (packet_is_spoofed(skb, iph, tunnel)) {
+                       tunnel->dev->stats.rx_errors++;
+                       goto out;
+               }
+-              __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
++              if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
++                      goto out;
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {