4.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 17 Oct 2020 11:05:48 +0000 (13:05 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 17 Oct 2020 11:05:48 +0000 (13:05 +0200)
added patches:
ibmveth-identify-ingress-large-send-packets.patch
net-ipv4-always-honour-route-mtu-during-forwarding.patch
r8169-fix-data-corruption-issue-on-rtl8402.patch
tipc-fix-the-skb_unshare-in-tipc_buf_append.patch

queue-4.9/ibmveth-identify-ingress-large-send-packets.patch [new file with mode: 0644]
queue-4.9/net-ipv4-always-honour-route-mtu-during-forwarding.patch [new file with mode: 0644]
queue-4.9/r8169-fix-data-corruption-issue-on-rtl8402.patch [new file with mode: 0644]
queue-4.9/series [new file with mode: 0644]
queue-4.9/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch [new file with mode: 0644]

diff --git a/queue-4.9/ibmveth-identify-ingress-large-send-packets.patch b/queue-4.9/ibmveth-identify-ingress-large-send-packets.patch
new file mode 100644 (file)
index 0000000..3359a2b
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Sat Oct 17 01:04:11 PM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:14 -0700
+Subject: ibmveth: Identify ingress large send packets.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 413f142cc05cb03f2d1ea83388e40c1ddc0d74e9 ]
+
+Ingress large send packets are identified either by the
+IBMVETH_RXQ_LRG_PKT flag in the receive buffer or by a -1 placed in
+the IP header checksum. Which method is used depends on the firmware
+version. Frame geometry and sufficient header validation are
+performed by the hypervisor, eliminating the need for further header
+checks here.
+
+Fixes: 7b5967389f5a ("ibmveth: set correct gso_size and gso_type")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1256,6 +1256,7 @@ static int ibmveth_poll(struct napi_stru
+                       int offset = ibmveth_rxq_frame_offset(adapter);
+                       int csum_good = ibmveth_rxq_csum_good(adapter);
+                       int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++                      __sum16 iph_check = 0;
+                       skb = ibmveth_rxq_get_buffer(adapter);
+@@ -1307,7 +1308,17 @@ static int ibmveth_poll(struct napi_stru
+                               }
+                       }
+-                      if (length > netdev->mtu + ETH_HLEN) {
++                      /* PHYP without PLSO support places a -1 in the ip
++                       * checksum for large send frames.
++                       */
++                      if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++                              struct iphdr *iph = (struct iphdr *)skb->data;
++
++                              iph_check = iph->check;
++                      }
++
++                      if ((length > netdev->mtu + ETH_HLEN) ||
++                          lrg_pkt || iph_check == 0xffff) {
+                               ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+                               adapter->rx_large_packets++;
+                       }
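The detection rule the hunks above encode is a three-way OR: frame
length, receive-queue flag, or the checksum sentinel. A minimal
standalone C sketch of that rule, with illustrative names rather than
the driver's actual API:

  #include <stdbool.h>
  #include <stdint.h>

  /* A frame counts as an ingress large-send if it exceeds
   * MTU + Ethernet header, if firmware set the large-packet flag in
   * the receive queue, or if firmware placed -1 (0xffff) in the IPv4
   * header checksum (the convention used by PHYP without PLSO). */
  static bool is_ingress_large_send(unsigned int frame_len,
                                    unsigned int mtu,
                                    unsigned int eth_hlen,
                                    bool lrg_pkt_flag,
                                    uint16_t iph_check)
  {
          return frame_len > mtu + eth_hlen ||
                 lrg_pkt_flag ||
                 iph_check == 0xffff;
  }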
diff --git a/queue-4.9/net-ipv4-always-honour-route-mtu-during-forwarding.patch b/queue-4.9/net-ipv4-always-honour-route-mtu-during-forwarding.patch
new file mode 100644 (file)
index 0000000..643dc2a
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Sat Oct 17 01:04:11 PM CEST 2020
+From: "Maciej Żenczykowski" <maze@google.com>
+Date: Wed, 23 Sep 2020 13:18:15 -0700
+Subject: net/ipv4: always honour route mtu during forwarding
+
+From: "Maciej Żenczykowski" <maze@google.com>
+
+[ Upstream commit 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0 ]
+
+Documentation/networking/ip-sysctl.txt:46 says:
+  ip_forward_use_pmtu - BOOLEAN
+    By default we don't trust protocol path MTUs while forwarding
+    because they could be easily forged and can lead to unwanted
+    fragmentation by the router.
+    You only need to enable this if you have user-space software
+    which tries to discover path mtus by itself and depends on the
+    kernel honoring this information. This is normally not the case.
+    Default: 0 (disabled)
+    Possible values:
+    0 - disabled
+    1 - enabled
+
+Which makes it pretty clear that setting it to 1 is a potential
+security/safety/DoS issue, and yet it is entirely reasonable to want
+forwarded traffic to honour explicitly administrator-configured
+route mtus (instead of defaulting to the device mtu).
+
+Indeed, I can't think of a single reason why you wouldn't want to.
+Since you configured a route mtu you probably know better...
+
+It is pretty common to have a higher device mtu to allow receiving
+large (jumbo) frames, while having some routes via that interface
+(potentially including the default route to the internet) specify
+a lower mtu.
+
+Note that ipv6 forwarding uses device mtu unless the route is locked
+(in which case it will use the route mtu).
+
+This approach is not usable for IPv4 where an 'mtu lock' on a route
+also has the side effect of disabling TCP path mtu discovery via
+disabling the IPv4 DF (don't frag) bit on all outgoing frames.
+
+I'm not aware of a way to lock a route from an IPv6 RA, so that also
+potentially seems wrong.
+
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Sunmeet Gill (Sunny) <sgill@quicinc.com>
+Cc: Vinay Paradkar <vparadka@qti.qualcomm.com>
+Cc: Tyler Wear <twear@quicinc.com>
+Cc: David Ahern <dsahern@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -342,12 +342,18 @@ static inline unsigned int ip_dst_mtu_ma
+                                                   bool forwarding)
+ {
+       struct net *net = dev_net(dst->dev);
++      unsigned int mtu;
+       if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+           ip_mtu_locked(dst) ||
+           !forwarding)
+               return dst_mtu(dst);
++      /* 'forwarding = true' case should always honour route mtu */
++      mtu = dst_metric_raw(dst, RTAX_MTU);
++      if (mtu)
++              return mtu;
++
+       return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
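The selection order the patched helper implements can be modelled in
isolation. This is a userspace sketch of the logic, not the kernel
function; it assumes the hunk's convention that a route mtu of zero
means "not configured":

  #include <stdio.h>

  #define IP_MAX_MTU 0xFFFFU   /* 65535 */

  /* Forwarding without ip_forward_use_pmtu: an explicitly configured
   * route mtu wins; otherwise fall back to the device mtu capped at
   * IP_MAX_MTU. */
  static unsigned int fwd_mtu(unsigned int route_mtu, unsigned int dev_mtu)
  {
          if (route_mtu)
                  return route_mtu;
          return dev_mtu < IP_MAX_MTU ? dev_mtu : IP_MAX_MTU;
  }

  int main(void)
  {
          printf("%u\n", fwd_mtu(1500, 9000)); /* jumbo device, 1500 route -> 1500 */
          printf("%u\n", fwd_mtu(0, 9000));    /* no route mtu -> 9000 */
          return 0;
  }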
diff --git a/queue-4.9/r8169-fix-data-corruption-issue-on-rtl8402.patch b/queue-4.9/r8169-fix-data-corruption-issue-on-rtl8402.patch
new file mode 100644 (file)
index 0000000..35159ae
--- /dev/null
@@ -0,0 +1,154 @@
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 1 Oct 2020 09:23:02 +0200
+Subject: r8169: fix data corruption issue on RTL8402
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ef9da46ddef071e1bbb943afbbe9b38771855554 ]
+
+Petr reported that, after resume from suspend, the RTL8402 partially
+truncates incoming packets, and that re-initializing the RxConfig
+register before the actual chip re-initialization sequence is needed
+to avoid the issue.
+
+Reported-by: Petr Tesarik <ptesarik@suse.cz>
+Proposed-by: Petr Tesarik <ptesarik@suse.cz>
+Tested-by: Petr Tesarik <ptesarik@suse.cz>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c |  108 ++++++++++++++++++-----------------
+ 1 file changed, 56 insertions(+), 52 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4476,6 +4476,58 @@ static void rtl_rar_set(struct rtl8169_p
+       rtl_unlock_work(tp);
+ }
++static void rtl_init_rxcfg(struct rtl8169_private *tp)
++{
++      void __iomem *ioaddr = tp->mmio_addr;
++
++      switch (tp->mac_version) {
++      case RTL_GIGA_MAC_VER_01:
++      case RTL_GIGA_MAC_VER_02:
++      case RTL_GIGA_MAC_VER_03:
++      case RTL_GIGA_MAC_VER_04:
++      case RTL_GIGA_MAC_VER_05:
++      case RTL_GIGA_MAC_VER_06:
++      case RTL_GIGA_MAC_VER_10:
++      case RTL_GIGA_MAC_VER_11:
++      case RTL_GIGA_MAC_VER_12:
++      case RTL_GIGA_MAC_VER_13:
++      case RTL_GIGA_MAC_VER_14:
++      case RTL_GIGA_MAC_VER_15:
++      case RTL_GIGA_MAC_VER_16:
++      case RTL_GIGA_MAC_VER_17:
++              RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
++              break;
++      case RTL_GIGA_MAC_VER_18:
++      case RTL_GIGA_MAC_VER_19:
++      case RTL_GIGA_MAC_VER_20:
++      case RTL_GIGA_MAC_VER_21:
++      case RTL_GIGA_MAC_VER_22:
++      case RTL_GIGA_MAC_VER_23:
++      case RTL_GIGA_MAC_VER_24:
++      case RTL_GIGA_MAC_VER_34:
++      case RTL_GIGA_MAC_VER_35:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
++              break;
++      case RTL_GIGA_MAC_VER_40:
++      case RTL_GIGA_MAC_VER_41:
++      case RTL_GIGA_MAC_VER_42:
++      case RTL_GIGA_MAC_VER_43:
++      case RTL_GIGA_MAC_VER_44:
++      case RTL_GIGA_MAC_VER_45:
++      case RTL_GIGA_MAC_VER_46:
++      case RTL_GIGA_MAC_VER_47:
++      case RTL_GIGA_MAC_VER_48:
++      case RTL_GIGA_MAC_VER_49:
++      case RTL_GIGA_MAC_VER_50:
++      case RTL_GIGA_MAC_VER_51:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
++              break;
++      default:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
++              break;
++      }
++}
++
+ static int rtl_set_mac_address(struct net_device *dev, void *p)
+ {
+       struct rtl8169_private *tp = netdev_priv(dev);
+@@ -4494,6 +4546,10 @@ static int rtl_set_mac_address(struct ne
+       pm_runtime_put_noidle(d);
++      /* Reportedly at least Asus X453MA truncates packets otherwise */
++      if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++              rtl_init_rxcfg(tp);
++
+       return 0;
+ }
+@@ -4930,58 +4986,6 @@ static void rtl_init_pll_power_ops(struc
+               break;
+       }
+ }
+-
+-static void rtl_init_rxcfg(struct rtl8169_private *tp)
+-{
+-      void __iomem *ioaddr = tp->mmio_addr;
+-
+-      switch (tp->mac_version) {
+-      case RTL_GIGA_MAC_VER_01:
+-      case RTL_GIGA_MAC_VER_02:
+-      case RTL_GIGA_MAC_VER_03:
+-      case RTL_GIGA_MAC_VER_04:
+-      case RTL_GIGA_MAC_VER_05:
+-      case RTL_GIGA_MAC_VER_06:
+-      case RTL_GIGA_MAC_VER_10:
+-      case RTL_GIGA_MAC_VER_11:
+-      case RTL_GIGA_MAC_VER_12:
+-      case RTL_GIGA_MAC_VER_13:
+-      case RTL_GIGA_MAC_VER_14:
+-      case RTL_GIGA_MAC_VER_15:
+-      case RTL_GIGA_MAC_VER_16:
+-      case RTL_GIGA_MAC_VER_17:
+-              RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+-              break;
+-      case RTL_GIGA_MAC_VER_18:
+-      case RTL_GIGA_MAC_VER_19:
+-      case RTL_GIGA_MAC_VER_20:
+-      case RTL_GIGA_MAC_VER_21:
+-      case RTL_GIGA_MAC_VER_22:
+-      case RTL_GIGA_MAC_VER_23:
+-      case RTL_GIGA_MAC_VER_24:
+-      case RTL_GIGA_MAC_VER_34:
+-      case RTL_GIGA_MAC_VER_35:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+-              break;
+-      case RTL_GIGA_MAC_VER_40:
+-      case RTL_GIGA_MAC_VER_41:
+-      case RTL_GIGA_MAC_VER_42:
+-      case RTL_GIGA_MAC_VER_43:
+-      case RTL_GIGA_MAC_VER_44:
+-      case RTL_GIGA_MAC_VER_45:
+-      case RTL_GIGA_MAC_VER_46:
+-      case RTL_GIGA_MAC_VER_47:
+-      case RTL_GIGA_MAC_VER_48:
+-      case RTL_GIGA_MAC_VER_49:
+-      case RTL_GIGA_MAC_VER_50:
+-      case RTL_GIGA_MAC_VER_51:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+-              break;
+-      default:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+-              break;
+-      }
+-}
+ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+ {
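Most of the diff above only relocates rtl_init_rxcfg() ahead of its
new call site, since C requires a definition or declaration before
use; the functional change is the single early call on the RTL8402
(RTL_GIGA_MAC_VER_37) path. A schematic of the ordering it
establishes, with illustrative names only:

  /* Schematic, not driver code: the point is the ordering. */
  static void init_rxcfg(void)
  {
          /* program the RxConfig register per chip version */
  }

  static void resume_path(void)
  {
          init_rxcfg(); /* RTL8402: must run before the chip
                         * re-initialization sequence, otherwise
                         * incoming packets are truncated */
          /* ... normal chip re-initialization ... */
  }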
diff --git a/queue-4.9/series b/queue-4.9/series
new file mode 100644 (file)
index 0000000..9374b3b
--- /dev/null
@@ -0,0 +1,4 @@
+ibmveth-identify-ingress-large-send-packets.patch
+tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
+net-ipv4-always-honour-route-mtu-during-forwarding.patch
+r8169-fix-data-corruption-issue-on-rtl8402.patch
diff --git a/queue-4.9/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch b/queue-4.9/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
new file mode 100644 (file)
index 0000000..85ec71c
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat Oct 17 01:04:11 PM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 21:12:50 -0700
+Subject: tipc: fix the skb_unshare() in tipc_buf_append()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ed42989eab57d619667d7e87dfbd8fe207db54fe ]
+
+skb_unshare() drops a reference count on the old skb unconditionally,
+so in the failure case, we end up freeing the skb twice here.
+And because the skb is allocated as an fclone and cloned by the caller
+tipc_msg_reassemble(), the consequence is actually freeing the
+original skb too, thus triggering the use-after-free (UAF) reported
+by syzbot.
+
+Fix this by replacing this skb_unshare() with skb_cloned()+skb_copy().
+
+Fixes: ff48b6222e65 ("tipc: use skb_unshare() instead in tipc_buf_append()")
+Reported-and-tested-by: syzbot+e96a7ba46281824cc46a@syzkaller.appspotmail.com
+Cc: Jon Maloy <jmaloy@redhat.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **hea
+       if (fragid == FIRST_FRAGMENT) {
+               if (unlikely(head))
+                       goto err;
+-              frag = skb_unshare(frag, GFP_ATOMIC);
++              if (skb_cloned(frag))
++                      frag = skb_copy(frag, GFP_ATOMIC);
+               if (unlikely(!frag))
+                       goto err;
+               head = *headbuf = frag;
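The fix hinges on an ownership difference: skb_unshare() consumes its
argument even when the internal copy fails, while skb_copy() leaves
the original untouched on failure, so the error path frees the buffer
exactly once. A toy model of the two contracts in plain C, with no
relation to the real skb API:

  #include <stdlib.h>

  struct buf { int cloned; };

  /* skb_unshare()-like contract: 'b' is always consumed. If the copy
   * fails (returns NULL), a later free of 'b' by the caller is a
   * double free -- the bug this patch removes. */
  static struct buf *unshare(struct buf *b)
  {
          struct buf *copy = malloc(sizeof(*b));
          if (copy)
                  *copy = *b;
          free(b);        /* dropped unconditionally */
          return copy;    /* may be NULL */
  }

  /* The fix's contract: copy only when needed, and on failure leave
   * 'b' owned by the caller so it is freed exactly once. */
  static struct buf *copy_if_cloned(struct buf *b)
  {
          struct buf *copy;

          if (!b->cloned)
                  return b;
          copy = malloc(sizeof(*b));
          if (copy)
                  *copy = *b;
          return copy;    /* NULL on failure; 'b' intact */
  }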