git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 17 Oct 2020 11:03:30 +0000 (13:03 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 17 Oct 2020 11:03:30 +0000 (13:03 +0200)
added patches:
ibmveth-identify-ingress-large-send-packets.patch
ibmveth-switch-order-of-ibmveth_helper-calls.patch
ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
mlx4-handle-non-napi-callers-to-napi_poll.patch
net-ipv4-always-honour-route-mtu-during-forwarding.patch
net-usb-qmi_wwan-add-cellient-mpl200-card.patch
r8169-fix-data-corruption-issue-on-rtl8402.patch
tipc-fix-the-skb_unshare-in-tipc_buf_append.patch

queue-4.14/ibmveth-identify-ingress-large-send-packets.patch [new file with mode: 0644]
queue-4.14/ibmveth-switch-order-of-ibmveth_helper-calls.patch [new file with mode: 0644]
queue-4.14/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch [new file with mode: 0644]
queue-4.14/mlx4-handle-non-napi-callers-to-napi_poll.patch [new file with mode: 0644]
queue-4.14/net-ipv4-always-honour-route-mtu-during-forwarding.patch [new file with mode: 0644]
queue-4.14/net-usb-qmi_wwan-add-cellient-mpl200-card.patch [new file with mode: 0644]
queue-4.14/r8169-fix-data-corruption-issue-on-rtl8402.patch [new file with mode: 0644]
queue-4.14/series [new file with mode: 0644]
queue-4.14/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch [new file with mode: 0644]

diff --git a/queue-4.14/ibmveth-identify-ingress-large-send-packets.patch b/queue-4.14/ibmveth-identify-ingress-large-send-packets.patch
new file mode 100644 (file)
index 0000000..18b2f55
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:14 -0700
+Subject: ibmveth: Identify ingress large send packets.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 413f142cc05cb03f2d1ea83388e40c1ddc0d74e9 ]
+
+Ingress large send packets are identified by either:
+The IBMVETH_RXQ_LRG_PKT flag in the receive buffer
+or with a -1 placed in the ip header checksum.
+The method used depends on firmware version. Frame
+geometry and sufficient header validation is performed by the
+hypervisor eliminating the need for further header checks here.
+
+Fixes: 7b5967389f5a ("ibmveth: set correct gso_size and gso_type")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1330,6 +1330,7 @@ static int ibmveth_poll(struct napi_stru
+                       int offset = ibmveth_rxq_frame_offset(adapter);
+                       int csum_good = ibmveth_rxq_csum_good(adapter);
+                       int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++                      __sum16 iph_check = 0;
+                       skb = ibmveth_rxq_get_buffer(adapter);
+@@ -1366,7 +1367,17 @@ static int ibmveth_poll(struct napi_stru
+                       skb_put(skb, length);
+                       skb->protocol = eth_type_trans(skb, netdev);
+-                      if (length > netdev->mtu + ETH_HLEN) {
++                      /* PHYP without PLSO support places a -1 in the ip
++                       * checksum for large send frames.
++                       */
++                      if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++                              struct iphdr *iph = (struct iphdr *)skb->data;
++
++                              iph_check = iph->check;
++                      }
++
++                      if ((length > netdev->mtu + ETH_HLEN) ||
++                          lrg_pkt || iph_check == 0xffff) {
+                               ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+                               adapter->rx_large_packets++;
+                       }
diff --git a/queue-4.14/ibmveth-switch-order-of-ibmveth_helper-calls.patch b/queue-4.14/ibmveth-switch-order-of-ibmveth_helper-calls.patch
new file mode 100644 (file)
index 0000000..c95ec06
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:13 -0700
+Subject: ibmveth: Switch order of ibmveth_helper calls.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 5ce9ad815a296374ca21f43f3b1ab5083d202ee1 ]
+
+ibmveth_rx_csum_helper() must be called after ibmveth_rx_mss_helper()
+as ibmveth_rx_csum_helper() may alter ip and tcp checksum values.
+
+Fixes: 66aa0678efc2 ("ibmveth: Support to enable LSO/CSO for Trunk VEA.")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1366,16 +1366,16 @@ static int ibmveth_poll(struct napi_stru
+                       skb_put(skb, length);
+                       skb->protocol = eth_type_trans(skb, netdev);
+-                      if (csum_good) {
+-                              skb->ip_summed = CHECKSUM_UNNECESSARY;
+-                              ibmveth_rx_csum_helper(skb, adapter);
+-                      }
+-
+                       if (length > netdev->mtu + ETH_HLEN) {
+                               ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+                               adapter->rx_large_packets++;
+                       }
++                      if (csum_good) {
++                              skb->ip_summed = CHECKSUM_UNNECESSARY;
++                              ibmveth_rx_csum_helper(skb, adapter);
++                      }
++
+                       napi_gro_receive(napi, skb);    /* send it up */
+                       netdev->stats.rx_packets++;
diff --git a/queue-4.14/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch b/queue-4.14/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
new file mode 100644 (file)
index 0000000..4b3b552
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: David Ahern <dsahern@kernel.org>
+Date: Fri, 9 Oct 2020 11:01:01 -0700
+Subject: ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit 874fb9e2ca949b443cc419a4f2227cafd4381d39 ]
+
+Tobias reported regressions in IPsec tests following the patch
+referenced by the Fixes tag below. The root cause is dropping the
+reset of the flowi4_oif after the fib_lookup. Apparently it is
+needed for xfrm cases, so restore the oif update to ip_route_output_flow
+right before the call to xfrm_lookup_route.
+
+Fixes: 2fbc6e89b2f1 ("ipv4: Update exception handling for multipath routes via same device")
+Reported-by: Tobias Brunner <tobias@strongswan.org>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2603,10 +2603,12 @@ struct rtable *ip_route_output_flow(stru
+       if (IS_ERR(rt))
+               return rt;
+-      if (flp4->flowi4_proto)
++      if (flp4->flowi4_proto) {
++              flp4->flowi4_oif = rt->dst.dev->ifindex;
+               rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+                                                       flowi4_to_flowi(flp4),
+                                                       sk, 0);
++      }
+       return rt;
+ }
diff --git a/queue-4.14/mlx4-handle-non-napi-callers-to-napi_poll.patch b/queue-4.14/mlx4-handle-non-napi-callers-to-napi_poll.patch
new file mode 100644 (file)
index 0000000..005513f
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: Jonathan Lemon <bsd@fb.com>
+Date: Thu, 8 Oct 2020 11:45:26 -0700
+Subject: mlx4: handle non-napi callers to napi_poll
+
+From: Jonathan Lemon <bsd@fb.com>
+
+[ Upstream commit b2b8a92733b288128feb57ffa694758cf475106c ]
+
+netcons calls napi_poll with a budget of 0 to transmit packets.
+Handle this by:
+ - skipping RX processing
+ - do not try to recycle TX packets to the RX cache
+
+Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c |    3 +++
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c |    2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -948,6 +948,9 @@ int mlx4_en_poll_rx_cq(struct napi_struc
+       bool clean_complete = true;
+       int done;
++      if (!budget)
++              return 0;
++
+       if (priv->tx_ring_num[TX_XDP]) {
+               xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+               if (xdp_tx_cq->xdp_busy) {
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_
+               .dma = tx_info->map0_dma,
+       };
+-      if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++      if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+               dma_unmap_page(priv->ddev, tx_info->map0_dma,
+                              PAGE_SIZE, priv->dma_dir);
+               put_page(tx_info->page);
diff --git a/queue-4.14/net-ipv4-always-honour-route-mtu-during-forwarding.patch b/queue-4.14/net-ipv4-always-honour-route-mtu-during-forwarding.patch
new file mode 100644 (file)
index 0000000..152d232
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: "Maciej Żenczykowski" <maze@google.com>
+Date: Wed, 23 Sep 2020 13:18:15 -0700
+Subject: net/ipv4: always honour route mtu during forwarding
+
+From: "Maciej Żenczykowski" <maze@google.com>
+
+[ Upstream commit 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0 ]
+
+Documentation/networking/ip-sysctl.txt:46 says:
+  ip_forward_use_pmtu - BOOLEAN
+    By default we don't trust protocol path MTUs while forwarding
+    because they could be easily forged and can lead to unwanted
+    fragmentation by the router.
+    You only need to enable this if you have user-space software
+    which tries to discover path mtus by itself and depends on the
+    kernel honoring this information. This is normally not the case.
+    Default: 0 (disabled)
+    Possible values:
+    0 - disabled
+    1 - enabled
+
+Which makes it pretty clear that setting it to 1 is a potential
+security/safety/DoS issue, and yet it is entirely reasonable to want
+forwarded traffic to honour explicitly administrator configured
+route mtus (instead of defaulting to device mtu).
+
+Indeed, I can't think of a single reason why you wouldn't want to.
+Since you configured a route mtu you probably know better...
+
+It is pretty common to have a higher device mtu to allow receiving
+large (jumbo) frames, while having some routes via that interface
+(potentially including the default route to the internet) specify
+a lower mtu.
+
+Note that ipv6 forwarding uses device mtu unless the route is locked
+(in which case it will use the route mtu).
+
+This approach is not usable for IPv4 where an 'mtu lock' on a route
+also has the side effect of disabling TCP path mtu discovery via
+disabling the IPv4 DF (don't frag) bit on all outgoing frames.
+
+I'm not aware of a way to lock a route from an IPv6 RA, so that also
+potentially seems wrong.
+
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Sunmeet Gill (Sunny) <sgill@quicinc.com>
+Cc: Vinay Paradkar <vparadka@qti.qualcomm.com>
+Cc: Tyler Wear <twear@quicinc.com>
+Cc: David Ahern <dsahern@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -364,12 +364,18 @@ static inline unsigned int ip_dst_mtu_ma
+                                                   bool forwarding)
+ {
+       struct net *net = dev_net(dst->dev);
++      unsigned int mtu;
+       if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+           ip_mtu_locked(dst) ||
+           !forwarding)
+               return dst_mtu(dst);
++      /* 'forwarding = true' case should always honour route mtu */
++      mtu = dst_metric_raw(dst, RTAX_MTU);
++      if (mtu)
++              return mtu;
++
+       return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
diff --git a/queue-4.14/net-usb-qmi_wwan-add-cellient-mpl200-card.patch b/queue-4.14/net-usb-qmi_wwan-add-cellient-mpl200-card.patch
new file mode 100644 (file)
index 0000000..8fac4ef
--- /dev/null
@@ -0,0 +1,29 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Date: Thu, 8 Oct 2020 09:21:38 +0200
+Subject: net: usb: qmi_wwan: add Cellient MPL200 card
+
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+
+[ Upstream commit 28802e7c0c9954218d1830f7507edc9d49b03a00 ]
+
+Add usb ids of the Cellient MPL200 card.
+
+Signed-off-by: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1301,6 +1301,7 @@ static const struct usb_device_id produc
+       {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+       {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
++      {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
diff --git a/queue-4.14/r8169-fix-data-corruption-issue-on-rtl8402.patch b/queue-4.14/r8169-fix-data-corruption-issue-on-rtl8402.patch
new file mode 100644 (file)
index 0000000..8c24f41
--- /dev/null
@@ -0,0 +1,154 @@
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 1 Oct 2020 09:23:02 +0200
+Subject: r8169: fix data corruption issue on RTL8402
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ef9da46ddef071e1bbb943afbbe9b38771855554 ]
+
+Petr reported that after resume from suspend RTL8402 partially
+truncates incoming packets, and re-initializing register RxConfig
+before the actual chip re-initialization sequence is needed to avoid
+the issue.
+
+Reported-by: Petr Tesarik <ptesarik@suse.cz>
+Proposed-by: Petr Tesarik <ptesarik@suse.cz>
+Tested-by: Petr Tesarik <ptesarik@suse.cz>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c |  108 ++++++++++++++++++-----------------
+ 1 file changed, 56 insertions(+), 52 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4501,6 +4501,58 @@ static void rtl_rar_set(struct rtl8169_p
+       rtl_unlock_work(tp);
+ }
++static void rtl_init_rxcfg(struct rtl8169_private *tp)
++{
++      void __iomem *ioaddr = tp->mmio_addr;
++
++      switch (tp->mac_version) {
++      case RTL_GIGA_MAC_VER_01:
++      case RTL_GIGA_MAC_VER_02:
++      case RTL_GIGA_MAC_VER_03:
++      case RTL_GIGA_MAC_VER_04:
++      case RTL_GIGA_MAC_VER_05:
++      case RTL_GIGA_MAC_VER_06:
++      case RTL_GIGA_MAC_VER_10:
++      case RTL_GIGA_MAC_VER_11:
++      case RTL_GIGA_MAC_VER_12:
++      case RTL_GIGA_MAC_VER_13:
++      case RTL_GIGA_MAC_VER_14:
++      case RTL_GIGA_MAC_VER_15:
++      case RTL_GIGA_MAC_VER_16:
++      case RTL_GIGA_MAC_VER_17:
++              RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
++              break;
++      case RTL_GIGA_MAC_VER_18:
++      case RTL_GIGA_MAC_VER_19:
++      case RTL_GIGA_MAC_VER_20:
++      case RTL_GIGA_MAC_VER_21:
++      case RTL_GIGA_MAC_VER_22:
++      case RTL_GIGA_MAC_VER_23:
++      case RTL_GIGA_MAC_VER_24:
++      case RTL_GIGA_MAC_VER_34:
++      case RTL_GIGA_MAC_VER_35:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
++              break;
++      case RTL_GIGA_MAC_VER_40:
++      case RTL_GIGA_MAC_VER_41:
++      case RTL_GIGA_MAC_VER_42:
++      case RTL_GIGA_MAC_VER_43:
++      case RTL_GIGA_MAC_VER_44:
++      case RTL_GIGA_MAC_VER_45:
++      case RTL_GIGA_MAC_VER_46:
++      case RTL_GIGA_MAC_VER_47:
++      case RTL_GIGA_MAC_VER_48:
++      case RTL_GIGA_MAC_VER_49:
++      case RTL_GIGA_MAC_VER_50:
++      case RTL_GIGA_MAC_VER_51:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
++              break;
++      default:
++              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
++              break;
++      }
++}
++
+ static int rtl_set_mac_address(struct net_device *dev, void *p)
+ {
+       struct rtl8169_private *tp = netdev_priv(dev);
+@@ -4519,6 +4571,10 @@ static int rtl_set_mac_address(struct ne
+       pm_runtime_put_noidle(d);
++      /* Reportedly at least Asus X453MA truncates packets otherwise */
++      if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++              rtl_init_rxcfg(tp);
++
+       return 0;
+ }
+@@ -4955,58 +5011,6 @@ static void rtl_init_pll_power_ops(struc
+               break;
+       }
+ }
+-
+-static void rtl_init_rxcfg(struct rtl8169_private *tp)
+-{
+-      void __iomem *ioaddr = tp->mmio_addr;
+-
+-      switch (tp->mac_version) {
+-      case RTL_GIGA_MAC_VER_01:
+-      case RTL_GIGA_MAC_VER_02:
+-      case RTL_GIGA_MAC_VER_03:
+-      case RTL_GIGA_MAC_VER_04:
+-      case RTL_GIGA_MAC_VER_05:
+-      case RTL_GIGA_MAC_VER_06:
+-      case RTL_GIGA_MAC_VER_10:
+-      case RTL_GIGA_MAC_VER_11:
+-      case RTL_GIGA_MAC_VER_12:
+-      case RTL_GIGA_MAC_VER_13:
+-      case RTL_GIGA_MAC_VER_14:
+-      case RTL_GIGA_MAC_VER_15:
+-      case RTL_GIGA_MAC_VER_16:
+-      case RTL_GIGA_MAC_VER_17:
+-              RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+-              break;
+-      case RTL_GIGA_MAC_VER_18:
+-      case RTL_GIGA_MAC_VER_19:
+-      case RTL_GIGA_MAC_VER_20:
+-      case RTL_GIGA_MAC_VER_21:
+-      case RTL_GIGA_MAC_VER_22:
+-      case RTL_GIGA_MAC_VER_23:
+-      case RTL_GIGA_MAC_VER_24:
+-      case RTL_GIGA_MAC_VER_34:
+-      case RTL_GIGA_MAC_VER_35:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+-              break;
+-      case RTL_GIGA_MAC_VER_40:
+-      case RTL_GIGA_MAC_VER_41:
+-      case RTL_GIGA_MAC_VER_42:
+-      case RTL_GIGA_MAC_VER_43:
+-      case RTL_GIGA_MAC_VER_44:
+-      case RTL_GIGA_MAC_VER_45:
+-      case RTL_GIGA_MAC_VER_46:
+-      case RTL_GIGA_MAC_VER_47:
+-      case RTL_GIGA_MAC_VER_48:
+-      case RTL_GIGA_MAC_VER_49:
+-      case RTL_GIGA_MAC_VER_50:
+-      case RTL_GIGA_MAC_VER_51:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+-              break;
+-      default:
+-              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+-              break;
+-      }
+-}
+ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+ {
diff --git a/queue-4.14/series b/queue-4.14/series
new file mode 100644 (file)
index 0000000..6637c58
--- /dev/null
@@ -0,0 +1,8 @@
+ibmveth-switch-order-of-ibmveth_helper-calls.patch
+ibmveth-identify-ingress-large-send-packets.patch
+ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
+mlx4-handle-non-napi-callers-to-napi_poll.patch
+net-usb-qmi_wwan-add-cellient-mpl200-card.patch
+tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
+net-ipv4-always-honour-route-mtu-during-forwarding.patch
+r8169-fix-data-corruption-issue-on-rtl8402.patch
diff --git a/queue-4.14/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch b/queue-4.14/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
new file mode 100644 (file)
index 0000000..3dc3b30
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat Oct 17 01:00:22 PM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 21:12:50 -0700
+Subject: tipc: fix the skb_unshare() in tipc_buf_append()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ed42989eab57d619667d7e87dfbd8fe207db54fe ]
+
+skb_unshare() drops a reference count on the old skb unconditionally,
+so in the failure case, we end up freeing the skb twice here.
+And because the skb is allocated in fclone and cloned by caller
+tipc_msg_reassemble(), the consequence is actually freeing the
+original skb too, thus triggered the UAF by syzbot.
+
+Fix this by replacing this skb_unshare() with skb_cloned()+skb_copy().
+
+Fixes: ff48b6222e65 ("tipc: use skb_unshare() instead in tipc_buf_append()")
+Reported-and-tested-by: syzbot+e96a7ba46281824cc46a@syzkaller.appspotmail.com
+Cc: Jon Maloy <jmaloy@redhat.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **hea
+       if (fragid == FIRST_FRAGMENT) {
+               if (unlikely(head))
+                       goto err;
+-              frag = skb_unshare(frag, GFP_ATOMIC);
++              if (skb_cloned(frag))
++                      frag = skb_copy(frag, GFP_ATOMIC);
+               if (unlikely(!frag))
+                       goto err;
+               head = *headbuf = frag;