--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:14 -0700
+Subject: ibmveth: Identify ingress large send packets.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 413f142cc05cb03f2d1ea83388e40c1ddc0d74e9 ]
+
+Ingress large send packets are identified either by the
+IBMVETH_RXQ_LRG_PKT flag in the receive buffer or by a -1 placed
+in the IP header checksum. Which method is used depends on the
+firmware version. Frame geometry and sufficient header validation
+are performed by the hypervisor, eliminating the need for further
+header checks here.
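+
+A minimal sketch of the combined check (illustrative only; the exact
+code is in the hunk below):
+
+	__sum16 iph_check = 0;
+
+	if (skb->protocol == cpu_to_be16(ETH_P_IP))
+		iph_check = ((struct iphdr *)skb->data)->check;
+
+	/* large send if oversized, flagged by newer firmware, or marked
+	 * by older firmware with -1 (0xffff) in the IP header checksum
+	 */
+	if (length > netdev->mtu + ETH_HLEN || lrg_pkt || iph_check == 0xffff)
+		ibmveth_rx_mss_helper(skb, mss, lrg_pkt);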
+
+Fixes: 7b5967389f5a ("ibmveth: set correct gso_size and gso_type")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1330,6 +1330,7 @@ static int ibmveth_poll(struct napi_stru
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++ __sum16 iph_check = 0;
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+@@ -1366,7 +1367,17 @@ static int ibmveth_poll(struct napi_stru
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if (length > netdev->mtu + ETH_HLEN) {
++ /* PHYP without PLSO support places a -1 in the ip
++ * checksum for large send frames.
++ */
++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++ struct iphdr *iph = (struct iphdr *)skb->data;
++
++ iph_check = iph->check;
++ }
++
++ if ((length > netdev->mtu + ETH_HLEN) ||
++ lrg_pkt || iph_check == 0xffff) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:13 -0700
+Subject: ibmveth: Switch order of ibmveth_helper calls.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 5ce9ad815a296374ca21f43f3b1ab5083d202ee1 ]
+
+ibmveth_rx_csum_helper() must be called after ibmveth_rx_mss_helper(),
+as ibmveth_rx_csum_helper() may alter the IP and TCP checksum values.
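+
+The resulting order in ibmveth_poll() (sketch of the hunk below):
+
+	if (length > netdev->mtu + ETH_HLEN) {
+		ibmveth_rx_mss_helper(skb, mss, lrg_pkt);	/* runs first */
+		adapter->rx_large_packets++;
+	}
+
+	if (csum_good) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		ibmveth_rx_csum_helper(skb, adapter);	/* may rewrite checksums */
+	}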
+
+Fixes: 66aa0678efc2 ("ibmveth: Support to enable LSO/CSO for Trunk VEA.")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1366,16 +1366,16 @@ static int ibmveth_poll(struct napi_stru
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if (csum_good) {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- ibmveth_rx_csum_helper(skb, adapter);
+- }
+-
+ if (length > netdev->mtu + ETH_HLEN) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
++ if (csum_good) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ ibmveth_rx_csum_helper(skb, adapter);
++ }
++
+ napi_gro_receive(napi, skb); /* send it up */
+
+ netdev->stats.rx_packets++;
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: David Ahern <dsahern@kernel.org>
+Date: Fri, 9 Oct 2020 11:01:01 -0700
+Subject: ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit 874fb9e2ca949b443cc419a4f2227cafd4381d39 ]
+
+Tobias reported regressions in IPsec tests following the patch
+referenced by the Fixes tag below. The root cause is that it dropped
+the update of flowi4_oif after the fib_lookup. Apparently the update
+is needed for xfrm cases, so restore it in ip_route_output_flow()
+right before the call to xfrm_lookup_route().
+
+Fixes: 2fbc6e89b2f1 ("ipv4: Update exception handling for multipath routes via same device")
+Reported-by: Tobias Brunner <tobias@strongswan.org>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2634,10 +2634,12 @@ struct rtable *ip_route_output_flow(stru
+ if (IS_ERR(rt))
+ return rt;
+
+- if (flp4->flowi4_proto)
++ if (flp4->flowi4_proto) {
++ flp4->flowi4_oif = rt->dst.dev->ifindex;
+ rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+ flowi4_to_flowi(flp4),
+ sk, 0);
++ }
+
+ return rt;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Jonathan Lemon <bsd@fb.com>
+Date: Thu, 8 Oct 2020 11:45:26 -0700
+Subject: mlx4: handle non-napi callers to napi_poll
+
+From: Jonathan Lemon <bsd@fb.com>
+
+[ Upstream commit b2b8a92733b288128feb57ffa694758cf475106c ]
+
+netcons calls napi_poll with a budget of 0 to transmit packets.
+Handle this by (see the sketch below):
+ - skipping RX processing
+ - not trying to recycle TX packets to the RX cache
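+
+A minimal sketch of the zero-budget handling (illustrative; names as
+in the mlx4 driver):
+
+	int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+	{
+		/* budget == 0 means the caller (e.g. netpoll) only wants
+		 * TX completion work, so skip all RX processing
+		 */
+		if (!budget)
+			return 0;
+		...
+	}
+
+	/* and in the TX path, recycle to the RX cache only when called
+	 * in real napi context (napi_mode != 0), see the second hunk
+	 */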
+
+Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 +++
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -945,6 +945,9 @@ int mlx4_en_poll_rx_cq(struct napi_struc
+ bool clean_complete = true;
+ int done;
+
++ if (!budget)
++ return 0;
++
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_
+ .dma = tx_info->map0_dma,
+ };
+
+- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(tx_info->page);
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Tue, 6 Oct 2020 15:52:53 +0200
+Subject: net: fec: Fix PHY init after phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 0da1ccbbefb662915228bc17e1c7d4ad28b3ddab ]
+
+phy_reset_after_clk_enable() does a PHY reset, which means the PHY
+loses its register settings. fec_enet_mii_probe() starts the PHY and
+makes the necessary calls to configure it via the PHY framework,
+loading the correct register settings into the PHY. Therefore,
+fec_enet_mii_probe() should be called only after the PHY has been
+reset, not before as the code currently does.
+
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Tested-by: Richard Leitner <richard.leitner@skidata.com>
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2950,17 +2950,17 @@ fec_enet_open(struct net_device *ndev)
+ /* Init MAC prior to mii bus probe */
+ fec_restart(ndev);
+
+- /* Probe and connect to PHY when open the interface */
+- ret = fec_enet_mii_probe(ndev);
+- if (ret)
+- goto err_enet_mii_probe;
+-
+ /* Call phy_reset_after_clk_enable() again if it failed during
+ * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ */
+ if (reset_again)
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
++ /* Probe and connect to PHY when open the interface */
++ ret = fec_enet_mii_probe(ndev);
++ if (ret)
++ goto err_enet_mii_probe;
++
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Sat, 10 Oct 2020 11:10:00 +0200
+Subject: net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 64a632da538a6827fad0ea461925cedb9899ebe2 ]
+
+phy_reset_after_clk_enable() is always called with ndev->phydev,
+however that pointer may be NULL even though the PHY device instance
+already exists and would be sufficient to perform the PHY reset.
+
+This condition happens in fec_open(), where the clock must be enabled
+first, then the PHY must be reset, and only then can the PHY IDs be
+read out of the PHY.
+
+If the PHY is still not bound to the MAC, but an OF PHY node and a
+matching PHY device instance already exist, use the OF PHY node to
+obtain the PHY device instance, and then use that instance when
+triggering the PHY reset.
+
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1897,6 +1897,27 @@ static int fec_enet_mdio_write(struct mi
+ return ret;
+ }
+
++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ struct phy_device *phy_dev = ndev->phydev;
++
++ if (phy_dev) {
++ phy_reset_after_clk_enable(phy_dev);
++ } else if (fep->phy_node) {
++ /*
++ * If the PHY still is not bound to the MAC, but there is
++ * OF PHY node and a matching PHY device instance already,
++ * use the OF PHY node to obtain the PHY device instance,
++ * and then use that PHY device instance when triggering
++ * the PHY reset.
++ */
++ phy_dev = of_phy_find_device(fep->phy_node);
++ phy_reset_after_clk_enable(phy_dev);
++ put_device(&phy_dev->mdio.dev);
++ }
++}
++
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+@@ -1923,7 +1944,7 @@ static int fec_enet_clk_enable(struct ne
+ if (ret)
+ goto failed_clk_ref;
+
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
+ } else {
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+@@ -2938,7 +2959,7 @@ fec_enet_open(struct net_device *ndev)
+ * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ */
+ if (reset_again)
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Yonghong Song <yhs@fb.com>
+Date: Wed, 14 Oct 2020 07:46:12 -0700
+Subject: net: fix pos increment in ipv6_route_seq_next
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 6617dfd440149e42ce4d2be615eb31a4755f4d30 ]
+
+Commit 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+tried to fix the issue where the seq_file pos is not increased
+if a NULL element is returned by seq_ops->next(). See bug
+ https://bugzilla.kernel.org/show_bug.cgi?id=206283
+The commit effectively does:
+ - increase pos for all seq_ops->start()
+ - increase pos for all seq_ops->next()
+
+For ipv6_route, increasing pos for all seq_ops->next() is correct.
+But increasing pos for seq_ops->start() is not correct
+since pos is used to determine how many items to skip during
+seq_ops->start():
+ iter->skip = *pos;
+seq_ops->start() just fetches the *current* pos item.
+The item can be skipped only after seq_ops->show(), which is
+essentially the beginning of seq_ops->next().
+
+For example, I have 7 ipv6 route entries,
+ root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=4096
+ 00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001 eth0
+ fe800000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000001 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ 00000000000000000000000000000001 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000003 00000000 80200001 lo
+ fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001 eth0
+ ff000000000000000000000000000000 08 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000004 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ 0+1 records in
+ 0+1 records out
+ 1050 bytes (1.0 kB, 1.0 KiB) copied, 0.00707908 s, 148 kB/s
+ root@arch-fb-vm1:~/net-next
+
+In the above, I specify buffer size 4096, so all records can be returned
+to user space with a single trip to the kernel.
+
+If I use a buffer size of 128, since each record is 149 bytes, the
+kernel's seq_read() will internally read one 149-byte record into its
+buffer and return the data to user space in two read() syscalls. The
+next user read() syscall will then trigger the next seq_ops->start().
+Since the current implementation increases pos even for
+seq_ops->start(), it will skip records #2, #4 and #6, assuming the
+first record is #1.
+
+ root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=128
+ 00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+4+1 records in
+4+1 records out
+600 bytes copied, 0.00127758 s, 470 kB/s
+
+To fix the problem, create a fake pos pointer so seq_ops->start()
+won't actually increase seq_file pos. With this fix, the
+above `dd` command with `bs=128` will show correct result.
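+
+A sketch of the resulting seq_ops->start() logic (simplified):
+
+	static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+	{
+		...
+		iter->skip = *pos;	/* skip the already-shown items */
+		if (iter->tbl) {
+			loff_t p = 0;	/* fake pos: leaves seq_file's *pos alone */
+
+			ipv6_route_seq_setup_walk(iter, net);
+			return ipv6_route_seq_next(seq, NULL, &p);
+		}
+		return NULL;
+	}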
+
+Fixes: 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+Cc: Alexei Starovoitov <ast@kernel.org>
+Suggested-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2417,8 +2417,10 @@ static void *ipv6_route_seq_start(struct
+ iter->skip = *pos;
+
+ if (iter->tbl) {
++ loff_t p = 0;
++
+ ipv6_route_seq_setup_walk(iter, net);
+- return ipv6_route_seq_next(seq, NULL, pos);
++ return ipv6_route_seq_next(seq, NULL, &p);
+ } else {
+ return NULL;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: "Maciej Żenczykowski" <maze@google.com>
+Date: Wed, 23 Sep 2020 13:18:15 -0700
+Subject: net/ipv4: always honour route mtu during forwarding
+
+From: "Maciej Żenczykowski" <maze@google.com>
+
+[ Upstream commit 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0 ]
+
+Documentation/networking/ip-sysctl.txt:46 says:
+ ip_forward_use_pmtu - BOOLEAN
+ By default we don't trust protocol path MTUs while forwarding
+ because they could be easily forged and can lead to unwanted
+ fragmentation by the router.
+ You only need to enable this if you have user-space software
+ which tries to discover path mtus by itself and depends on the
+ kernel honoring this information. This is normally not the case.
+ Default: 0 (disabled)
+ Possible values:
+ 0 - disabled
+ 1 - enabled
+
+Which makes it pretty clear that setting it to 1 is a potential
+security/safety/DoS issue, and yet it is entirely reasonable to want
+forwarded traffic to honour explicitly administrator-configured
+route mtus (instead of defaulting to the device mtu).
+
+Indeed, I can't think of a single reason why you wouldn't want to.
+Since you configured a route mtu you probably know better...
+
+It is pretty common to have a higher device mtu to allow receiving
+large (jumbo) frames, while having some routes via that interface
+(potentially including the default route to the internet) specify
+a lower mtu.
+
+Note that ipv6 forwarding uses the device mtu unless the route is
+locked (in which case it will use the route mtu).
+
+This approach is not usable for IPv4, where an 'mtu lock' on a route
+also has the side effect of disabling TCP path mtu discovery, since
+it clears the IPv4 DF (don't frag) bit on all outgoing frames.
+
+I'm not aware of a way to lock a route from an IPv6 RA, so that also
+potentially seems wrong.
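+
+A sketch of the resulting selection order in ip_dst_mtu_maybe_forward()
+(the mtu values are an illustrative example):
+
+	/* forwarding, pmtu not trusted, route not locked */
+	mtu = dst_metric_raw(dst, RTAX_MTU);	/* administrator-configured route mtu */
+	if (mtu)	/* e.g. dev->mtu = 9000 but route mtu = 1500 -> use 1500 */
+		return mtu;
+	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);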
+
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Sunmeet Gill (Sunny) <sgill@quicinc.com>
+Cc: Vinay Paradkar <vparadka@qti.qualcomm.com>
+Cc: Tyler Wear <twear@quicinc.com>
+Cc: David Ahern <dsahern@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -399,12 +399,18 @@ static inline unsigned int ip_dst_mtu_ma
+ bool forwarding)
+ {
+ struct net *net = dev_net(dst->dev);
++ unsigned int mtu;
+
+ if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+ ip_mtu_locked(dst) ||
+ !forwarding)
+ return dst_mtu(dst);
+
++ /* 'forwarding = true' case should always honour route mtu */
++ mtu = dst_metric_raw(dst, RTAX_MTU);
++ if (mtu)
++ return mtu;
++
+ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
+
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Karsten Graul <kgraul@linux.ibm.com>
+Date: Wed, 14 Oct 2020 19:43:28 +0200
+Subject: net/smc: fix valid DMBE buffer sizes
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit ef12ad45880b696eb993d86c481ca891836ab593 ]
+
+SMCD_DMBE_SIZES should cover all valid DMBE buffer sizes, so the
+correct value is 6, which corresponds to 1MB. With 7, the registration
+of an ISM buffer would always fail because of the invalid size
+requested. Fix that and set the value to 6.
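+
+For reference, assuming the usual SMC encoding where a compressed
+buffer size index i expands to 16KB << i:
+
+	size(i) = 16KB << i	/* i = 0 -> 16KB, ..., i = 6 -> 1MB */
+	size(7) = 2MB		/* beyond the 1MB DMBE maximum -> rejected */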
+
+Fixes: c6ba7c9ba43d ("net/smc: add base infrastructure for SMC-D and ISM")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -770,7 +770,7 @@ static struct smc_buf_desc *smcr_new_buf
+ return buf_desc;
+ }
+
+-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ bool is_dmb, int bufsize)
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Rohit Maheshwari <rohitm@chelsio.com>
+Date: Thu, 8 Oct 2020 00:10:21 +0530
+Subject: net/tls: sendfile fails with ktls offload
+
+From: Rohit Maheshwari <rohitm@chelsio.com>
+
+[ Upstream commit ea1dd3e9d080c961b9a451130b61c72dc9a5397b ]
+
+When sendpage first gets called with more data to follow, 'more' in
+tls_push_data() gets set, which later sets pending_open_record_frags.
+But when no data is left in the file and tls_push_data() gets called
+for the last time, pending_open_record_frags does not get reset.
+Later, when the 2 bytes of encrypted alert come in via sendmsg, the
+code first checks pending_open_record_frags, and since it is still
+set, it creates a record with 0 data bytes to encrypt, meaning the
+record length is prepend_size + tag_size only, which causes problems.
+We should set/reset pending_open_record_frags based on the 'more' bit.
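+
+A simplified sketch of the corrected flow in tls_push_data():
+
+	bool more = false;
+	...
+	do {
+		...
+		if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+			more = true;	/* caller promised more data */
+			break;
+		}
+		...
+	} while (!done);
+
+	/* reflects the flags of the *last* call, so a stale value can
+	 * no longer leak into the next record
+	 */
+	tls_ctx->pending_open_record_frags = more;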
+
+Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure")
+Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_device.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -351,13 +351,13 @@ static int tls_push_data(struct sock *sk
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ struct tls_record_info *record = ctx->open_record;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+- int copy, rc = 0;
++ bool more = false;
+ bool done = false;
++ int copy, rc = 0;
+ long timeo;
+
+ if (flags &
+@@ -422,9 +422,8 @@ handle_error:
+ if (!size) {
+ last_record:
+ tls_push_record_flags = flags;
+- if (more) {
+- tls_ctx->pending_open_record_frags =
+- record->num_frags;
++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++ more = true;
+ break;
+ }
+
+@@ -445,6 +444,8 @@ last_record:
+ }
+ } while (!done);
+
++ tls_ctx->pending_open_record_frags = more;
++
+ if (orig_size - size > 0)
+ rc = orig_size - size;
+
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Date: Thu, 8 Oct 2020 09:21:38 +0200
+Subject: net: usb: qmi_wwan: add Cellient MPL200 card
+
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+
+[ Upstream commit 28802e7c0c9954218d1830f7507edc9d49b03a00 ]
+
+Add the USB IDs of the Cellient MPL200 card.
+
+Signed-off-by: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1312,6 +1312,7 @@ static const struct usb_device_id produc
+ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
++ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+
+ /* 4. Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 1 Oct 2020 09:23:02 +0200
+Subject: r8169: fix data corruption issue on RTL8402
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ef9da46ddef071e1bbb943afbbe9b38771855554 ]
+
+Petr reported that, after resume from suspend, the RTL8402 partially
+truncates incoming packets. Re-initializing the RxConfig register
+before the actual chip re-initialization sequence is needed to avoid
+the issue.
+
+Reported-by: Petr Tesarik <ptesarik@suse.cz>
+Proposed-by: Petr Tesarik <ptesarik@suse.cz>
+Tested-by: Petr Tesarik <ptesarik@suse.cz>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 46 +++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 21 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4111,6 +4111,27 @@ static void rtl_rar_set(struct rtl8169_p
+ rtl_unlock_work(tp);
+ }
+
++static void rtl_init_rxcfg(struct rtl8169_private *tp)
++{
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
++ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
++ RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
++ break;
++ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
++ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
++ case RTL_GIGA_MAC_VER_38:
++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
++ break;
++ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
++ break;
++ default:
++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
++ break;
++ }
++}
++
+ static int rtl_set_mac_address(struct net_device *dev, void *p)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -4128,6 +4149,10 @@ static int rtl_set_mac_address(struct ne
+
+ pm_runtime_put_noidle(d);
+
++ /* Reportedly at least Asus X453MA truncates packets otherwise */
++ if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++ rtl_init_rxcfg(tp);
++
+ return 0;
+ }
+
+@@ -4289,27 +4314,6 @@ static void rtl_pll_power_up(struct rtl8
+ }
+ }
+
+-static void rtl_init_rxcfg(struct rtl8169_private *tp)
+-{
+- switch (tp->mac_version) {
+- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+- case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+- RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+- break;
+- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+- case RTL_GIGA_MAC_VER_38:
+- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+- break;
+- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+- break;
+- default:
+- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
+- break;
+- }
+-}
+-
+ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+ {
+ tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
--- /dev/null
+ibmveth-switch-order-of-ibmveth_helper-calls.patch
+ibmveth-identify-ingress-large-send-packets.patch
+ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
+mlx4-handle-non-napi-callers-to-napi_poll.patch
+net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch
+net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch
+net-fix-pos-incrementment-in-ipv6_route_seq_next.patch
+net-smc-fix-valid-dmbe-buffer-sizes.patch
+net-usb-qmi_wwan-add-cellient-mpl200-card.patch
+tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
+net-ipv4-always-honour-route-mtu-during-forwarding.patch
+r8169-fix-data-corruption-issue-on-rtl8402.patch
+net-tls-sendfile-fails-with-ktls-offload.patch
--- /dev/null
+From foo@baz Sat Oct 17 12:48:24 PM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 21:12:50 -0700
+Subject: tipc: fix the skb_unshare() in tipc_buf_append()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ed42989eab57d619667d7e87dfbd8fe207db54fe ]
+
+skb_unshare() drops a reference count on the old skb unconditionally,
+so in the failure case we end up freeing the skb twice here.
+And because the skb is allocated as an fclone and cloned by the caller
+tipc_msg_reassemble(), the consequence is that the original skb is
+freed too, thus triggering the UAF reported by syzbot.
+
+Fix this by replacing the skb_unshare() with skb_cloned()+skb_copy().
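+
+The key difference (illustrative):
+
+	/* skb_unshare() frees its argument even when the internal copy
+	 * fails, so the caller's error path frees the original skb a
+	 * second time. skb_cloned() + skb_copy() keeps the original
+	 * intact on failure, so it is freed exactly once.
+	 */
+	if (skb_cloned(frag))
+		frag = skb_copy(frag, GFP_ATOMIC);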
+
+Fixes: ff48b6222e65 ("tipc: use skb_unshare() instead in tipc_buf_append()")
+Reported-and-tested-by: syzbot+e96a7ba46281824cc46a@syzkaller.appspotmail.com
+Cc: Jon Maloy <jmaloy@redhat.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **hea
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- frag = skb_unshare(frag, GFP_ATOMIC);
++ if (skb_cloned(frag))
++ frag = skb_copy(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;