--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 23:18:21 -0700
+Subject: [PATCH stable 5.9 23/24] can: j1935: j1939_tp_tx_dat_new(): fix missing initialization of skbcnt
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit e009f95b1543e26606dca2f7e6e9f0f9174538e5 ]
+
+This fixes an uninit-value warning:
+BUG: KMSAN: uninit-value in can_receive+0x26b/0x630 net/can/af_can.c:650
+
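+For context, a hedged sketch of why the field matters (abbreviated from
+include/linux/can/skb.h, not part of this patch): the CAN private area
+lives in the skb headroom and is not zeroed by the allocator, so every
+producer must initialize all of its fields before the skb reaches
+af_can, which reads skbcnt for loop detection.
+
+  struct can_skb_priv {
+        int ifindex;
+        int skbcnt;     /* read later in can_receive() */
+  };
+
+  static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
+  {
+        return (struct can_skb_priv *)(skb->head);
+  }
+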
+Reported-and-tested-by: syzbot+3f3837e61a48d32b495f@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: Robin van der Gracht <robin@protonic.nl>
+Cc: Oleksij Rempel <linux@rempel-privat.de>
+Cc: Pengutronix Kernel Team <kernel@pengutronix.de>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Link: https://lore.kernel.org/r/20201008061821.24663-1-xiyou.wangcong@gmail.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/transport.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j193
+ skb->dev = priv->ndev;
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+ /* reserve CAN header */
+ skb_reserve(skb, offsetof(struct can_frame, data));
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 11 Aug 2020 10:15:44 +0200
+Subject: [PATCH stable 5.9 22/24] can: m_can_platform: don't call m_can_class_suspend in runtime suspend
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+[ Upstream commit 81f1f5ae8b3cbd54fdd994c9e9aacdb7b414a802 ]
+
+ 0704c5743694 ("can: m_can_platform: remove unnecessary m_can_class_resume() call")
+
+removed the m_can_class_resume() call in the runtime resume path to get
+rid of an infinite recursion, so the runtime resume now only handles the device
+clocks.
+
+Unfortunately it did not remove the complementary m_can_class_suspend() call in
+the runtime suspend function, so those paths are now unbalanced, which causes
+the pinctrl state to get stuck on the "sleep" state, which breaks all CAN
+functionality on SoCs where this state is defined. Remove the
+m_can_class_suspend() call to fix this.
+
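+The now-balanced pair, sketched for context (the suspend side matches
+the hunk below; the resume side is paraphrased, so treat it as
+illustrative):
+
+  static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+  {
+        struct net_device *ndev = dev_get_drvdata(dev);
+        struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+        /* clocks only: no m_can_class_suspend(), no pinctrl change */
+        clk_disable_unprepare(mcan_class->cclk);
+        clk_disable_unprepare(mcan_class->hclk);
+
+        return 0;
+  }
+
+  static int __maybe_unused m_can_runtime_resume(struct device *dev)
+  {
+        struct net_device *ndev = dev_get_drvdata(dev);
+        struct m_can_classdev *mcan_class = netdev_priv(ndev);
+        int err;
+
+        /* mirror image: clocks only, no m_can_class_resume() */
+        err = clk_prepare_enable(mcan_class->hclk);
+        if (err)
+                return err;
+
+        err = clk_prepare_enable(mcan_class->cclk);
+        if (err)
+                clk_disable_unprepare(mcan_class->hclk);
+
+        return err;
+  }
+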
+Fixes: 0704c5743694 ("can: m_can_platform: remove unnecessary m_can_class_resume() call")
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Link: https://lore.kernel.org/r/20200811081545.19921-1-l.stach@pengutronix.de
+Acked-by: Dan Murphy <dmurphy@ti.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/m_can/m_can_platform.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+- m_can_class_suspend(dev);
+-
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Herat Ramani <herat@chelsio.com>
+Date: Tue, 13 Oct 2020 15:01:29 +0530
+Subject: [PATCH stable 5.9 03/24] cxgb4: handle 4-tuple PEDIT to NAT mode translation
+
+From: Herat Ramani <herat@chelsio.com>
+
+[ Upstream commit 2ef813b8f405db3f72202b6fcae40a628ab80a53 ]
+
+The 4-tuple NAT offload via PEDIT always overwrites all the 4-tuple
+fields even if they had not been explicitly enabled. If any fields in
+the 4-tuple are not enabled, then the hardware overwrites the
+disabled fields with zeros, instead of ignoring them.
+
+So, add a parser that can translate the enabled 4-tuple PEDIT fields
+to one of the NAT mode combinations supported by the hardware and
+hence avoid overwriting disabled fields with 0. Any rule with an
+unsupported NAT mode combination is rejected.
+
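+As a worked example (illustrative only): a pedit action rewriting just
+the destination IP and destination port accumulates DIP | DPORT, which
+the translation table below maps to NAT_MODE_DIP_DP on T5, so SIP and
+SPORT stay untouched; a combination without a table entry, e.g. DPORT
+alone on T5, fails validation with -EOPNOTSUPP.
+
+  /* e.g. "pedit ex munge ip dst set ..." + "... munge tcp dport set ..." */
+  u8 natmode_flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT;
+  /* lookup matches { CHELSIO_T5, DIP | DPORT, NAT_MODE_DIP_DP } */
+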
+Signed-off-by: Herat Ramani <herat@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 175 +++++++++++++++++--
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 15 +
+ 2 files changed, 177 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[
+ PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+ };
+
++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
++ /* Default supported NAT modes */
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_NONE,
++ .natmode = NAT_MODE_NONE,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP,
++ .natmode = NAT_MODE_DIP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
++ .natmode = NAT_MODE_DIP_DP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_DIP_DP_SIP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_DP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++ CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_ALL,
++ },
++ /* T6+ can ignore L4 ports when they're disabled. */
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_DP_SP,
++ },
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_ALL,
++ },
++};
++
++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
++ u8 natmode_flags)
++{
++ u8 i = 0;
++
++ /* Translate the enabled NAT 4-tuple fields to one of the
++ * hardware supported NAT mode configurations. This ensures
++ * that we pick a valid combination, where the disabled fields
++ * do not get overwritten to 0.
++ */
++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
++ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
++ return;
++ }
++ }
++}
++
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filt
+ }
+
+ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+- u32 mask, u32 offset, u8 htype)
++ u32 mask, u32 offset, u8 htype,
++ u8 *natmode_flags)
+ {
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+@@ -314,67 +398,102 @@ static void process_pedit_field(struct c
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ offload_pedit(fs, val, mask, IP4_SRC);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP4_DST:
+ offload_pedit(fs, val, mask, IP4_DST);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ offload_pedit(fs, val, mask, IP6_SRC_31_0);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_63_32:
+ offload_pedit(fs, val, mask, IP6_SRC_63_32);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_95_64:
+ offload_pedit(fs, val, mask, IP6_SRC_95_64);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_127_96:
+ offload_pedit(fs, val, mask, IP6_SRC_127_96);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_DST_31_0:
+ offload_pedit(fs, val, mask, IP6_DST_31_0);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_63_32:
+ offload_pedit(fs, val, mask, IP6_DST_63_32);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_95_64:
+ offload_pedit(fs, val, mask, IP6_DST_95_64);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_127_96:
+ offload_pedit(fs, val, mask, IP6_DST_127_96);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ fs->nat_fport = val;
+- else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ } else {
+ fs->nat_lport = val >> 16;
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++ }
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ fs->nat_fport = val;
+- else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ } else {
+ fs->nat_lport = val >> 16;
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++ }
+ }
+- fs->nat_mode = NAT_MODE_ALL;
++ break;
+ }
+ }
+
++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
++ struct netlink_ext_ack *extack)
++{
++ u8 i = 0;
++
++ /* Extract the NAT mode to enable based on what 4-tuple fields
++ * are enabled to be overwritten. This ensures that the
++ * disabled fields don't get overwritten to 0.
++ */
++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++ const struct cxgb4_natmode_config *c;
++
++ c = &cxgb4_natmode_config_array[i];
++ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
++ natmode_flags == c->flags)
++ return 0;
++ }
++ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
++ return -EOPNOTSUPP;
++}
++
+ void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs)
+ {
+ struct flow_action_entry *act;
++ u8 natmode_flags = 0;
+ int i;
+
+ flow_action_for_each(i, act, actions) {
+@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct n
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+
+- process_pedit_field(fs, val, mask, offset, htype);
++ process_pedit_field(fs, val, mask, offset, htype,
++ &natmode_flags);
+ }
+ break;
+ case FLOW_ACTION_QUEUE:
+@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct n
+ break;
+ }
+ }
++ if (natmode_flags)
++ cxgb4_action_natmode_tweak(fs, natmode_flags);
++
+ }
+
+ static bool valid_l4_mask(u32 mask)
+@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
+ }
+
+ static bool valid_pedit_action(struct net_device *dev,
+- const struct flow_action_entry *act)
++ const struct flow_action_entry *act,
++ u8 *natmode_flags)
+ {
+ u32 mask, offset;
+ u8 htype;
+@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct ne
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++ break;
+ case PEDIT_IP4_DST:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct ne
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++ break;
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct ne
+ __func__);
+ return false;
+ }
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct ne
+ __func__);
+ return false;
+ }
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct n
+ struct netlink_ext_ack *extack,
+ u8 matchall_filter)
+ {
++ struct adapter *adap = netdev2adap(dev);
+ struct flow_action_entry *act;
+ bool act_redir = false;
+ bool act_pedit = false;
+ bool act_vlan = false;
++ u8 natmode_flags = 0;
+ int i;
+
+ if (!flow_action_basic_hw_stats_check(actions, extack))
+@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct n
+ break;
+ case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_REDIRECT: {
+- struct adapter *adap = netdev2adap(dev);
+ struct net_device *n_dev, *target_dev;
+ bool found = false;
+ unsigned int i;
+@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct n
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
+- bool pedit_valid = valid_pedit_action(dev, act);
++ bool pedit_valid = valid_pedit_action(dev, act,
++ &natmode_flags);
+
+ if (!pedit_valid)
+ return -EOPNOTSUPP;
+@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct n
+ return -EINVAL;
+ }
+
++ if (act_pedit) {
++ int ret;
++
++ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
++ extack);
++ if (ret)
++ return ret;
++ }
++
+ return 0;
+ }
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
+ #define PEDIT_TCP_SPORT_DPORT 0x0
+ #define PEDIT_UDP_SPORT_DPORT 0x0
+
++enum cxgb4_action_natmode_flags {
++ CXGB4_ACTION_NATMODE_NONE = 0,
++ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
++ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
++ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
++ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
++};
++
++/* TC PEDIT action to NATMODE translation entry */
++struct cxgb4_natmode_config {
++ enum chip_type chip;
++ u8 flags;
++ u8 natmode;
++};
++
+ void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs);
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:14 -0700
+Subject: [PATCH stable 5.9 02/24] ibmveth: Identify ingress large send packets.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 413f142cc05cb03f2d1ea83388e40c1ddc0d74e9 ]
+
+Ingress large send packets are identified either by the
+IBMVETH_RXQ_LRG_PKT flag in the receive buffer or by a -1 placed in
+the IP header checksum; which method is used depends on the firmware
+version. Frame geometry and sufficient header validation are performed
+by the hypervisor, eliminating the need for further header checks here.
+
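+Condensed to a hedged sketch (abbreviated from the hunk below), the
+resulting receive-path check is:
+
+  /* PHYP without PLSO support marks large send with -1 (0xffff) in the
+   * IPv4 header checksum field
+   */
+  if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+        struct iphdr *iph = (struct iphdr *)skb->data;
+
+        iph_check = iph->check;
+  }
+
+  if ((length > netdev->mtu + ETH_HLEN) || lrg_pkt || iph_check == 0xffff) {
+        ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+        adapter->rx_large_packets++;
+  }
+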
+Fixes: 7b5967389f5a ("ibmveth: set correct gso_size and gso_type")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_stru
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++ __sum16 iph_check = 0;
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+@@ -1385,7 +1386,17 @@ static int ibmveth_poll(struct napi_stru
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if (length > netdev->mtu + ETH_HLEN) {
++ /* PHYP without PLSO support places a -1 in the ip
++ * checksum for large send frames.
++ */
++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++ struct iphdr *iph = (struct iphdr *)skb->data;
++
++ iph_check = iph->check;
++ }
++
++ if ((length > netdev->mtu + ETH_HLEN) ||
++ lrg_pkt || iph_check == 0xffff) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:13 -0700
+Subject: [PATCH stable 5.9 01/24] ibmveth: Switch order of ibmveth_helper calls.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 5ce9ad815a296374ca21f43f3b1ab5083d202ee1 ]
+
+ibmveth_rx_csum_helper() must be called after ibmveth_rx_mss_helper()
+as ibmveth_rx_csum_helper() may alter ip and tcp checksum values.
+
+Fixes: 66aa0678efc2 ("ibmveth: Support to enable LSO/CSO for Trunk VEA.")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1385,16 +1385,16 @@ static int ibmveth_poll(struct napi_stru
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if (csum_good) {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- ibmveth_rx_csum_helper(skb, adapter);
+- }
+-
+ if (length > netdev->mtu + ETH_HLEN) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
++ if (csum_good) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ ibmveth_rx_csum_helper(skb, adapter);
++ }
++
+ napi_gro_receive(napi, skb); /* send it up */
+
+ netdev->stats.rx_packets++;
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: David Ahern <dsahern@kernel.org>
+Date: Fri, 9 Oct 2020 11:01:01 -0700
+Subject: [PATCH stable 5.9 04/24] ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit 874fb9e2ca949b443cc419a4f2227cafd4381d39 ]
+
+Tobias reported regressions in IPsec tests following the patch
+referenced by the Fixes tag below. The root cause is dropping the
+reset of the flowi4_oif after the fib_lookup. Apparently it is
+needed for xfrm cases, so restore the oif update to ip_route_output_flow
+right before the call to xfrm_lookup_route.
+
+Fixes: 2fbc6e89b2f1 ("ipv4: Update exception handling for multipath routes via same device")
+Reported-by: Tobias Brunner <tobias@strongswan.org>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2769,10 +2769,12 @@ struct rtable *ip_route_output_flow(stru
+ if (IS_ERR(rt))
+ return rt;
+
+- if (flp4->flowi4_proto)
++ if (flp4->flowi4_proto) {
++ flp4->flowi4_oif = rt->dst.dev->ifindex;
+ rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+ flowi4_to_flowi(flp4),
+ sk, 0);
++ }
+
+ return rt;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Jonathan Lemon <bsd@fb.com>
+Date: Thu, 8 Oct 2020 11:45:26 -0700
+Subject: [PATCH stable 5.9 05/24] mlx4: handle non-napi callers to napi_poll
+
+From: Jonathan Lemon <bsd@fb.com>
+
+[ Upstream commit b2b8a92733b288128feb57ffa694758cf475106c ]
+
+netcons calls napi_poll with a budget of 0 to transmit packets.
+Handle this by (see the sketch below):
+ - skipping RX processing
+ - not trying to recycle TX packets into the RX cache
+
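+A hedged sketch of the convention (generic NAPI shape, illustrative
+helper names, not this driver's code):
+
+  static int example_poll(struct napi_struct *napi, int budget)
+  {
+        int work_done = 0;
+
+        /* TX completions are processed even when budget == 0 */
+        example_clean_tx(napi);                         /* hypothetical */
+
+        /* netpoll passes budget == 0: skip RX entirely */
+        if (budget)
+                work_done = example_clean_rx(napi, budget); /* hypothetical */
+
+        /* 0 < 0 is false, so a zero-budget call never completes NAPI */
+        if (work_done < budget)
+                napi_complete_done(napi, work_done);
+
+        return work_done;
+  }
+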
+Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 +++
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struc
+ bool clean_complete = true;
+ int done;
+
++ if (!budget)
++ return 0;
++
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_
+ .dma = tx_info->map0_dma,
+ };
+
+- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(tx_info->page);
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 9 Oct 2020 19:00:00 +0200
+Subject: [PATCH stable 5.9 06/24] mptcp: fix fallback for MP_JOIN subflows
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit d582484726c4c46c8580923e855665fb91e3463e ]
+
+Additional/MP_JOIN subflows that do not pass some initial handshake
+tests currently cause fallback to TCP. That is an RFC violation:
+we should instead reset the subflow and leave the msk untouched.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/91
+Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c | 32 +++++++++++++++++++++++++-------
+ net/mptcp/protocol.h | 1 +
+ net/mptcp/subflow.c | 10 ++++++++--
+ 3 files changed, 34 insertions(+), 9 deletions(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -626,6 +626,12 @@ bool mptcp_established_options(struct so
+ if (unlikely(mptcp_check_fallback(sk)))
+ return false;
+
++ /* prevent adding of any MPTCP related options on reset packet
++ * until we support MP_TCPRST/MP_FASTCLOSE
++ */
++ if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
++ return false;
++
+ if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
+ ret = true;
+ else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
+@@ -676,7 +682,7 @@ bool mptcp_synack_options(const struct r
+ return false;
+ }
+
+-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
++static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ struct sk_buff *skb,
+ struct mptcp_options_received *mp_opt)
+@@ -693,15 +699,20 @@ static bool check_fully_established(stru
+ TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
+ subflow->mp_join && mp_opt->mp_join &&
+ READ_ONCE(msk->pm.server_side))
+- tcp_send_ack(sk);
++ tcp_send_ack(ssk);
+ goto fully_established;
+ }
+
+- /* we should process OoO packets before the first subflow is fully
+- * established, but not expected for MP_JOIN subflows
++ /* we must process OoO packets before the first subflow is fully
++ * established. OoO packets are instead a protocol violation
++ * for MP_JOIN subflows as the peer must not send any data
++ * before receiving the forth ack - cfr. RFC 8684 section 3.2.
+ */
+- if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
++ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
++ if (subflow->mp_join)
++ goto reset;
+ return subflow->mp_capable;
++ }
+
+ if (mp_opt->dss && mp_opt->use_ack) {
+ /* subflows are fully established as soon as we get any
+@@ -713,9 +724,12 @@ static bool check_fully_established(stru
+ }
+
+ /* If the first established packet does not contain MP_CAPABLE + data
+- * then fallback to TCP
++ * then fallback to TCP. Fallback scenarios requires a reset for
++ * MP_JOIN subflows.
+ */
+ if (!mp_opt->mp_capable) {
++ if (subflow->mp_join)
++ goto reset;
+ subflow->mp_capable = 0;
+ pr_fallback(msk);
+ __mptcp_do_fallback(msk);
+@@ -732,12 +746,16 @@ fully_established:
+
+ subflow->pm_notified = 1;
+ if (subflow->mp_join) {
+- clear_3rdack_retransmission(sk);
++ clear_3rdack_retransmission(ssk);
+ mptcp_pm_subflow_established(msk, subflow);
+ } else {
+ mptcp_pm_fully_established(msk);
+ }
+ return true;
++
++reset:
++ mptcp_subflow_reset(ssk);
++ return false;
+ }
+
+ static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -348,6 +348,7 @@ void mptcp_subflow_fully_established(str
+ struct mptcp_options_received *mp_opt);
+ bool mptcp_subflow_data_available(struct sock *sk);
+ void __init mptcp_subflow_init(void);
++void mptcp_subflow_reset(struct sock *ssk);
+
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -270,6 +270,13 @@ static bool subflow_thmac_valid(struct m
+ return thmac == subflow->thmac;
+ }
+
++void mptcp_subflow_reset(struct sock *ssk)
++{
++ tcp_set_state(ssk, TCP_CLOSE);
++ tcp_send_active_reset(ssk, GFP_ATOMIC);
++ tcp_done(ssk);
++}
++
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -342,8 +349,7 @@ fallback:
+ return;
+
+ do_reset:
+- tcp_send_active_reset(sk, GFP_ATOMIC);
+- tcp_done(sk);
++ mptcp_subflow_reset(sk);
+ }
+
+ struct request_sock_ops mptcp_subflow_request_sock_ops;
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 9 Oct 2020 19:00:01 +0200
+Subject: [PATCH stable 5.9 07/24] mptcp: subflows garbage collection
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 0e4f35d7880157ceccf0a58377d778b02762af82 ]
+
+The msk can close MP_JOIN subflows if the initial handshake
+fails. Currently such subflows are kept alive in the
+conn_list until the msk itself is closed.
+
+Beyond the wasted memory, we could end-up sending the
+DATA_FIN and the DATA_FIN ack on such socket, even after a
+reset.
+
+Fixes: 43b54c6ee382 ("mptcp: Use full MPTCP-level disconnect state machine")
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 17 +++++++++++++++++
+ net/mptcp/protocol.h | 1 +
+ net/mptcp/subflow.c | 6 ++++++
+ 3 files changed, 24 insertions(+)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1383,6 +1383,20 @@ static void pm_work(struct mptcp_sock *m
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
++static void __mptcp_close_subflow(struct mptcp_sock *msk)
++{
++ struct mptcp_subflow_context *subflow, *tmp;
++
++ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++ if (inet_sk_state_load(ssk) != TCP_CLOSE)
++ continue;
++
++ __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
++ }
++}
++
+ static void mptcp_worker(struct work_struct *work)
+ {
+ struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
+@@ -1400,6 +1414,9 @@ static void mptcp_worker(struct work_str
+ mptcp_clean_una(sk);
+ mptcp_check_data_fin_ack(sk);
+ __mptcp_flush_join_list(msk);
++ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++ __mptcp_close_subflow(msk);
++
+ __mptcp_move_skbs(msk);
+
+ if (msk->pm.status)
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -90,6 +90,7 @@
+ #define MPTCP_WORK_RTX 2
+ #define MPTCP_WORK_EOF 3
+ #define MPTCP_FALLBACK_DONE 4
++#define MPTCP_WORK_CLOSE_SUBFLOW 5
+
+ struct mptcp_options_received {
+ u64 sndr_key;
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -272,9 +272,15 @@ static bool subflow_thmac_valid(struct m
+
+ void mptcp_subflow_reset(struct sock *ssk)
+ {
++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++ struct sock *sk = subflow->conn;
++
+ tcp_set_state(ssk, TCP_CLOSE);
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ tcp_done(ssk);
++ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
++ schedule_work(&mptcp_sk(sk)->work))
++ sock_hold(sk);
+ }
+
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 10:39:42 +0200
+Subject: [PATCH stable 5.9 08/24] net: dsa: microchip: fix race condition
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 8098bd69bc4e925070313b1b95d03510f4f24738 ]
+
+Between queuing the delayed work and finishing the setup of the dsa
+ports, the process may sleep in request_module() (via
+phy_device_create()) and the queued work may be executed prior to the
+switch net devices being registered. In ksz_mib_read_work(), a NULL
+dereference will happen within netif_carrier_ok(dp->slave).
+
+Not queuing the delayed work in ksz_init_mib_timer() makes things even
+worse because the work will now be queued for immediate execution
+(instead of 2000 ms) in ksz_mac_link_down() via
+dsa_port_link_register_of().
+
+Call tree:
+ksz9477_i2c_probe()
+\--ksz9477_switch_register()
+ \--ksz_switch_register()
+ +--dsa_register_switch()
+ | \--dsa_switch_probe()
+ | \--dsa_tree_setup()
+ | \--dsa_tree_setup_switches()
+ | +--dsa_switch_setup()
+ | | +--ksz9477_setup()
+ | | | \--ksz_init_mib_timer()
+ | | | |--/* Start the timer 2 seconds later. */
+ | | | \--schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
+ | | \--__mdiobus_register()
+ | | \--mdiobus_scan()
+ | | \--get_phy_device()
+ | | +--get_phy_id()
+ | | \--phy_device_create()
+ | | |--/* sleeping, ksz_mib_read_work() can be called meanwhile */
+ | | \--request_module()
+ | |
+ | \--dsa_port_setup()
+ | +--/* Called for non-CPU ports */
+ | +--dsa_slave_create()
+ | | +--/* Too late, ksz_mib_read_work() may be called beforehand */
+ | | \--port->slave = ...
+ | ...
+ | +--/* Called for CPU port */
+ | \--dsa_port_link_register_of()
+ | \--ksz_mac_link_down()
+ | +--/* mib_read must be initialized here */
+ | +--/* work is already scheduled, so it will be executed after 2000 ms */
+ | \--schedule_delayed_work(&dev->mib_read, 0);
+ \-- /* here port->slave is setup properly, scheduling the delayed work should be safe */
+
+Solution (see the sketch below):
+1. Do not queue (only initialize) delayed work in ksz_init_mib_timer().
+2. Only queue delayed work in ksz_mac_link_down() if init is completed.
+3. Queue work once in ksz_switch_register(), after dsa_register_switch()
+has completed.
+
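+Collated from the hunks below into one sketch ("init early, arm late",
+with mib_read_interval doubling as the "timer started" flag):
+
+  /* ksz_init_mib_timer(), during setup: initialize only */
+  INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
+  /* ksz_mac_link_down(), possibly before setup finished: */
+  if (dev->mib_read_interval)           /* timer started? */
+        schedule_delayed_work(&dev->mib_read, 0);
+
+  /* ksz_switch_register(), after dsa_register_switch() succeeded: */
+  dev->mib_read_interval = msecs_to_jiffies(30000);
+  schedule_delayed_work(&dev->mib_read, 0);
+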
+Fixes: 7c6ff470aa86 ("net: dsa: microchip: add MIB counter reading support")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_devic
+
+ INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
+- /* Read MIB counters every 30 seconds to avoid overflow. */
+- dev->mib_read_interval = msecs_to_jiffies(30000);
+-
+ for (i = 0; i < dev->mib_port_cnt; i++)
+ dev->dev_ops->port_init_cnt(dev, i);
+-
+- /* Start the timer 2 seconds later. */
+- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
+ }
+ EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
+
+@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch
+
+ /* Read all MIB counters when the link is going down. */
+ p->read = true;
+- schedule_delayed_work(&dev->mib_read, 0);
++ /* timer started */
++ if (dev->mib_read_interval)
++ schedule_delayed_work(&dev->mib_read, 0);
+ }
+ EXPORT_SYMBOL_GPL(ksz_mac_link_down);
+
+@@ -450,6 +446,12 @@ int ksz_switch_register(struct ksz_devic
+ return ret;
+ }
+
++ /* Read MIB counters every 30 seconds to avoid overflow. */
++ dev->mib_read_interval = msecs_to_jiffies(30000);
++
++ /* Start the MIB timer. */
++ schedule_delayed_work(&dev->mib_read, 0);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(ksz_switch_register);
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Tue, 6 Oct 2020 15:52:53 +0200
+Subject: [PATCH stable 5.9 10/24] net: fec: Fix PHY init after phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 0da1ccbbefb662915228bc17e1c7d4ad28b3ddab ]
+
+phy_reset_after_clk_enable() does a PHY reset, which means the PHY
+loses its register settings. fec_enet_mii_probe() starts the PHY
+and makes the necessary calls to configure the PHY via the PHY
+framework, loading the correct register settings into it. Therefore,
+fec_enet_mii_probe() should be called only after the PHY has been
+reset, not before as it is now.
+
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Tested-by: Richard Leitner <richard.leitner@skidata.com>
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3005,17 +3005,17 @@ fec_enet_open(struct net_device *ndev)
+ /* Init MAC prior to mii bus probe */
+ fec_restart(ndev);
+
+- /* Probe and connect to PHY when open the interface */
+- ret = fec_enet_mii_probe(ndev);
+- if (ret)
+- goto err_enet_mii_probe;
+-
+ /* Call phy_reset_after_clk_enable() again if it failed during
+ * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ */
+ if (reset_again)
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
++ /* Probe and connect to PHY when open the interface */
++ ret = fec_enet_mii_probe(ndev);
++ if (ret)
++ goto err_enet_mii_probe;
++
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Sat, 10 Oct 2020 11:10:00 +0200
+Subject: [PATCH stable 5.9 09/24] net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 64a632da538a6827fad0ea461925cedb9899ebe2 ]
+
+phy_reset_after_clk_enable() is always called with ndev->phydev;
+however, that pointer may be NULL even though the PHY device instance
+already exists and is sufficient to perform the PHY reset.
+
+This condition happens in fec_open(), where the clock must be enabled
+first, then the PHY must be reset, and then the PHY IDs can be read
+out of the PHY.
+
+If the PHY is still not bound to the MAC, but there is an OF PHY node
+and a matching PHY device instance already, use the OF PHY node to
+obtain the PHY device instance, and then use that PHY device instance
+when triggering the PHY reset.
+
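+Worth noting (implicit in the change below): of_phy_find_device()
+returns the PHY device with a reference held, so the reset path has to
+drop it again; sketched, with a defensive NULL check:
+
+  phy_dev = of_phy_find_device(fep->phy_node);
+  if (phy_dev) {
+        phy_reset_after_clk_enable(phy_dev);
+        /* release the reference taken by of_phy_find_device() */
+        put_device(&phy_dev->mdio.dev);
+  }
+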
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1912,6 +1912,27 @@ out:
+ return ret;
+ }
+
++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ struct phy_device *phy_dev = ndev->phydev;
++
++ if (phy_dev) {
++ phy_reset_after_clk_enable(phy_dev);
++ } else if (fep->phy_node) {
++ /*
++ * If the PHY still is not bound to the MAC, but there is
++ * OF PHY node and a matching PHY device instance already,
++ * use the OF PHY node to obtain the PHY device instance,
++ * and then use that PHY device instance when triggering
++ * the PHY reset.
++ */
++ phy_dev = of_phy_find_device(fep->phy_node);
++ phy_reset_after_clk_enable(phy_dev);
++ put_device(&phy_dev->mdio.dev);
++ }
++}
++
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct ne
+ if (ret)
+ goto failed_clk_ref;
+
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
+ } else {
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+@@ -2993,7 +3014,7 @@ fec_enet_open(struct net_device *ndev)
+ * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ */
+ if (reset_again)
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Yonghong Song <yhs@fb.com>
+Date: Wed, 14 Oct 2020 07:46:12 -0700
+Subject: [PATCH stable 5.9 11/24] net: fix pos incrementment in ipv6_route_seq_next
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 6617dfd440149e42ce4d2be615eb31a4755f4d30 ]
+
+Commit 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+tried to fix the issue where seq_file pos is not increased
+if a NULL element is returned with seq_ops->next(). See bug
+ https://bugzilla.kernel.org/show_bug.cgi?id=206283
+The commit effectively does:
+ - increase pos for all seq_ops->start()
+ - increase pos for all seq_ops->next()
+
+For ipv6_route, increasing pos for all seq_ops->next() is correct.
+But increasing pos for seq_ops->start() is not correct
+since pos is used to determine how many items to skip during
+seq_ops->start():
+ iter->skip = *pos;
+seq_ops->start() just fetches the *current* pos item.
+The item can be skipped only after seq_ops->show() which essentially
+is the beginning of seq_ops->next().
+
+For example, I have 7 ipv6 route entries,
+ root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=4096
+ 00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001 eth0
+ fe800000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000001 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ 00000000000000000000000000000001 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000003 00000000 80200001 lo
+ fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001 eth0
+ ff000000000000000000000000000000 08 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000004 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ 0+1 records in
+ 0+1 records out
+ 1050 bytes (1.0 kB, 1.0 KiB) copied, 0.00707908 s, 148 kB/s
+ root@arch-fb-vm1:~/net-next
+
+In the above, I specify buffer size 4096, so all records can be returned
+to user space with a single trip to the kernel.
+
+If I use buffer size 128, since each record size is 149, internally
+kernel seq_read() will read 149 into its internal buffer and return the data
+to user space in two read() syscalls. Then user read() syscall will trigger
+next seq_ops->start(). Since the current implementation increased pos even
+for seq_ops->start(), it will skip record #2, #4 and #6, assuming the first
+record is #1.
+
+ root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=128
+ 00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+ fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001 eth0
+ 00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200 lo
+4+1 records in
+4+1 records out
+600 bytes copied, 0.00127758 s, 470 kB/s
+
+To fix the problem, create a fake pos pointer so seq_ops->start()
+won't actually increase seq_file pos. With this fix, the
+above `dd` command with `bs=128` will show the correct result.
+
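+The off-by-one bookkeeping can be reproduced outside the kernel; a
+minimal, runnable user-space simulation (illustrative only, one record
+returned per read() syscall):
+
+  #include <stdio.h>
+
+  #define NRECS 7
+
+  int main(void)
+  {
+        for (int buggy = 0; buggy <= 1; buggy++) {
+                long pos = 0;
+
+                printf(buggy ? "buggy: " : "fixed: ");
+                while (pos < NRECS) {
+                        long item = pos;   /* seq_ops->start(): fetch current */
+                        if (buggy)
+                                pos++;     /* bug: start() also advances pos */
+                        printf("#%ld ", item + 1);  /* seq_ops->show() */
+                        pos++;             /* seq_ops->next() advances pos */
+                }
+                printf("\n");  /* fixed: #1..#7, buggy: #1 #3 #5 #7 */
+        }
+        return 0;
+  }
+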
+Fixes: 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+Cc: Alexei Starovoitov <ast@kernel.org>
+Suggested-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2618,8 +2618,10 @@ static void *ipv6_route_seq_start(struct
+ iter->skip = *pos;
+
+ if (iter->tbl) {
++ loff_t p = 0;
++
+ ipv6_route_seq_setup_walk(iter, net);
+- return ipv6_route_seq_next(seq, NULL, pos);
++ return ipv6_route_seq_next(seq, NULL, &p);
+ } else {
+ return NULL;
+ }
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Alex Elder <elder@linaro.org>
+Date: Fri, 9 Oct 2020 15:28:48 -0500
+Subject: [PATCH stable 5.9 12/24] net: ipa: skip suspend/resume activities if not set up
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit d1704382821032fede445b816f4296fd379baacf ]
+
+When processing a system suspend request we suspend modem endpoints
+if they are enabled, and call ipa_cmd_tag_process() (which issues
+IPA commands) to ensure the IPA pipeline is cleared. It is an error
+to attempt to issue an IPA command before setup is complete, so this
+is clearly a bug. But we also shouldn't suspend or resume any
+endpoints that have not been set up.
+
+Have ipa_endpoint_suspend() and ipa_endpoint_resume() immediately
+return if setup hasn't completed, to avoid any attempt to configure
+endpoints or issue IPA commands in that case.
+
+Fixes: 84f9bd12d46d ("soc: qcom: ipa: IPA endpoints")
+Tested-by: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ipa/ipa_endpoint.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1471,6 +1471,9 @@ void ipa_endpoint_resume_one(struct ipa_
+
+ void ipa_endpoint_suspend(struct ipa *ipa)
+ {
++ if (!ipa->setup_complete)
++ return;
++
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+@@ -1482,6 +1485,9 @@ void ipa_endpoint_suspend(struct ipa *ip
+
+ void ipa_endpoint_resume(struct ipa *ipa)
+ {
++ if (!ipa->setup_complete)
++ return;
++
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Thu, 8 Oct 2020 23:23:10 +0200
+Subject: [PATCH stable 5.9 24/24] net: j1939: j1939_session_fresh_new(): fix missing initialization of skbcnt
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit 13ba4c434422837d7c8c163f9c8d854e67bf3c99 ]
+
+This patch adds the initialization of skbcnt, similar to:
+
+ e009f95b1543 ("can: j1935: j1939_tp_tx_dat_new(): fix missing initialization of skbcnt")
+
+Let's play safe and initialize this skbcnt as well.
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/transport.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1488,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(s
+ skb->dev = priv->ndev;
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+ skcb = j1939_skb_to_cb(skb);
+ memcpy(skcb, rel_skcb, sizeof(*skcb));
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Tue, 6 Oct 2020 18:26:17 +0200
+Subject: [PATCH stable 5.9 13/24] net: mptcp: make DACK4/DACK8 usage consistent among all subflows
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 37198e93ced70733f0b993dff28b7c33857e254f ]
+
+using packetdrill it's possible to observe the same MPTCP DSN being acked
+by different subflows with DACK4 and DACK8. This is in contrast with what
+specified in RFC8684 §3.3.2: if an MPTCP endpoint transmits a 64-bit wide
+DSN, it MUST be acknowledged with a 64-bit wide DACK. Fix 'use_64bit_ack'
+variable to make it a property of MPTCP sockets, not TCP subflows.
+
+Fixes: a0c1d0eafd1e ("mptcp: Use 32-bit DATA_ACK when possible")
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c | 2 +-
+ net/mptcp/protocol.h | 2 +-
+ net/mptcp/subflow.c | 3 +--
+ 3 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -516,7 +516,7 @@ static bool mptcp_established_options_ds
+ return ret;
+ }
+
+- if (subflow->use_64bit_ack) {
++ if (READ_ONCE(msk->use_64bit_ack)) {
+ ack_size = TCPOLEN_MPTCP_DSS_ACK64;
+ opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
+ opts->ext_copy.ack64 = 1;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -203,6 +203,7 @@ struct mptcp_sock {
+ bool fully_established;
+ bool rcv_data_fin;
+ bool snd_data_fin_enable;
++ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
+ spinlock_t join_list_lock;
+ struct work_struct work;
+ struct list_head conn_list;
+@@ -295,7 +296,6 @@ struct mptcp_subflow_context {
+ backup : 1,
+ data_avail : 1,
+ rx_eof : 1,
+- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
+ can_ack : 1; /* only after processing the remote a key */
+ u32 remote_nonce;
+ u64 thmac;
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -781,12 +781,11 @@ static enum mapping_status get_mapping_s
+ if (!mpext->dsn64) {
+ map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
+ mpext->data_seq);
+- subflow->use_64bit_ack = 0;
+ pr_debug("expanded seq=%llu", subflow->map_seq);
+ } else {
+ map_seq = mpext->data_seq;
+- subflow->use_64bit_ack = 1;
+ }
++ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
+
+ if (subflow->map_valid) {
+ /* Allow replacing only with an identical map */
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Leon Romanovsky <leonro@nvidia.com>
+Date: Wed, 14 Oct 2020 11:56:42 +0300
+Subject: [PATCH stable 5.9 14/24] net: sched: Fix suspicious RCU usage while accessing tcf_tunnel_info
+
+From: Leon Romanovsky <leonro@nvidia.com>
+
+[ Upstream commit d086a1c65aabb5a4e1edc580ca583e2964c62b44 ]
+
+Accessing tcf_tunnel_info() produces the following splat, so fix it
+by dereferencing the tcf_tunnel_key_params pointer with a marker that
+the internal tcfa_lock is held.
+
+ =============================
+ WARNING: suspicious RCU usage
+ 5.9.0+ #1 Not tainted
+ -----------------------------
+ include/net/tc_act/tc_tunnel_key.h:59 suspicious rcu_dereference_protected() usage!
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 2, debug_locks = 1
+ 1 lock held by tc/34839:
+ #0: ffff88828572c2a0 (&p->tcfa_lock){+...}-{2:2}, at: tc_setup_flow_action+0xb3/0x48b5
+ stack backtrace:
+ CPU: 1 PID: 34839 Comm: tc Not tainted 5.9.0+ #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+ dump_stack+0x9a/0xd0
+ tc_setup_flow_action+0x14cb/0x48b5
+ fl_hw_replace_filter+0x347/0x690 [cls_flower]
+ fl_change+0x2bad/0x4875 [cls_flower]
+ tc_new_tfilter+0xf6f/0x1ba0
+ rtnetlink_rcv_msg+0x5f2/0x870
+ netlink_rcv_skb+0x124/0x350
+ netlink_unicast+0x433/0x700
+ netlink_sendmsg+0x6f1/0xbd0
+ sock_sendmsg+0xb0/0xe0
+ ____sys_sendmsg+0x4fa/0x6d0
+ ___sys_sendmsg+0x12e/0x1b0
+ __sys_sendmsg+0xa4/0x120
+ do_syscall_64+0x2d/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f1f8cd4fe57
+ Code: 0c 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+ RSP: 002b:00007ffdc1e193b8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f1f8cd4fe57
+ RDX: 0000000000000000 RSI: 00007ffdc1e19420 RDI: 0000000000000003
+ RBP: 000000005f85aafa R08: 0000000000000001 R09: 00007ffdc1e1936c
+ R10: 000000000040522d R11: 0000000000000246 R12: 0000000000000001
+ R13: 0000000000000000 R14: 00007ffdc1e1d6f0 R15: 0000000000482420
+
+Fixes: 3ebaf6da0716 ("net: sched: Do not assume RTNL is held in tunnel key action helpers")
+Fixes: 7a47281439ba ("net: sched: lock action when translating it to flow_action infra")
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tc_act/tc_tunnel_key.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/include/net/tc_act/tc_tunnel_key.h
++++ b/include/net/tc_act/tc_tunnel_key.h
+@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
++ struct tcf_tunnel_key_params *params;
++
++ params = rcu_dereference_protected(t->params,
++ lockdep_is_held(&a->tcfa_lock));
+
+ return ¶ms->tcft_enc_metadata->u.tun_info;
+ #else
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Karsten Graul <kgraul@linux.ibm.com>
+Date: Wed, 14 Oct 2020 19:43:27 +0200
+Subject: [PATCH stable 5.9 15/24] net/smc: fix use-after-free of delayed events
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit d535ca1367787ddc8bff22d679a11f864c8228bc ]
+
+When a delayed event is enqueued, the event worker will send this
+event the next time it runs and no other flow is currently
+active. The event handler is called for the delayed event, but the
+pointer to the event remains set in lgr->delayed_event; it is only
+cleared later in the processing, by smc_llc_flow_start().
+This can lead to a use-after-free condition when the processing does
+not reach smc_llc_flow_start() but frees the event because of an error
+situation: the delayed_event pointer is then still set although the
+event has been freed.
+Fix this by always clearing the delayed event pointer when the event is
+provided to the event handler for processing, and remove the code to
+clear it in smc_llc_flow_start().
+
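+The fix boils down to a take-ownership pattern, sketched here
+(abbreviated from the hunk below): detach the event from the shared
+field first, so every exit path either consumes or frees an event that
+nobody else can still reach.
+
+  qentry = lgr->delayed_event;
+  lgr->delayed_event = NULL;            /* clear before processing */
+  if (smc_link_usable(qentry->link))
+        smc_llc_event_handler(qentry);  /* consumes the event */
+  else
+        kfree(qentry);                  /* error path: nothing dangles */
+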
+Fixes: 555da9af827d ("net/smc: add event-based llc_flow framework")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_llc.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct sm
+ default:
+ flow->type = SMC_LLC_FLOW_NONE;
+ }
+- if (qentry == lgr->delayed_event)
+- lgr->delayed_event = NULL;
+ smc_llc_flow_qentry_set(flow, qentry);
+ spin_unlock_bh(&lgr->llc_flow_lock);
+ return true;
+@@ -1603,13 +1601,12 @@ static void smc_llc_event_work(struct wo
+ struct smc_llc_qentry *qentry;
+
+ if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
+- if (smc_link_usable(lgr->delayed_event->link)) {
+- smc_llc_event_handler(lgr->delayed_event);
+- } else {
+- qentry = lgr->delayed_event;
+- lgr->delayed_event = NULL;
++ qentry = lgr->delayed_event;
++ lgr->delayed_event = NULL;
++ if (smc_link_usable(qentry->link))
++ smc_llc_event_handler(qentry);
++ else
+ kfree(qentry);
+- }
+ }
+
+ again:
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Karsten Graul <kgraul@linux.ibm.com>
+Date: Wed, 14 Oct 2020 19:43:28 +0200
+Subject: [PATCH stable 5.9 16/24] net/smc: fix valid DMBE buffer sizes
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit ef12ad45880b696eb993d86c481ca891836ab593 ]
+
+SMCD_DMBE_SIZES should include all valid DMBE buffer sizes, so the
+correct value is 6, which means 1MB. With 7, the registration of an ISM
+buffer would always fail because of the invalid size requested.
+Fix that and set the value to 6.
+
+Fixes: c6ba7c9ba43d ("net/smc: add base infrastructure for SMC-D and ISM")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1597,7 +1597,7 @@ out:
+ return rc;
+ }
+
+-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ bool is_dmb, int bufsize)
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Rohit Maheshwari <rohitm@chelsio.com>
+Date: Thu, 8 Oct 2020 00:10:21 +0530
+Subject: [PATCH stable 5.9 17/24] net/tls: sendfile fails with ktls offload
+
+From: Rohit Maheshwari <rohitm@chelsio.com>
+
+[ Upstream commit ea1dd3e9d080c961b9a451130b61c72dc9a5397b ]
+
+When sendpage first gets called and there is more data, 'more' in
+tls_push_data() gets set, which later sets pending_open_record_frags.
+But when there is no data left in the file and tls_push_data() gets
+called for the last time, pending_open_record_frags doesn't get reset.
+Later, when the 2 bytes of encrypted alert come in via sendmsg, the
+code first checks pending_open_record_frags; since it is set, a record
+with 0 data bytes to encrypt is created, meaning the record length is
+prepend_size + tag_size only, which causes problems.
+We should set/reset pending_open_record_frags based on the more bit.
+
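+Condensed to a sketch (abbreviated from the change below): track the
+caller's "more" signal across the push loop and mirror it into the
+context exactly once at the end, so no stale state survives.
+
+  bool more = false;
+
+  /* in the push loop, when the caller announces more data: */
+  if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+        more = true;    /* record stays open for the next call */
+        break;
+  }
+
+  /* after the loop: set OR reset pending_open_record_frags */
+  tls_ctx->pending_open_record_frags = more;
+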
+Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure")
+Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_device.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ struct tls_record_info *record = ctx->open_record;
+ int tls_push_record_flags;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+- int copy, rc = 0;
++ bool more = false;
+ bool done = false;
++ int copy, rc = 0;
+ long timeo;
+
+ if (flags &
+@@ -492,9 +492,8 @@ handle_error:
+ if (!size) {
+ last_record:
+ tls_push_record_flags = flags;
+- if (more) {
+- tls_ctx->pending_open_record_frags =
+- !!record->num_frags;
++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++ more = true;
+ break;
+ }
+
+@@ -526,6 +525,8 @@ last_record:
+ }
+ } while (!done);
+
++ tls_ctx->pending_open_record_frags = more;
++
+ if (orig_size - size > 0)
+ rc = orig_size - size;
+
--- /dev/null
+ibmveth-switch-order-of-ibmveth_helper-calls.patch
+ibmveth-identify-ingress-large-send-packets.patch
+cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch
+ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
+mlx4-handle-non-napi-callers-to-napi_poll.patch
+mptcp-fix-fallback-for-mp_join-subflows.patch
+mptcp-subflows-garbage-collection.patch
+net-dsa-microchip-fix-race-condition.patch
+net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch
+net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch
+net-fix-pos-incrementment-in-ipv6_route_seq_next.patch
+net-ipa-skip-suspend-resume-activities-if-not-set-up.patch
+net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch
+net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch
+net-smc-fix-use-after-free-of-delayed-events.patch
+net-smc-fix-valid-dmbe-buffer-sizes.patch
+net-tls-sendfile-fails-with-ktls-offload.patch
+tipc-fix-null-pointer-dereference-in-tipc_named_rcv.patch
+tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
+socket-fix-option-so_timestamping_new.patch
+socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch
+can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch
+can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch
+net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 11:35:42 +0200
+Subject: [PATCH stable 5.9 21/24] socket: don't clear SOCK_TSTAMP_NEW when SO_TIMESTAMPNS is disabled
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 4e3bbb33e6f36e4b05be1b1b9b02e3dd5aaa3e69 ]
+
+SOCK_TSTAMP_NEW (timespec64 instead of timespec) is also used for
+hardware time stamps (configured via SO_TIMESTAMPING_NEW).
+
+User space (ptp4l) first configures hardware time stamping via
+SO_TIMESTAMPING_NEW, which sets SOCK_TSTAMP_NEW. In the next step, ptp4l
+disables SO_TIMESTAMPNS(_NEW) (software time stamps), but this must not
+switch hardware time stamps back to "32 bit mode".
+
+This problem happens on 32-bit platforms where the libc has already
+switched to struct timespec64 (from SO_TIMExxx_OLD to SO_TIMExxx_NEW
+socket options). ptp4l complains with "missing timestamp on transmitted
+peer delay request" because the wrong format is received (and
+discarded).
+
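+A sketch of the user-space sequence involved (error handling omitted;
+availability of the _NEW option macros depends on the libc and kernel
+headers in use, and the function name here is made up):
+
+  #include <sys/socket.h>
+  #include <linux/net_tstamp.h>
+
+  static void configure_like_ptp4l(int fd)
+  {
+          int hwts = SOF_TIMESTAMPING_TX_HARDWARE |
+                     SOF_TIMESTAMPING_RX_HARDWARE |
+                     SOF_TIMESTAMPING_RAW_HARDWARE;
+          int off = 0;
+
+          /* sets SOCK_TSTAMP_NEW (64 bit timestamp format) */
+          setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING_NEW, &hwts, sizeof(hwts));
+          /* must not clear SOCK_TSTAMP_NEW again */
+          setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &off, sizeof(off));
+  }
+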
+Fixes: 887feae36aee ("socket: Add SO_TIMESTAMP[NS]_NEW")
+Fixes: 783da70e8396 ("net: add sock_enable_timestamps")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -769,7 +769,6 @@ static void __sock_set_timestamps(struct
+ } else {
+ sock_reset_flag(sk, SOCK_RCVTSTAMP);
+ sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+ }
+ }
+
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 11:35:41 +0200
+Subject: [PATCH stable 5.9 20/24] socket: fix option SO_TIMESTAMPING_NEW
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 59e611a566e7cd48cf54b6777a11fe3f9c2f9db5 ]
+
+The comparison of optname with SO_TIMESTAMPING_NEW is the wrong way
+around, so SOCK_TSTAMP_NEW will first be set and then reset again.
+Additionally, move it out of the test for SOF_TIMESTAMPING_RX_SOFTWARE,
+as this seems unrelated.
+
+This problem happens on 32-bit platforms where the libc has already
+switched to struct timespec64 (from SO_TIMExxx_OLD to SO_TIMExxx_NEW
+socket options). ptp4l complains with "missing timestamp on transmitted
+peer delay request" because the wrong format is received (and
+discarded).
+
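+For reference, a minimal model of the sock_valbool_flag() call the fix
+uses (simplified; the real helper operates on the sock's flag bits):
+
+  /* set or clear a flag bit depending on a condition, so
+   * SOCK_TSTAMP_NEW simply tracks which optname was passed in,
+   * independent of SOF_TIMESTAMPING_RX_SOFTWARE */
+  static void valbool_flag(unsigned long *flags, int bit, int cond)
+  {
+          if (cond)
+                  *flags |= 1UL << bit;
+          else
+                  *flags &= ~(1UL << bit);
+  }
+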
+Fixes: 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Reviewed-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Reviewed-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1007,8 +1007,6 @@ set_sndbuf:
+ __sock_set_timestamps(sk, valbool, true, true);
+ break;
+ case SO_TIMESTAMPING_NEW:
+- sock_set_flag(sk, SOCK_TSTAMP_NEW);
+- fallthrough;
+ case SO_TIMESTAMPING_OLD:
+ if (val & ~SOF_TIMESTAMPING_MASK) {
+ ret = -EINVAL;
+@@ -1037,16 +1035,14 @@ set_sndbuf:
+ }
+
+ sk->sk_tsflags = val;
++ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
++
+ if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+ sock_enable_timestamp(sk,
+ SOCK_TIMESTAMPING_RX_SOFTWARE);
+- else {
+- if (optname == SO_TIMESTAMPING_NEW)
+- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+-
++ else
+ sock_disable_timestamp(sk,
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+- }
+ break;
+
+ case SO_RCVLOWAT:
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Hoang Huu Le <hoang.h.le@dektech.com.au>
+Date: Thu, 8 Oct 2020 14:31:56 +0700
+Subject: [PATCH stable 5.9 18/24] tipc: fix NULL pointer dereference in tipc_named_rcv
+
+From: Hoang Huu Le <hoang.h.le@dektech.com.au>
+
+[ Upstream commit 7b50ee3dad2581dc022b4e32e55964d4fcdccf20 ]
+
+In the function node_lost_contact(), we call __skb_queue_purge() without
+grabbing the list->lock. This can cause a race condition while processing
+the 'namedq' list in the calling path tipc_named_rcv()->tipc_named_dequeue().
+
+ [] BUG: kernel NULL pointer dereference, address: 0000000000000000
+ [] #PF: supervisor read access in kernel mode
+ [] #PF: error_code(0x0000) - not-present page
+ [] PGD 7ca63067 P4D 7ca63067 PUD 6c553067 PMD 0
+ [] Oops: 0000 [#1] SMP NOPTI
+ [] CPU: 1 PID: 15 Comm: ksoftirqd/1 Tainted: G O 5.9.0-rc6+ #2
+ [] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS [...]
+ [] RIP: 0010:tipc_named_rcv+0x103/0x320 [tipc]
+ [] Code: 41 89 44 24 10 49 8b 16 49 8b 46 08 49 c7 06 00 00 00 [...]
+ [] RSP: 0018:ffffc900000a7c58 EFLAGS: 00000282
+ [] RAX: 00000000000012ec RBX: 0000000000000000 RCX: ffff88807bde1270
+ [] RDX: 0000000000002c7c RSI: 0000000000002c7c RDI: ffff88807b38f1a8
+ [] RBP: ffff88807b006288 R08: ffff88806a367800 R09: ffff88806a367900
+ [] R10: ffff88806a367a00 R11: ffff88806a367b00 R12: ffff88807b006258
+ [] R13: ffff88807b00628a R14: ffff888069334d00 R15: ffff88806a434600
+ [] FS: 0000000000000000(0000) GS:ffff888079480000(0000) knlGS:0[...]
+ [] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [] CR2: 0000000000000000 CR3: 0000000077320000 CR4: 00000000000006e0
+ [] Call Trace:
+ [] ? tipc_bcast_rcv+0x9a/0x1a0 [tipc]
+ [] tipc_rcv+0x40d/0x670 [tipc]
+ [] ? _raw_spin_unlock+0xa/0x20
+ [] tipc_l2_rcv_msg+0x55/0x80 [tipc]
+ [] __netif_receive_skb_one_core+0x8c/0xa0
+ [] process_backlog+0x98/0x140
+ [] net_rx_action+0x13a/0x420
+ [] __do_softirq+0xdb/0x316
+ [] ? smpboot_thread_fn+0x2f/0x1e0
+ [] ? smpboot_thread_fn+0x74/0x1e0
+ [] ? smpboot_thread_fn+0x14e/0x1e0
+ [] run_ksoftirqd+0x1a/0x40
+ [] smpboot_thread_fn+0x149/0x1e0
+ [] ? sort_range+0x20/0x20
+ [] kthread+0x131/0x150
+ [] ? kthread_unuse_mm+0xa0/0xa0
+ [] ret_from_fork+0x22/0x30
+ [] Modules linked in: veth tipc(O) ip6_udp_tunnel udp_tunnel [...]
+ [] CR2: 0000000000000000
+ [] ---[ end trace 65c276a8e2e2f310 ]---
+
+To fix this, we need to grab the lock of the 'namedq' list on both
+calling paths.
+
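+The pattern behind the fix, sketched as a hypothetical helper:
+__skb_queue_purge() walks the list with no locking, while
+skb_queue_purge() takes list->lock internally, so writers and the
+reader in tipc_named_dequeue() must serialize on the same lock:
+
+  static void namedq_flush_locked(struct sk_buff_head *namedq)
+  {
+          struct sk_buff *skb, *tmp;
+
+          spin_lock_bh(&namedq->lock);
+          skb_queue_walk_safe(namedq, skb, tmp) {
+                  __skb_unlink(skb, namedq);
+                  kfree_skb(skb);
+          }
+          spin_unlock_bh(&namedq->lock);
+  }
+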
+Fixes: cad2929dc432 ("tipc: update a binding service via broadcast")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Signed-off-by: Hoang Huu Le <hoang.h.le@dektech.com.au>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/name_distr.c | 10 +++++++++-
+ net/tipc/node.c | 2 +-
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -327,8 +327,13 @@ static struct sk_buff *tipc_named_dequeu
+ struct tipc_msg *hdr;
+ u16 seqno;
+
++ spin_lock_bh(&namedq->lock);
+ skb_queue_walk_safe(namedq, skb, tmp) {
+- skb_linearize(skb);
++ if (unlikely(skb_linearize(skb))) {
++ __skb_unlink(skb, namedq);
++ kfree_skb(skb);
++ continue;
++ }
+ hdr = buf_msg(skb);
+ seqno = msg_named_seqno(hdr);
+ if (msg_is_last_bulk(hdr)) {
+@@ -338,12 +343,14 @@ static struct sk_buff *tipc_named_dequeu
+
+ if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+ __skb_unlink(skb, namedq);
++ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (*open && (*rcv_nxt == seqno)) {
+ (*rcv_nxt)++;
+ __skb_unlink(skb, namedq);
++ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+@@ -353,6 +360,7 @@ static struct sk_buff *tipc_named_dequeu
+ continue;
+ }
+ }
++ spin_unlock_bh(&namedq->lock);
+ return NULL;
+ }
+
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1485,7 +1485,7 @@ static void node_lost_contact(struct tip
+
+ /* Clean up broadcast state */
+ tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+- __skb_queue_purge(&n->bc_entry.namedq);
++ skb_queue_purge(&n->bc_entry.namedq);
+
+ /* Abort any ongoing link failover */
+ for (i = 0; i < MAX_BEARERS; i++) {
--- /dev/null
+From foo@baz Sat Oct 17 08:52:03 AM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 21:12:50 -0700
+Subject: [PATCH stable 5.9 19/24] tipc: fix the skb_unshare() in tipc_buf_append()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ed42989eab57d619667d7e87dfbd8fe207db54fe ]
+
+skb_unshare() drops a reference count on the old skb unconditionally,
+so in the failure case, we end up freeing the skb twice here.
+And because the skb is allocated in fclone and cloned by the caller
+tipc_msg_reassemble(), the consequence is actually freeing the
+original skb too, thus triggering the UAF reported by syzbot.
+
+Fix this by replacing the skb_unshare() call with skb_cloned()+skb_copy().
+
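+The contrast behind the fix, as a hypothetical helper: skb_unshare()
+consumes a reference on the passed skb even when the copy fails, while
+skb_copy() leaves the original untouched, so a failed allocation can no
+longer free an skb the caller still holds:
+
+  static struct sk_buff *frag_to_private_copy(struct sk_buff *frag)
+  {
+          if (!skb_cloned(frag))
+                  return frag;
+          /* on failure this returns NULL, but frag itself is not freed,
+           * so tipc_msg_reassemble()'s reference stays valid */
+          return skb_copy(frag, GFP_ATOMIC);
+  }
+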
+Fixes: ff48b6222e65 ("tipc: use skb_unshare() instead in tipc_buf_append()")
+Reported-and-tested-by: syzbot+e96a7ba46281824cc46a@syzkaller.appspotmail.com
+Cc: Jon Maloy <jmaloy@redhat.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **hea
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- frag = skb_unshare(frag, GFP_ATOMIC);
++ if (skb_cloned(frag))
++ frag = skb_copy(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;