git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.8-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 17 Oct 2020 07:02:21 +0000 (09:02 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 17 Oct 2020 07:02:21 +0000 (09:02 +0200)
added patches:
can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch
can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch
cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch
ibmveth-identify-ingress-large-send-packets.patch
ibmveth-switch-order-of-ibmveth_helper-calls.patch
ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
mlx4-handle-non-napi-callers-to-napi_poll.patch
net-dsa-microchip-fix-race-condition.patch
net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch
net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch
net-fix-pos-incrementment-in-ipv6_route_seq_next.patch
net-ipa-skip-suspend-resume-activities-if-not-set-up.patch
net-ipv4-always-honour-route-mtu-during-forwarding.patch
net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch
net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch
net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch
net-smc-fix-use-after-free-of-delayed-events.patch
net-smc-fix-valid-dmbe-buffer-sizes.patch
net-tls-sendfile-fails-with-ktls-offload.patch
net-usb-qmi_wwan-add-cellient-mpl200-card.patch
net_sched-remove-a-redundant-goto-chain-check.patch
r8169-fix-data-corruption-issue-on-rtl8402.patch
socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch
socket-fix-option-so_timestamping_new.patch
tipc-fix-the-skb_unshare-in-tipc_buf_append.patch

26 files changed:
queue-5.8/can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch [new file with mode: 0644]
queue-5.8/can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch [new file with mode: 0644]
queue-5.8/cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch [new file with mode: 0644]
queue-5.8/ibmveth-identify-ingress-large-send-packets.patch [new file with mode: 0644]
queue-5.8/ibmveth-switch-order-of-ibmveth_helper-calls.patch [new file with mode: 0644]
queue-5.8/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch [new file with mode: 0644]
queue-5.8/mlx4-handle-non-napi-callers-to-napi_poll.patch [new file with mode: 0644]
queue-5.8/net-dsa-microchip-fix-race-condition.patch [new file with mode: 0644]
queue-5.8/net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch [new file with mode: 0644]
queue-5.8/net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch [new file with mode: 0644]
queue-5.8/net-fix-pos-incrementment-in-ipv6_route_seq_next.patch [new file with mode: 0644]
queue-5.8/net-ipa-skip-suspend-resume-activities-if-not-set-up.patch [new file with mode: 0644]
queue-5.8/net-ipv4-always-honour-route-mtu-during-forwarding.patch [new file with mode: 0644]
queue-5.8/net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch [new file with mode: 0644]
queue-5.8/net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch [new file with mode: 0644]
queue-5.8/net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch [new file with mode: 0644]
queue-5.8/net-smc-fix-use-after-free-of-delayed-events.patch [new file with mode: 0644]
queue-5.8/net-smc-fix-valid-dmbe-buffer-sizes.patch [new file with mode: 0644]
queue-5.8/net-tls-sendfile-fails-with-ktls-offload.patch [new file with mode: 0644]
queue-5.8/net-usb-qmi_wwan-add-cellient-mpl200-card.patch [new file with mode: 0644]
queue-5.8/net_sched-remove-a-redundant-goto-chain-check.patch [new file with mode: 0644]
queue-5.8/r8169-fix-data-corruption-issue-on-rtl8402.patch [new file with mode: 0644]
queue-5.8/series [new file with mode: 0644]
queue-5.8/socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch [new file with mode: 0644]
queue-5.8/socket-fix-option-so_timestamping_new.patch [new file with mode: 0644]
queue-5.8/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch [new file with mode: 0644]

diff --git a/queue-5.8/can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch b/queue-5.8/can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch
new file mode 100644 (file)
index 0000000..bd3f2b5
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 23:18:21 -0700
+Subject: can: j1935: j1939_tp_tx_dat_new(): fix missing initialization of skbcnt
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit e009f95b1543e26606dca2f7e6e9f0f9174538e5 ]
+
+This fixes an uninit-value warning:
+BUG: KMSAN: uninit-value in can_receive+0x26b/0x630 net/can/af_can.c:650
+
+Reported-and-tested-by: syzbot+3f3837e61a48d32b495f@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: Robin van der Gracht <robin@protonic.nl>
+Cc: Oleksij Rempel <linux@rempel-privat.de>
+Cc: Pengutronix Kernel Team <kernel@pengutronix.de>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Link: https://lore.kernel.org/r/20201008061821.24663-1-xiyou.wangcong@gmail.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/transport.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j193
+       skb->dev = priv->ndev;
+       can_skb_reserve(skb);
+       can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++      can_skb_prv(skb)->skbcnt = 0;
+       /* reserve CAN header */
+       skb_reserve(skb, offsetof(struct can_frame, data));
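For context, a paraphrased sketch of the 5.8-era helpers from include/linux/can/skb.h (abbreviated here for illustration; not part of the patch): the CAN private area is carved out of the skb head and is never zeroed by can_skb_reserve(), so every field -- including skbcnt -- must be assigned explicitly, which is exactly what the hunk above adds.

    struct can_skb_priv {
            int ifindex;            /* interface the frame is tied to */
            int skbcnt;             /* loopback bookkeeping; read later in af_can.c */
            struct can_frame cf[];  /* frame payload follows the header */
    };

    static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
    {
            return (struct can_skb_priv *)(skb->head);
    }

    static inline void can_skb_reserve(struct sk_buff *skb)
    {
            /* reserves space only; the fields above stay uninitialized */
            skb_reserve(skb, sizeof(struct can_skb_priv));
    }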
diff --git a/queue-5.8/can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch b/queue-5.8/can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch
new file mode 100644 (file)
index 0000000..74a8ba1
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 11 Aug 2020 10:15:44 +0200
+Subject: can: m_can_platform: don't call m_can_class_suspend in runtime suspend
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+[ Upstream commit 81f1f5ae8b3cbd54fdd994c9e9aacdb7b414a802 ]
+
+    0704c5743694 ("can: m_can_platform: remove unnecessary m_can_class_resume() call")
+
+removed the m_can_class_resume() call in the runtime resume path to get
+rid of an infinite recursion, so the runtime resume now only handles the
+device clocks.
+
+Unfortunately it did not remove the complementary m_can_class_suspend() call
+in the runtime suspend function, so the two paths are now unbalanced. This
+leaves the pinctrl state stuck on the "sleep" state, which breaks all CAN
+functionality on SoCs where that state is defined. Remove the
+m_can_class_suspend() call to fix this.
+
+Fixes: 0704c5743694 ("can: m_can_platform: remove unnecessary m_can_class_resume() call")
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Link: https://lore.kernel.org/r/20200811081545.19921-1-l.stach@pengutronix.de
+Acked-by: Dan Murphy <dmurphy@ti.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/m_can/m_can_platform.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct m_can_classdev *mcan_class = netdev_priv(ndev);
+-      m_can_class_suspend(dev);
+-
+       clk_disable_unprepare(mcan_class->cclk);
+       clk_disable_unprepare(mcan_class->hclk);
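For illustration, a sketch of the symmetric runtime PM pair this change leaves behind (modelled on the driver's structure around the hunk above; a sketch, not the verbatim 5.8 source): runtime suspend/resume now only gate the device clocks, so the pinctrl "sleep"/"default" transitions remain the business of the system suspend path.

    static int __maybe_unused m_can_runtime_suspend(struct device *dev)
    {
            struct net_device *ndev = dev_get_drvdata(dev);
            struct m_can_classdev *mcan_class = netdev_priv(ndev);

            /* clocks only -- no m_can_class_suspend(), no pinctrl change */
            clk_disable_unprepare(mcan_class->cclk);
            clk_disable_unprepare(mcan_class->hclk);

            return 0;
    }

    static int __maybe_unused m_can_runtime_resume(struct device *dev)
    {
            struct net_device *ndev = dev_get_drvdata(dev);
            struct m_can_classdev *mcan_class = netdev_priv(ndev);
            int err;

            err = clk_prepare_enable(mcan_class->hclk);
            if (err)
                    return err;

            err = clk_prepare_enable(mcan_class->cclk);
            if (err)
                    clk_disable_unprepare(mcan_class->hclk);

            return err;
    }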
diff --git a/queue-5.8/cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch b/queue-5.8/cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch
new file mode 100644 (file)
index 0000000..145d19e
--- /dev/null
@@ -0,0 +1,387 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Herat Ramani <herat@chelsio.com>
+Date: Tue, 13 Oct 2020 15:01:29 +0530
+Subject: cxgb4: handle 4-tuple PEDIT to NAT mode translation
+
+From: Herat Ramani <herat@chelsio.com>
+
+[ Upstream commit 2ef813b8f405db3f72202b6fcae40a628ab80a53 ]
+
+The 4-tuple NAT offload via PEDIT always overwrites all the 4-tuple
+fields even if they had not been explicitly enabled. If any fields in
+the 4-tuple are not enabled, then the hardware overwrites the
+disabled fields with zeros, instead of ignoring them.
+
+So, add a parser that can translate the enabled 4-tuple PEDIT fields
+to one of the NAT mode combinations supported by the hardware and
+hence avoid overwriting disabled fields to 0. Any rule with
+unsupported NAT mode combination is rejected.
+
+Signed-off-by: Herat Ramani <herat@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |  175 +++++++++++++++++--
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h |   15 +
+ 2 files changed, 177 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[
+       PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+ };
++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
++      /* Default supported NAT modes */
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_NONE,
++              .natmode = NAT_MODE_NONE,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP,
++              .natmode = NAT_MODE_DIP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
++              .natmode = NAT_MODE_DIP_DP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++                       CXGB4_ACTION_NATMODE_SIP,
++              .natmode = NAT_MODE_DIP_DP_SIP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++                       CXGB4_ACTION_NATMODE_SPORT,
++              .natmode = NAT_MODE_DIP_DP_SP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
++              .natmode = NAT_MODE_SIP_SP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++                       CXGB4_ACTION_NATMODE_SPORT,
++              .natmode = NAT_MODE_DIP_SIP_SP,
++      },
++      {
++              .chip = CHELSIO_T5,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++                       CXGB4_ACTION_NATMODE_DPORT |
++                       CXGB4_ACTION_NATMODE_SPORT,
++              .natmode = NAT_MODE_ALL,
++      },
++      /* T6+ can ignore L4 ports when they're disabled. */
++      {
++              .chip = CHELSIO_T6,
++              .flags = CXGB4_ACTION_NATMODE_SIP,
++              .natmode = NAT_MODE_SIP_SP,
++      },
++      {
++              .chip = CHELSIO_T6,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
++              .natmode = NAT_MODE_DIP_DP_SP,
++      },
++      {
++              .chip = CHELSIO_T6,
++              .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
++              .natmode = NAT_MODE_ALL,
++      },
++};
++
++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
++                                     u8 natmode_flags)
++{
++      u8 i = 0;
++
++      /* Translate the enabled NAT 4-tuple fields to one of the
++       * hardware supported NAT mode configurations. This ensures
++       * that we pick a valid combination, where the disabled fields
++       * do not get overwritten to 0.
++       */
++      for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++              if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
++                      fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
++                      return;
++              }
++      }
++}
++
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+       struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+@@ -287,7 +370,8 @@ static void offload_pedit(struct ch_filt
+ }
+ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+-                              u32 mask, u32 offset, u8 htype)
++                              u32 mask, u32 offset, u8 htype,
++                              u8 *natmode_flags)
+ {
+       switch (htype) {
+       case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+@@ -312,67 +396,102 @@ static void process_pedit_field(struct c
+               switch (offset) {
+               case PEDIT_IP4_SRC:
+                       offload_pedit(fs, val, mask, IP4_SRC);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+                       break;
+               case PEDIT_IP4_DST:
+                       offload_pedit(fs, val, mask, IP4_DST);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+               }
+-              fs->nat_mode = NAT_MODE_ALL;
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+               switch (offset) {
+               case PEDIT_IP6_SRC_31_0:
+                       offload_pedit(fs, val, mask, IP6_SRC_31_0);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+                       break;
+               case PEDIT_IP6_SRC_63_32:
+                       offload_pedit(fs, val, mask, IP6_SRC_63_32);
++                      *natmode_flags |=  CXGB4_ACTION_NATMODE_SIP;
+                       break;
+               case PEDIT_IP6_SRC_95_64:
+                       offload_pedit(fs, val, mask, IP6_SRC_95_64);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+                       break;
+               case PEDIT_IP6_SRC_127_96:
+                       offload_pedit(fs, val, mask, IP6_SRC_127_96);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+                       break;
+               case PEDIT_IP6_DST_31_0:
+                       offload_pedit(fs, val, mask, IP6_DST_31_0);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+                       break;
+               case PEDIT_IP6_DST_63_32:
+                       offload_pedit(fs, val, mask, IP6_DST_63_32);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+                       break;
+               case PEDIT_IP6_DST_95_64:
+                       offload_pedit(fs, val, mask, IP6_DST_95_64);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+                       break;
+               case PEDIT_IP6_DST_127_96:
+                       offload_pedit(fs, val, mask, IP6_DST_127_96);
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+               }
+-              fs->nat_mode = NAT_MODE_ALL;
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+               switch (offset) {
+               case PEDIT_TCP_SPORT_DPORT:
+-                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+                               fs->nat_fport = val;
+-                      else
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++                      } else {
+                               fs->nat_lport = val >> 16;
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++                      }
+               }
+-              fs->nat_mode = NAT_MODE_ALL;
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+               switch (offset) {
+               case PEDIT_UDP_SPORT_DPORT:
+-                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+                               fs->nat_fport = val;
+-                      else
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++                      } else {
+                               fs->nat_lport = val >> 16;
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++                      }
+               }
+-              fs->nat_mode = NAT_MODE_ALL;
++              break;
+       }
+ }
++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
++                                       struct netlink_ext_ack *extack)
++{
++      u8 i = 0;
++
++      /* Extract the NAT mode to enable based on what 4-tuple fields
++       * are enabled to be overwritten. This ensures that the
++       * disabled fields don't get overwritten to 0.
++       */
++      for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++              const struct cxgb4_natmode_config *c;
++
++              c = &cxgb4_natmode_config_array[i];
++              if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
++                  natmode_flags == c->flags)
++                      return 0;
++      }
++      NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
++      return -EOPNOTSUPP;
++}
++
+ void cxgb4_process_flow_actions(struct net_device *in,
+                               struct flow_action *actions,
+                               struct ch_filter_specification *fs)
+ {
+       struct flow_action_entry *act;
++      u8 natmode_flags = 0;
+       int i;
+       flow_action_for_each(i, act, actions) {
+@@ -423,13 +542,17 @@ void cxgb4_process_flow_actions(struct n
+                       val = act->mangle.val;
+                       offset = act->mangle.offset;
+-                      process_pedit_field(fs, val, mask, offset, htype);
++                      process_pedit_field(fs, val, mask, offset, htype,
++                                          &natmode_flags);
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
++      if (natmode_flags)
++              cxgb4_action_natmode_tweak(fs, natmode_flags);
++
+ }
+ static bool valid_l4_mask(u32 mask)
+@@ -446,7 +569,8 @@ static bool valid_l4_mask(u32 mask)
+ }
+ static bool valid_pedit_action(struct net_device *dev,
+-                             const struct flow_action_entry *act)
++                             const struct flow_action_entry *act,
++                             u8 *natmode_flags)
+ {
+       u32 mask, offset;
+       u8 htype;
+@@ -471,7 +595,10 @@ static bool valid_pedit_action(struct ne
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+               switch (offset) {
+               case PEDIT_IP4_SRC:
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++                      break;
+               case PEDIT_IP4_DST:
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+                       break;
+               default:
+                       netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -485,10 +612,13 @@ static bool valid_pedit_action(struct ne
+               case PEDIT_IP6_SRC_63_32:
+               case PEDIT_IP6_SRC_95_64:
+               case PEDIT_IP6_SRC_127_96:
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++                      break;
+               case PEDIT_IP6_DST_31_0:
+               case PEDIT_IP6_DST_63_32:
+               case PEDIT_IP6_DST_95_64:
+               case PEDIT_IP6_DST_127_96:
++                      *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+                       break;
+               default:
+                       netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -504,6 +634,10 @@ static bool valid_pedit_action(struct ne
+                                          __func__);
+                               return false;
+                       }
++                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++                      else
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+                       break;
+               default:
+                       netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -519,6 +653,10 @@ static bool valid_pedit_action(struct ne
+                                          __func__);
+                               return false;
+                       }
++                      if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++                      else
++                              *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+                       break;
+               default:
+                       netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -537,10 +675,12 @@ int cxgb4_validate_flow_actions(struct n
+                               struct flow_action *actions,
+                               struct netlink_ext_ack *extack)
+ {
++      struct adapter *adap = netdev2adap(dev);
+       struct flow_action_entry *act;
+       bool act_redir = false;
+       bool act_pedit = false;
+       bool act_vlan = false;
++      u8 natmode_flags = 0;
+       int i;
+       if (!flow_action_basic_hw_stats_check(actions, extack))
+@@ -553,7 +693,6 @@ int cxgb4_validate_flow_actions(struct n
+                       /* Do nothing */
+                       break;
+               case FLOW_ACTION_REDIRECT: {
+-                      struct adapter *adap = netdev2adap(dev);
+                       struct net_device *n_dev, *target_dev;
+                       unsigned int i;
+                       bool found = false;
+@@ -603,7 +742,8 @@ int cxgb4_validate_flow_actions(struct n
+                       }
+                       break;
+               case FLOW_ACTION_MANGLE: {
+-                      bool pedit_valid = valid_pedit_action(dev, act);
++                      bool pedit_valid = valid_pedit_action(dev, act,
++                                                            &natmode_flags);
+                       if (!pedit_valid)
+                               return -EOPNOTSUPP;
+@@ -622,6 +762,15 @@ int cxgb4_validate_flow_actions(struct n
+               return -EINVAL;
+       }
++      if (act_pedit) {
++              int ret;
++
++              ret = cxgb4_action_natmode_validate(adap, natmode_flags,
++                                                  extack);
++              if (ret)
++                      return ret;
++      }
++
+       return 0;
+ }
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
+ #define PEDIT_TCP_SPORT_DPORT         0x0
+ #define PEDIT_UDP_SPORT_DPORT         0x0
++enum cxgb4_action_natmode_flags {
++      CXGB4_ACTION_NATMODE_NONE = 0,
++      CXGB4_ACTION_NATMODE_DIP = (1 << 0),
++      CXGB4_ACTION_NATMODE_SIP = (1 << 1),
++      CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
++      CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
++};
++
++/* TC PEDIT action to NATMODE translation entry */
++struct cxgb4_natmode_config {
++      enum chip_type chip;
++      u8 flags;
++      u8 natmode;
++};
++
+ void cxgb4_process_flow_actions(struct net_device *in,
+                               struct flow_action *actions,
+                               struct ch_filter_specification *fs);
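A worked example of the translation this patch introduces (the values are illustrative, not taken from the patch): a TC rule that rewrites only the destination IP and destination port -- a typical DNAT -- collects natmode_flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT, which validation accepts on T5 and later and the tweak step maps to NAT_MODE_DIP_DP. A combination absent from the table is rejected with -EOPNOTSUPP instead of silently zeroing the untouched fields.

    /* Hypothetical caller, sketching the validate-then-tweak contract
     * the patch adds (both functions are defined in the hunks above).
     */
    static int example_apply_nat(struct adapter *adap,
                                 struct ch_filter_specification *fs,
                                 struct netlink_ext_ack *extack)
    {
            /* e.g. a DNAT rule: rewrite destination IP + destination port */
            u8 natmode_flags = CXGB4_ACTION_NATMODE_DIP |
                               CXGB4_ACTION_NATMODE_DPORT;
            int ret;

            ret = cxgb4_action_natmode_validate(adap, natmode_flags, extack);
            if (ret)
                    return ret;             /* unsupported 4-tuple subset */

            cxgb4_action_natmode_tweak(fs, natmode_flags);
            /* fs->nat_mode is now NAT_MODE_DIP_DP, not NAT_MODE_ALL */
            return 0;
    }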
diff --git a/queue-5.8/ibmveth-identify-ingress-large-send-packets.patch b/queue-5.8/ibmveth-identify-ingress-large-send-packets.patch
new file mode 100644 (file)
index 0000000..a3455f0
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:14 -0700
+Subject: ibmveth: Identify ingress large send packets.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 413f142cc05cb03f2d1ea83388e40c1ddc0d74e9 ]
+
+Ingress large send packets are identified by either
+the IBMVETH_RXQ_LRG_PKT flag in the receive buffer
+or a -1 placed in the IP header checksum.
+The method used depends on the firmware version. Frame
+geometry and sufficient header validation are performed by the
+hypervisor, eliminating the need for further header checks here.
+
+Fixes: 7b5967389f5a ("ibmveth: set correct gso_size and gso_type")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_stru
+                       int offset = ibmveth_rxq_frame_offset(adapter);
+                       int csum_good = ibmveth_rxq_csum_good(adapter);
+                       int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++                      __sum16 iph_check = 0;
+                       skb = ibmveth_rxq_get_buffer(adapter);
+@@ -1385,7 +1386,17 @@ static int ibmveth_poll(struct napi_stru
+                       skb_put(skb, length);
+                       skb->protocol = eth_type_trans(skb, netdev);
+-                      if (length > netdev->mtu + ETH_HLEN) {
++                      /* PHYP without PLSO support places a -1 in the ip
++                       * checksum for large send frames.
++                       */
++                      if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++                              struct iphdr *iph = (struct iphdr *)skb->data;
++
++                              iph_check = iph->check;
++                      }
++
++                      if ((length > netdev->mtu + ETH_HLEN) ||
++                          lrg_pkt || iph_check == 0xffff) {
+                               ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+                               adapter->rx_large_packets++;
+                       }
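The -1 marker works because a correctly folded ones'-complement IPv4 header checksum never comes out as all-ones, so the hypervisor can reuse 0xffff as an unambiguous in-band flag. A condensed, hypothetical helper equivalent to the inline test in the hunk above:

    static bool ibmveth_rx_is_large_send(const struct sk_buff *skb, int lrg_pkt,
                                         unsigned int length, unsigned int mtu)
    {
            __sum16 iph_check = 0;

            /* PHYP without PLSO support places a -1 in the IP checksum
             * for large send frames.
             */
            if (skb->protocol == cpu_to_be16(ETH_P_IP))
                    iph_check = ((const struct iphdr *)skb->data)->check;

            return length > mtu + ETH_HLEN || lrg_pkt || iph_check == 0xffff;
    }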
diff --git a/queue-5.8/ibmveth-switch-order-of-ibmveth_helper-calls.patch b/queue-5.8/ibmveth-switch-order-of-ibmveth_helper-calls.patch
new file mode 100644 (file)
index 0000000..776f8c6
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: David Wilder <dwilder@us.ibm.com>
+Date: Tue, 13 Oct 2020 16:20:13 -0700
+Subject: ibmveth: Switch order of ibmveth_helper calls.
+
+From: David Wilder <dwilder@us.ibm.com>
+
+[ Upstream commit 5ce9ad815a296374ca21f43f3b1ab5083d202ee1 ]
+
+ibmveth_rx_csum_helper() must be called after ibmveth_rx_mss_helper(),
+as ibmveth_rx_csum_helper() may alter IP and TCP checksum values.
+
+Fixes: 66aa0678efc2 ("ibmveth: Support to enable LSO/CSO for Trunk VEA.")
+Signed-off-by: David Wilder <dwilder@us.ibm.com>
+Reviewed-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Reviewed-by: Cristobal Forno <cris.forno@ibm.com>
+Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1385,16 +1385,16 @@ static int ibmveth_poll(struct napi_stru
+                       skb_put(skb, length);
+                       skb->protocol = eth_type_trans(skb, netdev);
+-                      if (csum_good) {
+-                              skb->ip_summed = CHECKSUM_UNNECESSARY;
+-                              ibmveth_rx_csum_helper(skb, adapter);
+-                      }
+-
+                       if (length > netdev->mtu + ETH_HLEN) {
+                               ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+                               adapter->rx_large_packets++;
+                       }
++                      if (csum_good) {
++                              skb->ip_summed = CHECKSUM_UNNECESSARY;
++                              ibmveth_rx_csum_helper(skb, adapter);
++                      }
++
+                       napi_gro_receive(napi, skb);    /* send it up */
+                       netdev->stats.rx_packets++;
diff --git a/queue-5.8/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch b/queue-5.8/ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
new file mode 100644 (file)
index 0000000..4b34bab
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: David Ahern <dsahern@kernel.org>
+Date: Fri, 9 Oct 2020 11:01:01 -0700
+Subject: ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
+
+From: David Ahern <dsahern@kernel.org>
+
+[ Upstream commit 874fb9e2ca949b443cc419a4f2227cafd4381d39 ]
+
+Tobias reported regressions in IPsec tests following the patch
+referenced by the Fixes tag below. The root cause is dropping the
+reset of the flowi4_oif after the fib_lookup. Apparently it is
+needed for xfrm cases, so restore the oif update to ip_route_output_flow
+right before the call to xfrm_lookup_route.
+
+Fixes: 2fbc6e89b2f1 ("ipv4: Update exception handling for multipath routes via same device")
+Reported-by: Tobias Brunner <tobias@strongswan.org>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2764,10 +2764,12 @@ struct rtable *ip_route_output_flow(stru
+       if (IS_ERR(rt))
+               return rt;
+-      if (flp4->flowi4_proto)
++      if (flp4->flowi4_proto) {
++              flp4->flowi4_oif = rt->dst.dev->ifindex;
+               rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+                                                       flowi4_to_flowi(flp4),
+                                                       sk, 0);
++      }
+       return rt;
+ }
diff --git a/queue-5.8/mlx4-handle-non-napi-callers-to-napi_poll.patch b/queue-5.8/mlx4-handle-non-napi-callers-to-napi_poll.patch
new file mode 100644 (file)
index 0000000..d14b5dd
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Jonathan Lemon <bsd@fb.com>
+Date: Thu, 8 Oct 2020 11:45:26 -0700
+Subject: mlx4: handle non-napi callers to napi_poll
+
+From: Jonathan Lemon <bsd@fb.com>
+
+[ Upstream commit b2b8a92733b288128feb57ffa694758cf475106c ]
+
+netcons calls napi_poll with a budget of 0 to transmit packets.
+Handle this by:
+ - skipping RX processing
+ - not trying to recycle TX packets to the RX cache
+
+Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c |    3 +++
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c |    2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struc
+       bool clean_complete = true;
+       int done;
++      if (!budget)
++              return 0;
++
+       if (priv->tx_ring_num[TX_XDP]) {
+               xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+               if (xdp_tx_cq->xdp_busy) {
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_
+               .dma = tx_info->map0_dma,
+       };
+-      if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++      if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+               dma_unmap_page(priv->ddev, tx_info->map0_dma,
+                              PAGE_SIZE, priv->dma_dir);
+               put_page(tx_info->page);
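The budget-0 convention generalizes beyond mlx4. A minimal sketch of a NAPI poll handler that honours it (the example_* helpers are hypothetical placeholders, not mlx4 functions):

    static void example_clean_tx(struct napi_struct *napi);            /* hypothetical */
    static int example_clean_rx(struct napi_struct *napi, int budget); /* hypothetical */

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* TX completion reaping is allowed even at budget == 0 */
            example_clean_tx(napi);

            /* budget == 0 (netpoll/netconsole) means: touch no RX state */
            if (budget)
                    work_done = example_clean_rx(napi, budget);

            /* never completes at budget == 0, since 0 < 0 is false */
            if (work_done < budget)
                    napi_complete_done(napi, work_done);

            return work_done;
    }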
diff --git a/queue-5.8/net-dsa-microchip-fix-race-condition.patch b/queue-5.8/net-dsa-microchip-fix-race-condition.patch
new file mode 100644 (file)
index 0000000..e442570
--- /dev/null
@@ -0,0 +1,112 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 10:39:42 +0200
+Subject: net: dsa: microchip: fix race condition
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 8098bd69bc4e925070313b1b95d03510f4f24738 ]
+
+Between queuing the delayed work and finishing the setup of the dsa
+ports, the process may sleep in request_module() (via
+phy_device_create()) and the queued work may be executed prior to the
+switch net devices being registered. In ksz_mib_read_work(), a NULL
+dereference will happen within netif_carrier_ok(dp->slave).
+
+Not queuing the delayed work in ksz_init_mib_timer() makes things even
+worse because the work will now be queued for immediate execution
+(instead of 2000 ms) in ksz_mac_link_down() via
+dsa_port_link_register_of().
+
+Call tree:
+ksz9477_i2c_probe()
+\--ksz9477_switch_register()
+   \--ksz_switch_register()
+      +--dsa_register_switch()
+      |  \--dsa_switch_probe()
+      |     \--dsa_tree_setup()
+      |        \--dsa_tree_setup_switches()
+      |           +--dsa_switch_setup()
+      |           |  +--ksz9477_setup()
+      |           |  |  \--ksz_init_mib_timer()
+      |           |  |     |--/* Start the timer 2 seconds later. */
+      |           |  |     \--schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
+      |           |  \--__mdiobus_register()
+      |           |     \--mdiobus_scan()
+      |           |        \--get_phy_device()
+      |           |           +--get_phy_id()
+      |           |           \--phy_device_create()
+      |           |              |--/* sleeping, ksz_mib_read_work() can be called meanwhile */
+      |           |              \--request_module()
+      |           |
+      |           \--dsa_port_setup()
+      |              +--/* Called for non-CPU ports */
+      |              +--dsa_slave_create()
+      |              |  +--/* Too late, ksz_mib_read_work() may be called beforehand */
+      |              |  \--port->slave = ...
+      |             ...
+      |              +--/* Called for CPU port */
+      |              \--dsa_port_link_register_of()
+      |                 \--ksz_mac_link_down()
+      |                    +--/* mib_read must be initialized here */
+      |                    +--/* work is already scheduled, so it will be executed after 2000 ms */
+      |                    \--schedule_delayed_work(&dev->mib_read, 0);
+      \-- /* here port->slave is set up properly, scheduling the delayed work should be safe */
+
+Solution:
+1. Do not queue (only initialize) delayed work in ksz_init_mib_timer().
+2. Only queue delayed work in ksz_mac_link_down() if init is completed.
+3. Queue work once in ksz_switch_register(), after dsa_register_switch()
+has completed.
+
+Fixes: 7c6ff470aa86 ("net: dsa: microchip: add MIB counter reading support")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c |   16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_devic
+       INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+-      /* Read MIB counters every 30 seconds to avoid overflow. */
+-      dev->mib_read_interval = msecs_to_jiffies(30000);
+-
+       for (i = 0; i < dev->mib_port_cnt; i++)
+               dev->dev_ops->port_init_cnt(dev, i);
+-
+-      /* Start the timer 2 seconds later. */
+-      schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
+ }
+ EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
+@@ -144,7 +138,9 @@ void ksz_adjust_link(struct dsa_switch *
+       /* Read all MIB counters when the link is going down. */
+       if (!phydev->link) {
+               p->read = true;
+-              schedule_delayed_work(&dev->mib_read, 0);
++              /* timer started */
++              if (dev->mib_read_interval)
++                      schedule_delayed_work(&dev->mib_read, 0);
+       }
+       mutex_lock(&dev->dev_mutex);
+       if (!phydev->link)
+@@ -460,6 +456,12 @@ int ksz_switch_register(struct ksz_devic
+               return ret;
+       }
++      /* Read MIB counters every 30 seconds to avoid overflow. */
++      dev->mib_read_interval = msecs_to_jiffies(30000);
++
++      /* Start the MIB timer. */
++      schedule_delayed_work(&dev->mib_read, 0);
++
+       return 0;
+ }
+ EXPORT_SYMBOL(ksz_switch_register);
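The shape of the fix is a general one: initialize the work early, arm it only once everything it dereferences exists, and use a sentinel so opportunistic call sites stay quiet until then. A generic sketch (the example_* names are hypothetical, not from the driver):

    struct example_dev {
            struct delayed_work work;
            unsigned long interval;         /* 0 until setup has completed */
    };

    static void example_work_fn(struct work_struct *work);  /* hypothetical */

    static void example_setup(struct example_dev *dev)
    {
            /* init only -- queuing here could run before ports exist */
            INIT_DELAYED_WORK(&dev->work, example_work_fn);
    }

    static void example_link_down(struct example_dev *dev)
    {
            if (dev->interval)              /* "timer started" marker */
                    schedule_delayed_work(&dev->work, 0);
    }

    static void example_registration_done(struct example_dev *dev)
    {
            dev->interval = msecs_to_jiffies(30000);
            schedule_delayed_work(&dev->work, 0);   /* now safe to run */
    }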
diff --git a/queue-5.8/net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch b/queue-5.8/net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch
new file mode 100644 (file)
index 0000000..49ec9b0
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Tue, 6 Oct 2020 15:52:53 +0200
+Subject: net: fec: Fix PHY init after phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 0da1ccbbefb662915228bc17e1c7d4ad28b3ddab ]
+
+The phy_reset_after_clk_enable() does a PHY reset, which means the PHY
+loses its register settings. The fec_enet_mii_probe() starts the PHY
+and does the necessary calls to configure the PHY via PHY framework,
+and loads the correct register settings into the PHY. Therefore,
+fec_enet_mii_probe() should be called only after the PHY has been
+reset, not before as it is now.
+
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Tested-by: Richard Leitner <richard.leitner@skidata.com>
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3006,17 +3006,17 @@ fec_enet_open(struct net_device *ndev)
+       /* Init MAC prior to mii bus probe */
+       fec_restart(ndev);
+-      /* Probe and connect to PHY when open the interface */
+-      ret = fec_enet_mii_probe(ndev);
+-      if (ret)
+-              goto err_enet_mii_probe;
+-
+       /* Call phy_reset_after_clk_enable() again if it failed during
+        * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+        */
+       if (reset_again)
+               fec_enet_phy_reset_after_clk_enable(ndev);
++      /* Probe and connect to PHY when open the interface */
++      ret = fec_enet_mii_probe(ndev);
++      if (ret)
++              goto err_enet_mii_probe;
++
+       if (fep->quirks & FEC_QUIRK_ERR006687)
+               imx6q_cpuidle_fec_irqs_used();
diff --git a/queue-5.8/net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch b/queue-5.8/net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch
new file mode 100644 (file)
index 0000000..fe301ef
--- /dev/null
@@ -0,0 +1,83 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Marek Vasut <marex@denx.de>
+Date: Sat, 10 Oct 2020 11:10:00 +0200
+Subject: net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 64a632da538a6827fad0ea461925cedb9899ebe2 ]
+
+The phy_reset_after_clk_enable() is always called with ndev->phydev,
+however that pointer may be NULL even though the PHY device instance
+already exists and is sufficient to perform the PHY reset.
+
+This condition happens in fec_open(), where the clock must be enabled
+first, then the PHY must be reset, and then the PHY IDs can be read
+out of the PHY.
+
+If the PHY still is not bound to the MAC, but there is an OF PHY node
+and a matching PHY device instance already, use the OF PHY node to
+obtain the PHY device instance, and then use that PHY device instance
+when triggering the PHY reset.
+
+Fixes: 1b0a83ac04e3 ("net: fec: add phy_reset_after_clk_enable() support")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: Richard Leitner <richard.leitner@skidata.com>
+Cc: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c |   25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1913,6 +1913,27 @@ out:
+       return ret;
+ }
++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
++{
++      struct fec_enet_private *fep = netdev_priv(ndev);
++      struct phy_device *phy_dev = ndev->phydev;
++
++      if (phy_dev) {
++              phy_reset_after_clk_enable(phy_dev);
++      } else if (fep->phy_node) {
++              /*
++               * If the PHY still is not bound to the MAC, but there is
++               * OF PHY node and a matching PHY device instance already,
++               * use the OF PHY node to obtain the PHY device instance,
++               * and then use that PHY device instance when triggering
++               * the PHY reset.
++               */
++              phy_dev = of_phy_find_device(fep->phy_node);
++              phy_reset_after_clk_enable(phy_dev);
++              put_device(&phy_dev->mdio.dev);
++      }
++}
++
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+@@ -1939,7 +1960,7 @@ static int fec_enet_clk_enable(struct ne
+               if (ret)
+                       goto failed_clk_ref;
+-              phy_reset_after_clk_enable(ndev->phydev);
++              fec_enet_phy_reset_after_clk_enable(ndev);
+       } else {
+               clk_disable_unprepare(fep->clk_enet_out);
+               if (fep->clk_ptp) {
+@@ -2994,7 +3015,7 @@ fec_enet_open(struct net_device *ndev)
+        * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+        */
+       if (reset_again)
+-              phy_reset_after_clk_enable(ndev->phydev);
++              fec_enet_phy_reset_after_clk_enable(ndev);
+       if (fep->quirks & FEC_QUIRK_ERR006687)
+               imx6q_cpuidle_fec_irqs_used();
diff --git a/queue-5.8/net-fix-pos-incrementment-in-ipv6_route_seq_next.patch b/queue-5.8/net-fix-pos-incrementment-in-ipv6_route_seq_next.patch
new file mode 100644 (file)
index 0000000..2725b76
--- /dev/null
@@ -0,0 +1,90 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Yonghong Song <yhs@fb.com>
+Date: Wed, 14 Oct 2020 07:46:12 -0700
+Subject: net: fix pos incrementment in ipv6_route_seq_next
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 6617dfd440149e42ce4d2be615eb31a4755f4d30 ]
+
+Commit 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+tried to fix the issue where seq_file pos is not increased
+if a NULL element is returned with seq_ops->next(). See bug
+  https://bugzilla.kernel.org/show_bug.cgi?id=206283
+The commit effectively does:
+  - increase pos for all seq_ops->start()
+  - increase pos for all seq_ops->next()
+
+For ipv6_route, increasing pos for all seq_ops->next() is correct.
+But increasing pos for seq_ops->start() is not correct
+since pos is used to determine how many items to skip during
+seq_ops->start():
+  iter->skip = *pos;
+seq_ops->start() just fetches the *current* pos item.
+The item can be skipped only after seq_ops->show() which essentially
+is the beginning of seq_ops->next().
+
+For example, I have 7 ipv6 route entries,
+  root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=4096
+  00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001     eth0
+  fe800000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000001 00000000 00000001     eth0
+  00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200       lo
+  00000000000000000000000000000001 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000003 00000000 80200001       lo
+  fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001     eth0
+  ff000000000000000000000000000000 08 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000004 00000000 00000001     eth0
+  00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200       lo
+  0+1 records in
+  0+1 records out
+  1050 bytes (1.0 kB, 1.0 KiB) copied, 0.00707908 s, 148 kB/s
+  root@arch-fb-vm1:~/net-next
+
+In the above, I specify buffer size 4096, so all records can be returned
+to user space with a single trip to the kernel.
+
+If I use buffer size 128, since each record size is 149, internally
+kernel seq_read() will read 149 into its internal buffer and return the data
+to user space in two read() syscalls. Then user read() syscall will trigger
+next seq_ops->start(). Since the current implementation increased pos even
+for seq_ops->start(), it will skip record #2, #4 and #6, assuming the first
+record is #1.
+
+  root@arch-fb-vm1:~/net-next dd if=/proc/net/ipv6_route bs=128
+  00000000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000400 00000001 00000000 00000001     eth0
+  00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200       lo
+  fe800000000000002050e3fffebd3be8 80 00000000000000000000000000000000 00 00000000000000000000000000000000 00000000 00000002 00000000 80200001     eth0
+  00000000000000000000000000000000 00 00000000000000000000000000000000 00 00000000000000000000000000000000 ffffffff 00000001 00000000 00200200       lo
+4+1 records in
+4+1 records out
+600 bytes copied, 0.00127758 s, 470 kB/s
+
+To fix the problem, create a fake pos pointer so seq_ops->start()
+won't actually increase seq_file pos. With this fix, the
+above `dd` command with `bs=128` will show correct result.
+
+Fixes: 4fc427e05158 ("ipv6_route_seq_next should increase position index")
+Cc: Alexei Starovoitov <ast@kernel.org>
+Suggested-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2617,8 +2617,10 @@ static void *ipv6_route_seq_start(struct
+       iter->skip = *pos;
+       if (iter->tbl) {
++              loff_t p = 0;
++
+               ipv6_route_seq_setup_walk(iter, net);
+-              return ipv6_route_seq_next(seq, NULL, pos);
++              return ipv6_route_seq_next(seq, NULL, &p);
+       } else {
+               return NULL;
+       }
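The underlying seq_file contract, as a sketch of the generic pattern (example_lookup() is a hypothetical stand-in): ->start() must position on the item at *pos without consuming it; only ->next() may advance *pos. The fix above obeys this by handing the internal next() call a throwaway position, so ->start() no longer moves the real one.

    static void *example_lookup(void *head, loff_t pos);    /* hypothetical */

    static void *example_seq_start(struct seq_file *seq, loff_t *pos)
    {
            /* fetch item #*pos; no side effects on *pos */
            return example_lookup(seq->private, *pos);
    }

    static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            ++*pos;                 /* the only place the position advances */
            return example_lookup(seq->private, *pos);
    }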
diff --git a/queue-5.8/net-ipa-skip-suspend-resume-activities-if-not-set-up.patch b/queue-5.8/net-ipa-skip-suspend-resume-activities-if-not-set-up.patch
new file mode 100644 (file)
index 0000000..a68ec23
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Alex Elder <elder@linaro.org>
+Date: Fri, 9 Oct 2020 15:28:48 -0500
+Subject: net: ipa: skip suspend/resume activities if not set up
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit d1704382821032fede445b816f4296fd379baacf ]
+
+When processing a system suspend request we suspend modem endpoints
+if they are enabled, and call ipa_cmd_tag_process() (which issues
+IPA commands) to ensure the IPA pipeline is cleared.  It is an error
+to attempt to issue an IPA command before setup is complete, so this
+is clearly a bug.  But we also shouldn't suspend or resume any
+endpoints that have not been set up.
+
+Have ipa_endpoint_suspend() and ipa_endpoint_resume() immediately
+return if setup hasn't completed, to avoid any attempt to configure
+endpoints or issue IPA commands in that case.
+
+Fixes: 84f9bd12d46d ("soc: qcom: ipa: IPA endpoints")
+Tested-by: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ipa/ipa_endpoint.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1447,6 +1447,9 @@ void ipa_endpoint_resume_one(struct ipa_
+ void ipa_endpoint_suspend(struct ipa *ipa)
+ {
++      if (!ipa->setup_complete)
++              return;
++
+       if (ipa->modem_netdev)
+               ipa_modem_suspend(ipa->modem_netdev);
+@@ -1458,6 +1461,9 @@ void ipa_endpoint_suspend(struct ipa *ip
+ void ipa_endpoint_resume(struct ipa *ipa)
+ {
++      if (!ipa->setup_complete)
++              return;
++
+       ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+       ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
diff --git a/queue-5.8/net-ipv4-always-honour-route-mtu-during-forwarding.patch b/queue-5.8/net-ipv4-always-honour-route-mtu-during-forwarding.patch
new file mode 100644 (file)
index 0000000..8bd5318
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: "Maciej Żenczykowski" <maze@google.com>
+Date: Wed, 23 Sep 2020 13:18:15 -0700
+Subject: net/ipv4: always honour route mtu during forwarding
+
+From: "Maciej Żenczykowski" <maze@google.com>
+
+[ Upstream commit 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0 ]
+
+Documentation/networking/ip-sysctl.txt:46 says:
+  ip_forward_use_pmtu - BOOLEAN
+    By default we don't trust protocol path MTUs while forwarding
+    because they could be easily forged and can lead to unwanted
+    fragmentation by the router.
+    You only need to enable this if you have user-space software
+    which tries to discover path mtus by itself and depends on the
+    kernel honoring this information. This is normally not the case.
+    Default: 0 (disabled)
+    Possible values:
+    0 - disabled
+    1 - enabled
+
+Which makes it pretty clear that setting it to 1 is a potential
+security/safety/DoS issue, and yet it is entirely reasonable to want
+forwarded traffic to honour explicitly administrator configured
+route mtus (instead of defaulting to device mtu).
+
+Indeed, I can't think of a single reason why you wouldn't want to.
+Since you configured a route mtu you probably know better...
+
+It is pretty common to have a higher device mtu to allow receiving
+large (jumbo) frames, while having some routes via that interface
+(potentially including the default route to the internet) specify
+a lower mtu.
+
+Note that ipv6 forwarding uses device mtu unless the route is locked
+(in which case it will use the route mtu).
+
+This approach is not usable for IPv4 where an 'mtu lock' on a route
+also has the side effect of disabling TCP path mtu discovery via
+disabling the IPv4 DF (don't frag) bit on all outgoing frames.
+
+I'm not aware of a way to lock a route from an IPv6 RA, so that also
+potentially seems wrong.
+
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: Sunmeet Gill (Sunny) <sgill@quicinc.com>
+Cc: Vinay Paradkar <vparadka@qti.qualcomm.com>
+Cc: Tyler Wear <twear@quicinc.com>
+Cc: David Ahern <dsahern@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -439,12 +439,18 @@ static inline unsigned int ip_dst_mtu_ma
+                                                   bool forwarding)
+ {
+       struct net *net = dev_net(dst->dev);
++      unsigned int mtu;
+       if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+           ip_mtu_locked(dst) ||
+           !forwarding)
+               return dst_mtu(dst);
++      /* 'forwarding = true' case should always honour route mtu */
++      mtu = dst_metric_raw(dst, RTAX_MTU);
++      if (mtu)
++              return mtu;
++
+       return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
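Condensed, the patched helper's decision ladder reads as follows (a sketch of the logic in the hunk above, not a drop-in replacement):

    static inline unsigned int mtu_sketch(struct net *net,
                                          const struct dst_entry *dst,
                                          bool forwarding)
    {
            unsigned int mtu;

            /* 1. PMTU applies if the admin opted in, the route is
             *    locked, or the packet is locally generated
             */
            if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
                ip_mtu_locked(dst) || !forwarding)
                    return dst_mtu(dst);

            /* 2. forwarding: an explicit route mtu now wins, e.g.
             *    "ip route ... mtu 1400" on a 9000-byte jumbo device
             */
            mtu = dst_metric_raw(dst, RTAX_MTU);
            if (mtu)
                    return mtu;

            /* 3. otherwise fall back to the (capped) device mtu */
            return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
    }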
diff --git a/queue-5.8/net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch b/queue-5.8/net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch
new file mode 100644 (file)
index 0000000..01c2c0a
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Thu, 8 Oct 2020 23:23:10 +0200
+Subject: net: j1939: j1939_session_fresh_new(): fix missing initialization of skbcnt
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit 13ba4c434422837d7c8c163f9c8d854e67bf3c99 ]
+
+This patch adds the initialization of skbcnt, similar to:
+
+    e009f95b1543 ("can: j1935: j1939_tp_tx_dat_new(): fix missing initialization of skbcnt")
+
+Let's play safe and initialize this skbcnt as well.
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/transport.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1488,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(s
+       skb->dev = priv->ndev;
+       can_skb_reserve(skb);
+       can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++      can_skb_prv(skb)->skbcnt = 0;
+       skcb = j1939_skb_to_cb(skb);
+       memcpy(skcb, rel_skcb, sizeof(*skcb));
diff --git a/queue-5.8/net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch b/queue-5.8/net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch
new file mode 100644 (file)
index 0000000..327271e
--- /dev/null
@@ -0,0 +1,72 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Tue, 6 Oct 2020 18:26:17 +0200
+Subject: net: mptcp: make DACK4/DACK8 usage consistent among all subflows
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit 37198e93ced70733f0b993dff28b7c33857e254f ]
+
+using packetdrill it's possible to observe the same MPTCP DSN being acked
+by different subflows with DACK4 and DACK8. This is in contrast with what
+specified in RFC8684 §3.3.2: if an MPTCP endpoint transmits a 64-bit wide
+DSN, it MUST be acknowledged with a 64-bit wide DACK. Fix 'use_64bit_ack'
+variable to make it a property of MPTCP sockets, not TCP subflows.
+
+Fixes: a0c1d0eafd1e ("mptcp: Use 32-bit DATA_ACK when possible")
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c  |    2 +-
+ net/mptcp/protocol.h |    2 +-
+ net/mptcp/subflow.c  |    3 +--
+ 3 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -517,7 +517,7 @@ static bool mptcp_established_options_ds
+               return ret;
+       }
+-      if (subflow->use_64bit_ack) {
++      if (READ_ONCE(msk->use_64bit_ack)) {
+               ack_size = TCPOLEN_MPTCP_DSS_ACK64;
+               opts->ext_copy.data_ack = msk->ack_seq;
+               opts->ext_copy.ack64 = 1;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -199,6 +199,7 @@ struct mptcp_sock {
+       u32             token;
+       unsigned long   flags;
+       bool            can_ack;
++      bool            use_64bit_ack; /* Set when we received a 64-bit DSN */
+       spinlock_t      join_list_lock;
+       struct work_struct work;
+       struct list_head conn_list;
+@@ -285,7 +286,6 @@ struct mptcp_subflow_context {
+               data_avail : 1,
+               rx_eof : 1,
+               data_fin_tx_enable : 1,
+-              use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
+               can_ack : 1;        /* only after processing the remote a key */
+       u64     data_fin_tx_seq;
+       u32     remote_nonce;
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -682,12 +682,11 @@ static enum mapping_status get_mapping_s
+       if (!mpext->dsn64) {
+               map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
+                                    mpext->data_seq);
+-              subflow->use_64bit_ack = 0;
+               pr_debug("expanded seq=%llu", subflow->map_seq);
+       } else {
+               map_seq = mpext->data_seq;
+-              subflow->use_64bit_ack = 1;
+       }
++      WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
+       if (subflow->map_valid) {
+               /* Allow replacing only with an identical map */
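
Since the writer (the subflow receive path) and the reader (the option writer, possibly on another subflow) can run concurrently, the fix accesses the connection-level flag with WRITE_ONCE()/READ_ONCE(). A userspace approximation of that pattern using C11 relaxed atomics follows; the struct and function names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>

struct conn {
        atomic_bool use_64bit_ack;      /* set when a 64-bit DSN arrives */
};

/* Receive path of any subflow: record the DSN width the peer used. */
static void rx_mapping(struct conn *c, bool dsn64)
{
        atomic_store_explicit(&c->use_64bit_ack, dsn64,
                              memory_order_relaxed);
}

/* Option writer, possibly on another subflow: pick the DACK width. */
static unsigned int ack_len(struct conn *c)
{
        return atomic_load_explicit(&c->use_64bit_ack,
                                    memory_order_relaxed) ? 8 : 4;
}

int main(void)
{
        struct conn c = { .use_64bit_ack = false };

        rx_mapping(&c, true);
        return ack_len(&c) == 8 ? 0 : 1;
}
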
diff --git a/queue-5.8/net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch b/queue-5.8/net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch
new file mode 100644 (file)
index 0000000..bffb5aa
--- /dev/null
@@ -0,0 +1,75 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Leon Romanovsky <leonro@nvidia.com>
+Date: Wed, 14 Oct 2020 11:56:42 +0300
+Subject: net: sched: Fix suspicious RCU usage while accessing tcf_tunnel_info
+
+From: Leon Romanovsky <leonro@nvidia.com>
+
+[ Upstream commit d086a1c65aabb5a4e1edc580ca583e2964c62b44 ]
+
+Access through tcf_tunnel_info() produces the following splat; fix it
+by dereferencing the tcf_tunnel_key_params pointer with a marker that
+the internal tcfa_lock is held.
+
+ =============================
+ WARNING: suspicious RCU usage
+ 5.9.0+ #1 Not tainted
+ -----------------------------
+ include/net/tc_act/tc_tunnel_key.h:59 suspicious rcu_dereference_protected() usage!
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 2, debug_locks = 1
+ 1 lock held by tc/34839:
+  #0: ffff88828572c2a0 (&p->tcfa_lock){+...}-{2:2}, at: tc_setup_flow_action+0xb3/0x48b5
+ stack backtrace:
+ CPU: 1 PID: 34839 Comm: tc Not tainted 5.9.0+ #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+  dump_stack+0x9a/0xd0
+  tc_setup_flow_action+0x14cb/0x48b5
+  fl_hw_replace_filter+0x347/0x690 [cls_flower]
+  fl_change+0x2bad/0x4875 [cls_flower]
+  tc_new_tfilter+0xf6f/0x1ba0
+  rtnetlink_rcv_msg+0x5f2/0x870
+  netlink_rcv_skb+0x124/0x350
+  netlink_unicast+0x433/0x700
+  netlink_sendmsg+0x6f1/0xbd0
+  sock_sendmsg+0xb0/0xe0
+  ____sys_sendmsg+0x4fa/0x6d0
+  ___sys_sendmsg+0x12e/0x1b0
+  __sys_sendmsg+0xa4/0x120
+  do_syscall_64+0x2d/0x40
+  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f1f8cd4fe57
+ Code: 0c 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+ RSP: 002b:00007ffdc1e193b8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f1f8cd4fe57
+ RDX: 0000000000000000 RSI: 00007ffdc1e19420 RDI: 0000000000000003
+ RBP: 000000005f85aafa R08: 0000000000000001 R09: 00007ffdc1e1936c
+ R10: 000000000040522d R11: 0000000000000246 R12: 0000000000000001
+ R13: 0000000000000000 R14: 00007ffdc1e1d6f0 R15: 0000000000482420
+
+Fixes: 3ebaf6da0716 ("net: sched: Do not assume RTNL is held in tunnel key action helpers")
+Fixes: 7a47281439ba ("net: sched: lock action when translating it to flow_action infra")
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tc_act/tc_tunnel_key.h |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/include/net/tc_act/tc_tunnel_key.h
++++ b/include/net/tc_act/tc_tunnel_key.h
+@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+       struct tcf_tunnel_key *t = to_tunnel_key(a);
+-      struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
++      struct tcf_tunnel_key_params *params;
++
++      params = rcu_dereference_protected(t->params,
++                                         lockdep_is_held(&a->tcfa_lock));
+       return &params->tcft_enc_metadata->u.tun_info;
+ #else
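
The idiom in the fix, rcu_dereference_protected() with lockdep_is_held(), states that the pointer is read under the writers' lock rather than under rcu_read_lock(), and lets lockdep verify that. Here is a toy userspace model of the same contract, with an assert standing in for lockdep; all names are illustrative.

#include <assert.h>
#include <pthread.h>

struct params { int enc_metadata; };

struct action {
        pthread_mutex_t lock;   /* stands in for the action's tcfa_lock */
        int lock_held;          /* toy lockdep state */
        struct params *params;  /* normally an RCU-managed pointer */
};

/* Legal only while a->lock is held; the accessor asserts exactly that. */
static struct params *deref_protected(struct action *a)
{
        assert(a->lock_held);   /* toy lockdep_is_held(&a->lock) */
        return a->params;
}

static void use_params(struct action *a)
{
        pthread_mutex_lock(&a->lock);
        a->lock_held = 1;
        struct params *p = deref_protected(a);
        (void)p;                /* fields may be read safely here */
        a->lock_held = 0;
        pthread_mutex_unlock(&a->lock);
}

int main(void)
{
        struct params p = { .enc_metadata = 1 };
        struct action a = { .lock = PTHREAD_MUTEX_INITIALIZER, .params = &p };

        use_params(&a);
        return 0;
}
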
diff --git a/queue-5.8/net-smc-fix-use-after-free-of-delayed-events.patch b/queue-5.8/net-smc-fix-use-after-free-of-delayed-events.patch
new file mode 100644 (file)
index 0000000..888dd86
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Karsten Graul <kgraul@linux.ibm.com>
+Date: Wed, 14 Oct 2020 19:43:27 +0200
+Subject: net/smc: fix use-after-free of delayed events
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit d535ca1367787ddc8bff22d679a11f864c8228bc ]
+
+When a delayed event is enqueued, the event worker sends this event the
+next time it runs and no other flow is currently active. The event
+handler is called for the delayed event, but the pointer to the event
+remains set in lgr->delayed_event; it is only cleared later in the
+processing, by smc_llc_flow_start().
+This can lead to a use-after-free condition when the processing never
+reaches smc_llc_flow_start() but frees the event because of an error:
+the delayed_event pointer is then still set although the event has been
+freed.
+Fix this by always clearing the delayed event pointer when the event is
+handed to the event handler for processing, and remove the code that
+clears it in smc_llc_flow_start().
+
+Fixes: 555da9af827d ("net/smc: add event-based llc_flow framework")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_llc.c |   13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct sm
+       default:
+               flow->type = SMC_LLC_FLOW_NONE;
+       }
+-      if (qentry == lgr->delayed_event)
+-              lgr->delayed_event = NULL;
+       smc_llc_flow_qentry_set(flow, qentry);
+       spin_unlock_bh(&lgr->llc_flow_lock);
+       return true;
+@@ -1590,13 +1588,12 @@ static void smc_llc_event_work(struct wo
+       struct smc_llc_qentry *qentry;
+       if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
+-              if (smc_link_usable(lgr->delayed_event->link)) {
+-                      smc_llc_event_handler(lgr->delayed_event);
+-              } else {
+-                      qentry = lgr->delayed_event;
+-                      lgr->delayed_event = NULL;
++              qentry = lgr->delayed_event;
++              lgr->delayed_event = NULL;
++              if (smc_link_usable(qentry->link))
++                      smc_llc_event_handler(qentry);
++              else
+                       kfree(qentry);
+-              }
+       }
+ again:
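
The fix is an instance of the general "detach before dispatch" rule: take the object out of the shared slot first, then handle or free the now privately owned copy, so no stale pointer can survive an error path. A minimal userspace sketch with invented names:

#include <stdlib.h>

struct qentry { int link_ok; };

struct group { struct qentry *delayed_event; };

static void handle(struct qentry *q)
{
        /* ... process the event ... */
        free(q);
}

static void event_work(struct group *g)
{
        struct qentry *q = g->delayed_event;

        if (!q)
                return;
        g->delayed_event = NULL;        /* detach: no stale pointer remains */
        if (q->link_ok)                 /* stands in for smc_link_usable() */
                handle(q);
        else
                free(q);                /* error path frees a private copy */
}

int main(void)
{
        struct group g = { .delayed_event = malloc(sizeof(struct qentry)) };

        if (!g.delayed_event)
                return 1;
        g.delayed_event->link_ok = 1;
        event_work(&g);
        return g.delayed_event != NULL; /* 0: slot was cleared */
}
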
diff --git a/queue-5.8/net-smc-fix-valid-dmbe-buffer-sizes.patch b/queue-5.8/net-smc-fix-valid-dmbe-buffer-sizes.patch
new file mode 100644 (file)
index 0000000..37d32d5
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Karsten Graul <kgraul@linux.ibm.com>
+Date: Wed, 14 Oct 2020 19:43:28 +0200
+Subject: net/smc: fix valid DMBE buffer sizes
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit ef12ad45880b696eb993d86c481ca891836ab593 ]
+
+SMCD_DMBE_SIZES should cover all valid DMBE buffer sizes, so the
+correct value is 6, which means 1MB. With 7, the registration of an ISM
+buffer would always fail because an invalid size is requested.
+Fix that by setting the value to 6.
+
+Fixes: c6ba7c9ba43d ("net/smc: add base infrastructure for SMC-D and ISM")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1595,7 +1595,7 @@ out:
+       return rc;
+ }
+-#define SMCD_DMBE_SIZES               7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES               6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+                                               bool is_dmb, int bufsize)
diff --git a/queue-5.8/net-tls-sendfile-fails-with-ktls-offload.patch b/queue-5.8/net-tls-sendfile-fails-with-ktls-offload.patch
new file mode 100644 (file)
index 0000000..20e63d6
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Rohit Maheshwari <rohitm@chelsio.com>
+Date: Thu, 8 Oct 2020 00:10:21 +0530
+Subject: net/tls: sendfile fails with ktls offload
+
+From: Rohit Maheshwari <rohitm@chelsio.com>
+
+[ Upstream commit ea1dd3e9d080c961b9a451130b61c72dc9a5397b ]
+
+When sendpage is first called with more data to follow, 'more' in
+tls_push_data() gets set, which later sets pending_open_record_frags.
+But when no data is left in the file and tls_push_data() is called for
+the last time, pending_open_record_frags is not reset. When a 2-byte
+encrypted alert later arrives via sendmsg, the code first checks
+pending_open_record_frags and, since it is still set, creates a record
+with 0 data bytes to encrypt, i.e. a record of prepend_size + tag_size
+only, which causes problems.
+Set/reset pending_open_record_frags based on the 'more' bit instead.
+
+Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure")
+Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_device.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_prot_info *prot = &tls_ctx->prot_info;
+       struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+-      int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+       struct tls_record_info *record = ctx->open_record;
+       int tls_push_record_flags;
+       struct page_frag *pfrag;
+       size_t orig_size = size;
+       u32 max_open_record_len;
+-      int copy, rc = 0;
++      bool more = false;
+       bool done = false;
++      int copy, rc = 0;
+       long timeo;
+       if (flags &
+@@ -492,9 +492,8 @@ handle_error:
+               if (!size) {
+ last_record:
+                       tls_push_record_flags = flags;
+-                      if (more) {
+-                              tls_ctx->pending_open_record_frags =
+-                                              !!record->num_frags;
++                      if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++                              more = true;
+                               break;
+                       }
+@@ -526,6 +525,8 @@ last_record:
+               }
+       } while (!done);
++      tls_ctx->pending_open_record_frags = more;
++
+       if (orig_size - size > 0)
+               rc = orig_size - size;
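
The control flow the fix establishes can be modeled in isolation: 'more' records whether the final loop iteration left a record open, and only that value is written back to the connection state after the loop. This is a userspace sketch with stand-in flag names and a fixed 16KB record size, not the kernel function.

#include <stdbool.h>
#include <stddef.h>

#define MSG_MORE_ISH    0x1     /* stands in for MSG_MORE */
#define MSG_NOTLAST_ISH 0x2     /* stands in for MSG_SENDPAGE_NOTLAST */

struct tls_ctx { bool pending_open_record_frags; };

static size_t push_data(struct tls_ctx *ctx, size_t size, int flags)
{
        bool more = false;
        bool done = false;
        size_t sent = 0;

        do {
                /* copy up to one record's worth of payload */
                size_t copy = size > 16384 ? 16384 : size;

                size -= copy;
                sent += copy;

                if (!size) {
                        if (flags & (MSG_NOTLAST_ISH | MSG_MORE_ISH)) {
                                more = true;    /* record left open */
                                break;
                        }
                        done = true;            /* last record is closed */
                }
        } while (!done);

        ctx->pending_open_record_frags = more;  /* the fix, in effect */
        return sent;
}

int main(void)
{
        struct tls_ctx ctx = { .pending_open_record_frags = false };

        push_data(&ctx, 20000, MSG_MORE_ISH);   /* leaves a record open */
        push_data(&ctx, 100, 0);                /* closes it again */
        return ctx.pending_open_record_frags;   /* 0 expected */
}
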
diff --git a/queue-5.8/net-usb-qmi_wwan-add-cellient-mpl200-card.patch b/queue-5.8/net-usb-qmi_wwan-add-cellient-mpl200-card.patch
new file mode 100644 (file)
index 0000000..f49a1c3
--- /dev/null
@@ -0,0 +1,29 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Date: Thu, 8 Oct 2020 09:21:38 +0200
+Subject: net: usb: qmi_wwan: add Cellient MPL200 card
+
+From: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+
+[ Upstream commit 28802e7c0c9954218d1830f7507edc9d49b03a00 ]
+
+Add the USB IDs of the Cellient MPL200 card.
+
+Signed-off-by: Wilken Gottwalt <wilken.gottwalt@mailbox.org>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1375,6 +1375,7 @@ static const struct usb_device_id produc
+       {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+       {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
++      {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
diff --git a/queue-5.8/net_sched-remove-a-redundant-goto-chain-check.patch b/queue-5.8/net_sched-remove-a-redundant-goto-chain-check.patch
new file mode 100644 (file)
index 0000000..0ef29f2
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 28 Sep 2020 11:31:03 -0700
+Subject: net_sched: remove a redundant goto chain check
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 1aad8049909a6d3379175ef2824a68ac35c0b564 ]
+
+All TC actions call tcf_action_check_ctrlact() to validate the
+goto chain, so this check in tcf_action_init_1() is actually
+redundant. Remove it to avoid the trouble of leaking memory.
+
+Fixes: e49d8c22f126 ("net_sched: defer tcf_idr_insert() in tcf_action_init_1()")
+Reported-by: Vlad Buslov <vladbu@mellanox.com>
+Suggested-by: Davide Caratti <dcaratti@redhat.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Davide Caratti <dcaratti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_api.c |   14 --------------
+ 1 file changed, 14 deletions(-)
+
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -722,13 +722,6 @@ int tcf_action_destroy(struct tc_action
+       return ret;
+ }
+-static int tcf_action_destroy_1(struct tc_action *a, int bind)
+-{
+-      struct tc_action *actions[] = { a, NULL };
+-
+-      return tcf_action_destroy(actions, bind);
+-}
+-
+ static int tcf_action_put(struct tc_action *p)
+ {
+       return __tcf_action_put(p, false);
+@@ -1000,13 +993,6 @@ struct tc_action *tcf_action_init_1(stru
+       if (err < 0)
+               goto err_mod;
+-      if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+-          !rcu_access_pointer(a->goto_chain)) {
+-              tcf_action_destroy_1(a, bind);
+-              NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
+-              return ERR_PTR(-EINVAL);
+-      }
+-
+       if (!name && tb[TCA_ACT_COOKIE])
+               tcf_set_action_cookie(&a->act_cookie, cookie);
diff --git a/queue-5.8/r8169-fix-data-corruption-issue-on-rtl8402.patch b/queue-5.8/r8169-fix-data-corruption-issue-on-rtl8402.patch
new file mode 100644 (file)
index 0000000..d35eb14
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 1 Oct 2020 09:23:02 +0200
+Subject: r8169: fix data corruption issue on RTL8402
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit ef9da46ddef071e1bbb943afbbe9b38771855554 ]
+
+Petr reported that, after resume from suspend, the RTL8402 partially
+truncates incoming packets, and that re-initializing the RxConfig
+register before the actual chip re-initialization sequence is needed
+to avoid the issue.
+
+Reported-by: Petr Tesarik <ptesarik@suse.cz>
+Proposed-by: Petr Tesarik <ptesarik@suse.cz>
+Tested-by: Petr Tesarik <ptesarik@suse.cz>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4871,6 +4871,10 @@ static int __maybe_unused rtl8169_resume
+       if (netif_running(tp->dev))
+               __rtl8169_resume(tp);
++      /* Reportedly at least Asus X453MA truncates packets otherwise */
++      if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++              rtl_init_rxcfg(tp);
++
+       return 0;
+ }
diff --git a/queue-5.8/series b/queue-5.8/series
new file mode 100644 (file)
index 0000000..11c39c1
--- /dev/null
@@ -0,0 +1,25 @@
+cxgb4-handle-4-tuple-pedit-to-nat-mode-translation.patch
+ibmveth-switch-order-of-ibmveth_helper-calls.patch
+ibmveth-identify-ingress-large-send-packets.patch
+ipv4-restore-flowi4_oif-update-before-call-to-xfrm_lookup_route.patch
+mlx4-handle-non-napi-callers-to-napi_poll.patch
+net-dsa-microchip-fix-race-condition.patch
+net-fec-fix-phy_device-lookup-for-phy_reset_after_clk_enable.patch
+net-fec-fix-phy-init-after-phy_reset_after_clk_enable.patch
+net-fix-pos-incrementment-in-ipv6_route_seq_next.patch
+net-ipa-skip-suspend-resume-activities-if-not-set-up.patch
+net-mptcp-make-dack4-dack8-usage-consistent-among-all-subflows.patch
+net-sched-fix-suspicious-rcu-usage-while-accessing-tcf_tunnel_info.patch
+net-smc-fix-use-after-free-of-delayed-events.patch
+net-smc-fix-valid-dmbe-buffer-sizes.patch
+net-tls-sendfile-fails-with-ktls-offload.patch
+net-usb-qmi_wwan-add-cellient-mpl200-card.patch
+tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
+socket-fix-option-so_timestamping_new.patch
+socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch
+can-m_can_platform-don-t-call-m_can_class_suspend-in-runtime-suspend.patch
+can-j1935-j1939_tp_tx_dat_new-fix-missing-initialization-of-skbcnt.patch
+net-j1939-j1939_session_fresh_new-fix-missing-initialization-of-skbcnt.patch
+net-ipv4-always-honour-route-mtu-during-forwarding.patch
+net_sched-remove-a-redundant-goto-chain-check.patch
+r8169-fix-data-corruption-issue-on-rtl8402.patch
diff --git a/queue-5.8/socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch b/queue-5.8/socket-don-t-clear-sock_tstamp_new-when-so_timestampns-is-disabled.patch
new file mode 100644 (file)
index 0000000..7c39eec
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 11:35:42 +0200
+Subject: socket: don't clear SOCK_TSTAMP_NEW when SO_TIMESTAMPNS is disabled
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 4e3bbb33e6f36e4b05be1b1b9b02e3dd5aaa3e69 ]
+
+SOCK_TSTAMP_NEW (timespec64 instead of timespec) is also used for
+hardware time stamps (configured via SO_TIMESTAMPING_NEW).
+
+User space (ptp4l) first configures hardware time stamping via
+SO_TIMESTAMPING_NEW which sets SOCK_TSTAMP_NEW. In the next step, ptp4l
+disables SO_TIMESTAMPNS(_NEW) (software time stamps), but this must not
+switch hardware time stamps back to "32 bit mode".
+
+This problem happens on 32-bit platforms where the libc has already
+switched to struct timespec64 (from SO_TIMExxx_OLD to SO_TIMExxx_NEW
+socket options). ptp4l complains with "missing timestamp on transmitted
+peer delay request" because the wrong format is received (and
+discarded).
+
+Fixes: 887feae36aee ("socket: Add SO_TIMESTAMP[NS]_NEW")
+Fixes: 783da70e8396 ("net: add sock_enable_timestamps")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -777,7 +777,6 @@ static void __sock_set_timestamps(struct
+       } else {
+               sock_reset_flag(sk, SOCK_RCVTSTAMP);
+               sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+-              sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+       }
+ }
diff --git a/queue-5.8/socket-fix-option-so_timestamping_new.patch b/queue-5.8/socket-fix-option-so_timestamping_new.patch
new file mode 100644 (file)
index 0000000..685bb98
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Christian Eggers <ceggers@arri.de>
+Date: Mon, 12 Oct 2020 11:35:41 +0200
+Subject: socket: fix option SO_TIMESTAMPING_NEW
+
+From: Christian Eggers <ceggers@arri.de>
+
+[ Upstream commit 59e611a566e7cd48cf54b6777a11fe3f9c2f9db5 ]
+
+The comparison of optname with SO_TIMESTAMPING_NEW is the wrong way
+around, so SOCK_TSTAMP_NEW will first be set and then reset again.
+Additionally, move it out of the test for SOF_TIMESTAMPING_RX_SOFTWARE,
+as this seems unrelated.
+
+This problem happens on 32-bit platforms where the libc has already
+switched to struct timespec64 (from SO_TIMExxx_OLD to SO_TIMExxx_NEW
+socket options). ptp4l complains with "missing timestamp on transmitted
+peer delay request" because the wrong format is received (and
+discarded).
+
+Fixes: 9718475e6908 ("socket: Add SO_TIMESTAMPING_NEW")
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Reviewed-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Reviewed-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c |   10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1007,8 +1007,6 @@ set_sndbuf:
+               __sock_set_timestamps(sk, valbool, true, true);
+               break;
+       case SO_TIMESTAMPING_NEW:
+-              sock_set_flag(sk, SOCK_TSTAMP_NEW);
+-              /* fall through */
+       case SO_TIMESTAMPING_OLD:
+               if (val & ~SOF_TIMESTAMPING_MASK) {
+                       ret = -EINVAL;
+@@ -1037,16 +1035,14 @@ set_sndbuf:
+               }
+               sk->sk_tsflags = val;
++              sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
++
+               if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+                       sock_enable_timestamp(sk,
+                                             SOCK_TIMESTAMPING_RX_SOFTWARE);
+-              else {
+-                      if (optname == SO_TIMESTAMPING_NEW)
+-                              sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+-
++              else
+                       sock_disable_timestamp(sk,
+                                              (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+-              }
+               break;
+       case SO_RCVLOWAT:
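
The rewritten branch relies on sock_valbool_flag(), which sets a flag bit when the value is true and clears it otherwise, so one unconditional statement keyed on the option name replaces the set-then-maybe-reset dance. A userspace model of that helper and its use here; the flag bit and option values are only illustrative.

#include <stdbool.h>

enum { SOCK_TSTAMP_NEW = 0 };
enum { SO_TIMESTAMPING_OLD = 37, SO_TIMESTAMPING_NEW = 65 };

struct sock { unsigned long flags; };

/* Set the flag bit when val is true, clear it otherwise. */
static void valbool_flag(struct sock *sk, int bit, bool val)
{
        if (val)
                sk->flags |= 1UL << bit;
        else
                sk->flags &= ~(1UL << bit);
}

static void set_timestamping(struct sock *sk, int optname)
{
        /* True only for the _NEW option: report 64-bit timespecs. */
        valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
        /* ... RX software enable/disable is handled separately ... */
}

int main(void)
{
        struct sock sk = { .flags = 0 };

        set_timestamping(&sk, SO_TIMESTAMPING_NEW);
        set_timestamping(&sk, SO_TIMESTAMPING_OLD);     /* clears it again */
        return (int)sk.flags;   /* 0 expected */
}
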
diff --git a/queue-5.8/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch b/queue-5.8/tipc-fix-the-skb_unshare-in-tipc_buf_append.patch
new file mode 100644 (file)
index 0000000..76fe71e
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat Oct 17 08:58:17 AM CEST 2020
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 7 Oct 2020 21:12:50 -0700
+Subject: tipc: fix the skb_unshare() in tipc_buf_append()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ed42989eab57d619667d7e87dfbd8fe207db54fe ]
+
+skb_unshare() drops a reference count on the old skb unconditionally,
+so in the failure case we end up freeing the skb twice here. And
+because the skb is allocated as an fclone and cloned by the caller
+tipc_msg_reassemble(), the consequence is actually freeing the
+original skb too, thus triggering the UAF reported by syzbot.
+
+Fix this by replacing this skb_unshare() with skb_cloned()+skb_copy().
+
+Fixes: ff48b6222e65 ("tipc: use skb_unshare() instead in tipc_buf_append()")
+Reported-and-tested-by: syzbot+e96a7ba46281824cc46a@syzkaller.appspotmail.com
+Cc: Jon Maloy <jmaloy@redhat.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/msg.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **hea
+       if (fragid == FIRST_FRAGMENT) {
+               if (unlikely(head))
+                       goto err;
+-              frag = skb_unshare(frag, GFP_ATOMIC);
++              if (skb_cloned(frag))
++                      frag = skb_copy(frag, GFP_ATOMIC);
+               if (unlikely(!frag))
+                       goto err;
+               head = *headbuf = frag;