git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.5-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Mar 2020 08:07:43 +0000 (09:07 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Mar 2020 08:07:43 +0000 (09:07 +0100)
added patches:
bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch
bnxt_en-fix-priority-bytes-and-packets-counters-in-ethtool-s.patch
bnxt_en-free-context-memory-after-disabling-pci-in-probe-error-path.patch
bnxt_en-reset-rings-if-ring-reservation-fails-during-open.patch
bnxt_en-return-error-if-bnxt_alloc_ctx_mem-fails.patch
cxgb4-fix-throughput-drop-during-tx-backpressure.patch
cxgb4-fix-txq-restart-check-during-backpressure.patch
geneve-move-debug-check-after-netdev-unregister.patch
hsr-add-restart-routine-into-hsr_get_node_list.patch
hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch
hsr-set-.netnsok-flag.patch
hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch
ipv4-fix-a-rcu-list-lock-in-inet_dump_fib.patch
macsec-restrict-to-ethernet-devices.patch
mlxsw-pci-only-issue-reset-when-system-is-ready.patch
mlxsw-spectrum_mr-fix-list-iteration-in-error-path.patch
net-bcmgenet-keep-mac-in-reset-until-phy-is-up.patch
net-bpfilter-fix-dprintf-usage-for-dev-kmsg.patch
net-cbs-fix-software-cbs-to-consider-packet-sending-time.patch
net-dsa-fix-duplicate-frames-flooded-by-learning.patch
net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch
net-dsa-tag_8021q-replace-dsa_8021q_remove_header-with-__skb_vlan_pop.patch
net-ena-add-pci-shutdown-handler-to-allow-safe-kexec.patch
net-ena-avoid-memory-access-violation-by-validating-req_id-properly.patch
net-ena-fix-continuous-keep-alive-resets.patch
net-ena-fix-incorrect-setting-of-the-number-of-msix-vectors.patch
net-ena-fix-request-of-incorrect-number-of-irq-vectors.patch
net-ip_gre-accept-ifla_info_data-less-configuration.patch
net-ip_gre-separate-erspan-newlink-changelink-callbacks.patch
net-mlx5-dr-fix-postsend-actions-write-length.patch
net-mlx5_core-set-ib-capability-mask1-to-fix-ib_srpt-connection-failure.patch
net-mlx5e-do-not-recover-from-a-non-fatal-syndrome.patch
net-mlx5e-enhance-icosq-wqe-info-fields.patch
net-mlx5e-fix-endianness-handling-in-pedit-mask.patch
net-mlx5e-fix-icosq-recovery-flow-with-striding-rq.patch
net-mlx5e-fix-missing-reset-of-sw-metadata-in-striding-rq-reset.patch
net-mlx5e-ktls-fix-tcp-seq-off-by-1-issue-in-tx-resync-flow.patch
net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch
net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch
net-phy-dp83867-w-a-for-fld-detect-threshold-bootstrapping-issue.patch
net-phy-mdio-bcm-unimac-fix-clock-handling.patch
net-phy-mdio-mux-bcm-iproc-check-clk_prepare_enable-return-value.patch
net-qmi_wwan-add-support-for-askey-wwhc050.patch
net-sched-act_ct-fix-leak-of-ct-zone-template-on-replace.patch
net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch
net_sched-cls_route-remove-the-right-filter-from-hashtable.patch
net_sched-hold-rtnl-lock-in-tcindex_partial_destroy_work.patch
net_sched-keep-alloc_hash-updated-after-hash-allocation.patch
nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch
r8169-re-enable-msi-on-rtl8168c.patch
revert-net-bcmgenet-use-rgmii-loopback-for-mac-reset.patch
slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch
tcp-also-null-skb-dev-when-copy-was-needed.patch
tcp-ensure-skb-dev-is-null-before-leaving-tcp-stack.patch
tcp-repair-fix-tcp_queue_seq-implementation.patch
vxlan-check-return-value-of-gro_cells_init.patch

57 files changed:
queue-5.5/bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch [new file with mode: 0644]
queue-5.5/bnxt_en-fix-priority-bytes-and-packets-counters-in-ethtool-s.patch [new file with mode: 0644]
queue-5.5/bnxt_en-free-context-memory-after-disabling-pci-in-probe-error-path.patch [new file with mode: 0644]
queue-5.5/bnxt_en-reset-rings-if-ring-reservation-fails-during-open.patch [new file with mode: 0644]
queue-5.5/bnxt_en-return-error-if-bnxt_alloc_ctx_mem-fails.patch [new file with mode: 0644]
queue-5.5/cxgb4-fix-throughput-drop-during-tx-backpressure.patch [new file with mode: 0644]
queue-5.5/cxgb4-fix-txq-restart-check-during-backpressure.patch [new file with mode: 0644]
queue-5.5/geneve-move-debug-check-after-netdev-unregister.patch [new file with mode: 0644]
queue-5.5/hsr-add-restart-routine-into-hsr_get_node_list.patch [new file with mode: 0644]
queue-5.5/hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch [new file with mode: 0644]
queue-5.5/hsr-set-.netnsok-flag.patch [new file with mode: 0644]
queue-5.5/hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch [new file with mode: 0644]
queue-5.5/ipv4-fix-a-rcu-list-lock-in-inet_dump_fib.patch [new file with mode: 0644]
queue-5.5/macsec-restrict-to-ethernet-devices.patch [new file with mode: 0644]
queue-5.5/mlxsw-pci-only-issue-reset-when-system-is-ready.patch [new file with mode: 0644]
queue-5.5/mlxsw-spectrum_mr-fix-list-iteration-in-error-path.patch [new file with mode: 0644]
queue-5.5/net-bcmgenet-keep-mac-in-reset-until-phy-is-up.patch [new file with mode: 0644]
queue-5.5/net-bpfilter-fix-dprintf-usage-for-dev-kmsg.patch [new file with mode: 0644]
queue-5.5/net-cbs-fix-software-cbs-to-consider-packet-sending-time.patch [new file with mode: 0644]
queue-5.5/net-dsa-fix-duplicate-frames-flooded-by-learning.patch [new file with mode: 0644]
queue-5.5/net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch [new file with mode: 0644]
queue-5.5/net-dsa-tag_8021q-replace-dsa_8021q_remove_header-with-__skb_vlan_pop.patch [new file with mode: 0644]
queue-5.5/net-ena-add-pci-shutdown-handler-to-allow-safe-kexec.patch [new file with mode: 0644]
queue-5.5/net-ena-avoid-memory-access-violation-by-validating-req_id-properly.patch [new file with mode: 0644]
queue-5.5/net-ena-fix-continuous-keep-alive-resets.patch [new file with mode: 0644]
queue-5.5/net-ena-fix-incorrect-setting-of-the-number-of-msix-vectors.patch [new file with mode: 0644]
queue-5.5/net-ena-fix-request-of-incorrect-number-of-irq-vectors.patch [new file with mode: 0644]
queue-5.5/net-ip_gre-accept-ifla_info_data-less-configuration.patch [new file with mode: 0644]
queue-5.5/net-ip_gre-separate-erspan-newlink-changelink-callbacks.patch [new file with mode: 0644]
queue-5.5/net-mlx5-dr-fix-postsend-actions-write-length.patch [new file with mode: 0644]
queue-5.5/net-mlx5_core-set-ib-capability-mask1-to-fix-ib_srpt-connection-failure.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-do-not-recover-from-a-non-fatal-syndrome.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-enhance-icosq-wqe-info-fields.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-fix-endianness-handling-in-pedit-mask.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-fix-icosq-recovery-flow-with-striding-rq.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-fix-missing-reset-of-sw-metadata-in-striding-rq-reset.patch [new file with mode: 0644]
queue-5.5/net-mlx5e-ktls-fix-tcp-seq-off-by-1-issue-in-tx-resync-flow.patch [new file with mode: 0644]
queue-5.5/net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch [new file with mode: 0644]
queue-5.5/net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch [new file with mode: 0644]
queue-5.5/net-phy-dp83867-w-a-for-fld-detect-threshold-bootstrapping-issue.patch [new file with mode: 0644]
queue-5.5/net-phy-mdio-bcm-unimac-fix-clock-handling.patch [new file with mode: 0644]
queue-5.5/net-phy-mdio-mux-bcm-iproc-check-clk_prepare_enable-return-value.patch [new file with mode: 0644]
queue-5.5/net-qmi_wwan-add-support-for-askey-wwhc050.patch [new file with mode: 0644]
queue-5.5/net-sched-act_ct-fix-leak-of-ct-zone-template-on-replace.patch [new file with mode: 0644]
queue-5.5/net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch [new file with mode: 0644]
queue-5.5/net_sched-cls_route-remove-the-right-filter-from-hashtable.patch [new file with mode: 0644]
queue-5.5/net_sched-hold-rtnl-lock-in-tcindex_partial_destroy_work.patch [new file with mode: 0644]
queue-5.5/net_sched-keep-alloc_hash-updated-after-hash-allocation.patch [new file with mode: 0644]
queue-5.5/nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch [new file with mode: 0644]
queue-5.5/r8169-re-enable-msi-on-rtl8168c.patch [new file with mode: 0644]
queue-5.5/revert-net-bcmgenet-use-rgmii-loopback-for-mac-reset.patch [new file with mode: 0644]
queue-5.5/series
queue-5.5/slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch [new file with mode: 0644]
queue-5.5/tcp-also-null-skb-dev-when-copy-was-needed.patch [new file with mode: 0644]
queue-5.5/tcp-ensure-skb-dev-is-null-before-leaving-tcp-stack.patch [new file with mode: 0644]
queue-5.5/tcp-repair-fix-tcp_queue_seq-implementation.patch [new file with mode: 0644]
queue-5.5/vxlan-check-return-value-of-gro_cells_init.patch [new file with mode: 0644]

diff --git a/queue-5.5/bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch b/queue-5.5/bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch
new file mode 100644 (file)
index 0000000..8a83b97
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Edwin Peer <edwin.peer@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:02 -0400
+Subject: bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets()
+
+From: Edwin Peer <edwin.peer@broadcom.com>
+
+[ Upstream commit 62d4073e86e62e316bea2c53e77db10418fd5dd7 ]
+
+The allocated ieee_ets structure goes out of scope without being freed,
+leaking memory. Appropriate result codes should be returned so that
+callers do not rely on invalid data passed by reference.
+
+Also cache the ETS config retrieved from the device so that it doesn't
+need to be freed. The balance of the code was clearly written with the
+intent of having the results of querying the hardware cached in the
+device structure. The commensurate store was evidently missed though.
+
+Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.")
+Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c |   15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct
+ {
+       struct bnxt *bp = netdev_priv(dev);
+       struct ieee_ets *my_ets = bp->ieee_ets;
++      int rc;
+       ets->ets_cap = bp->max_tc;
+       if (!my_ets) {
+-              int rc;
+-
+               if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+                       return 0;
+               my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+               if (!my_ets)
+-                      return 0;
++                      return -ENOMEM;
+               rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
+               if (rc)
+-                      return 0;
++                      goto error;
+               rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
+               if (rc)
+-                      return 0;
++                      goto error;
++
++              /* cache result */
++              bp->ieee_ets = my_ets;
+       }
+       ets->cbs = my_ets->cbs;
+@@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct
+       memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+       return 0;
++error:
++      kfree(my_ets);
++      return rc;
+ }
+ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
diff --git a/queue-5.5/bnxt_en-fix-priority-bytes-and-packets-counters-in-ethtool-s.patch b/queue-5.5/bnxt_en-fix-priority-bytes-and-packets-counters-in-ethtool-s.patch
new file mode 100644 (file)
index 0000000..6d656bc
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:01 -0400
+Subject: bnxt_en: Fix Priority Bytes and Packets counters in ethtool -S.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit a24ec3220f369aa0b94c863b6b310685a727151c ]
+
+There is an indexing bug in determining these ethtool priority
+counters.  Instead of using the queue ID to index, we need to
+normalize by modulo 10 to get the index.  This index is then used
+to obtain the proper CoS queue counter.  Rename bp->pri2cos to
+bp->pri2cos_idx to make this more clear.
+
+Fixes: e37fed790335 ("bnxt_en: Add ethtool -S priority counters.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c         |   10 +++++++++-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h         |    2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |    8 ++++----
+ 3 files changed, 14 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(str
+               pri2cos = &resp2->pri0_cos_queue_id;
+               for (i = 0; i < 8; i++) {
+                       u8 queue_id = pri2cos[i];
++                      u8 queue_idx;
++                      /* Per port queue IDs start from 0, 10, 20, etc */
++                      queue_idx = queue_id % 10;
++                      if (queue_idx > BNXT_MAX_QUEUE) {
++                              bp->pri2cos_valid = false;
++                              goto qstats_done;
++                      }
+                       for (j = 0; j < bp->max_q; j++) {
+                               if (bp->q_ids[j] == queue_id)
+-                                      bp->pri2cos[i] = j;
++                                      bp->pri2cos_idx[i] = queue_idx;
+                       }
+               }
+               bp->pri2cos_valid = 1;
+       }
++qstats_done:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+ }
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1714,7 +1714,7 @@ struct bnxt {
+       u16                     fw_rx_stats_ext_size;
+       u16                     fw_tx_stats_ext_size;
+       u16                     hw_ring_stats_size;
+-      u8                      pri2cos[8];
++      u8                      pri2cos_idx[8];
+       u8                      pri2cos_valid;
+       u16                     hwrm_max_req_len;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -589,25 +589,25 @@ skip_ring_stats:
+               if (bp->pri2cos_valid) {
+                       for (i = 0; i < 8; i++, j++) {
+                               long n = bnxt_rx_bytes_pri_arr[i].base_off +
+-                                       bp->pri2cos[i];
++                                       bp->pri2cos_idx[i];
+                               buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+                       }
+                       for (i = 0; i < 8; i++, j++) {
+                               long n = bnxt_rx_pkts_pri_arr[i].base_off +
+-                                       bp->pri2cos[i];
++                                       bp->pri2cos_idx[i];
+                               buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+                       }
+                       for (i = 0; i < 8; i++, j++) {
+                               long n = bnxt_tx_bytes_pri_arr[i].base_off +
+-                                       bp->pri2cos[i];
++                                       bp->pri2cos_idx[i];
+                               buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+                       }
+                       for (i = 0; i < 8; i++, j++) {
+                               long n = bnxt_tx_pkts_pri_arr[i].base_off +
+-                                       bp->pri2cos[i];
++                                       bp->pri2cos_idx[i];
+                               buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+                       }
diff --git a/queue-5.5/bnxt_en-free-context-memory-after-disabling-pci-in-probe-error-path.patch b/queue-5.5/bnxt_en-free-context-memory-after-disabling-pci-in-probe-error-path.patch
new file mode 100644 (file)
index 0000000..e2e0ef3
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:04 -0400
+Subject: bnxt_en: Free context memory after disabling PCI in probe error path.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 62bfb932a51f6d08eb409248e69f8d6428c2cabd ]
+
+Other shutdown code paths will always disable PCI first to shutdown DMA
+before freeing context memory.  Do the same sequence in the error path
+of probe to be safe and consistent.
+
+Fixes: c20dc142dd7b ("bnxt_en: Disable bus master during PCI shutdown and driver unload.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11959,12 +11959,12 @@ init_err_pci_clean:
+       bnxt_hwrm_func_drv_unrgtr(bp);
+       bnxt_free_hwrm_short_cmd_req(bp);
+       bnxt_free_hwrm_resources(bp);
+-      bnxt_free_ctx_mem(bp);
+-      kfree(bp->ctx);
+-      bp->ctx = NULL;
+       kfree(bp->fw_health);
+       bp->fw_health = NULL;
+       bnxt_cleanup_pci(bp);
++      bnxt_free_ctx_mem(bp);
++      kfree(bp->ctx);
++      bp->ctx = NULL;
+ init_err_free:
+       free_netdev(dev);
diff --git a/queue-5.5/bnxt_en-reset-rings-if-ring-reservation-fails-during-open.patch b/queue-5.5/bnxt_en-reset-rings-if-ring-reservation-fails-during-open.patch
new file mode 100644 (file)
index 0000000..f57e1fc
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:05 -0400
+Subject: bnxt_en: Reset rings if ring reservation fails during open()
+
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+
+[ Upstream commit 5d765a5e4bd7c368e564e11402bba74cf7f03ac1 ]
+
+If ring counts are not reset when ring reservation fails,
+bnxt_init_dflt_ring_mode() will not be called again to reinitialise
+IRQs when open() is called and results in system crash as napi will
+also be not initialised. This patch fixes it by resetting the ring
+counts.
+
+Fixes: 47558acd56a7 ("bnxt_en: Reserve rings at driver open if none was reserved at probe time.")
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11666,6 +11666,10 @@ static int bnxt_set_dflt_rings(struct bn
+               bp->rx_nr_rings++;
+               bp->cp_nr_rings++;
+       }
++      if (rc) {
++              bp->tx_nr_rings = 0;
++              bp->rx_nr_rings = 0;
++      }
+       return rc;
+ }
diff --git a/queue-5.5/bnxt_en-return-error-if-bnxt_alloc_ctx_mem-fails.patch b/queue-5.5/bnxt_en-return-error-if-bnxt_alloc_ctx_mem-fails.patch
new file mode 100644 (file)
index 0000000..c9c889c
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sun, 22 Mar 2020 16:40:03 -0400
+Subject: bnxt_en: Return error if bnxt_alloc_ctx_mem() fails.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 0b5b561cea32d5bb1e0a82d65b755a3cb5212141 ]
+
+The current code ignores the return value from
+bnxt_hwrm_func_backing_store_cfg(), causing the driver to proceed in
+the init path even when this vital firmware call has failed.  Fix it
+by propagating the error code to the caller.
+
+Fixes: 1b9394e5a2ad ("bnxt_en: Configure context memory on new devices.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6880,12 +6880,12 @@ skip_rdma:
+       }
+       ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+       rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+-      if (rc)
++      if (rc) {
+               netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
+                          rc);
+-      else
+-              ctx->flags |= BNXT_CTX_FLAG_INITED;
+-
++              return rc;
++      }
++      ctx->flags |= BNXT_CTX_FLAG_INITED;
+       return 0;
+ }
diff --git a/queue-5.5/cxgb4-fix-throughput-drop-during-tx-backpressure.patch b/queue-5.5/cxgb4-fix-throughput-drop-during-tx-backpressure.patch
new file mode 100644 (file)
index 0000000..b7679eb
--- /dev/null
@@ -0,0 +1,100 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Date: Thu, 19 Mar 2020 23:08:09 +0530
+Subject: cxgb4: fix throughput drop during Tx backpressure
+
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+
+[ Upstream commit 7affd80802afb6ca92dba47d768632fbde365241 ]
+
+commit 7c3bebc3d868 ("cxgb4: request the TX CIDX updates to status page")
+reverted back to getting Tx CIDX updates via DMA, instead of interrupts,
+introduced by commit d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE
+doorbell queue timer")
+
+However, it missed reverting back several code changes where Tx CIDX
+updates are not explicitly requested during backpressure when using
+interrupt mode. These missed changes cause slow recovery during
+backpressure because the corresponding interrupt no longer comes and
+hence results in Tx throughput drop.
+
+So, revert back these missed code changes, as well, which will allow
+explicitly requesting Tx CIDX updates when backpressure happens.
+This enables the corresponding interrupt with Tx CIDX update message
+to get generated and hence speed up recovery and restore back
+throughput.
+
+Fixes: 7c3bebc3d868 ("cxgb4: request the TX CIDX updates to status page")
+Fixes: d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer")
+Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/sge.c |   42 +------------------------------
+ 1 file changed, 2 insertions(+), 40 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -1486,16 +1486,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct
+                * has opened up.
+                */
+               eth_txq_stop(q);
+-
+-              /* If we're using the SGE Doorbell Queue Timer facility, we
+-               * don't need to ask the Firmware to send us Egress Queue CIDX
+-               * Updates: the Hardware will do this automatically.  And
+-               * since we send the Ingress Queue CIDX Updates to the
+-               * corresponding Ethernet Response Queue, we'll get them very
+-               * quickly.
+-               */
+-              if (!q->dbqt)
+-                      wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++              wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+       }
+       wr = (void *)&q->q.desc[q->q.pidx];
+@@ -1805,16 +1796,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(str
+                * has opened up.
+                */
+               eth_txq_stop(txq);
+-
+-              /* If we're using the SGE Doorbell Queue Timer facility, we
+-               * don't need to ask the Firmware to send us Egress Queue CIDX
+-               * Updates: the Hardware will do this automatically.  And
+-               * since we send the Ingress Queue CIDX Updates to the
+-               * corresponding Ethernet Response Queue, we'll get them very
+-               * quickly.
+-               */
+-              if (!txq->dbqt)
+-                      wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++              wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+       }
+       /* Start filling in our Work Request.  Note that we do _not_ handle
+@@ -3370,26 +3352,6 @@ static void t4_tx_completion_handler(str
+       }
+       txq = &s->ethtxq[pi->first_qset + rspq->idx];
+-
+-      /* We've got the Hardware Consumer Index Update in the Egress Update
+-       * message.  If we're using the SGE Doorbell Queue Timer mechanism,
+-       * these Egress Update messages will be our sole CIDX Updates we get
+-       * since we don't want to chew up PCIe bandwidth for both Ingress
+-       * Messages and Status Page writes.  However, The code which manages
+-       * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+-       * stored in the Status Page at the end of the TX Queue.  It's easiest
+-       * to simply copy the CIDX Update value from the Egress Update message
+-       * to the Status Page.  Also note that no Endian issues need to be
+-       * considered here since both are Big Endian and we're just copying
+-       * bytes consistently ...
+-       */
+-      if (txq->dbqt) {
+-              struct cpl_sge_egr_update *egr;
+-
+-              egr = (struct cpl_sge_egr_update *)rsp;
+-              WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+-      }
+-
+       t4_sge_eth_txq_egress_update(adapter, txq, -1);
+ }
diff --git a/queue-5.5/cxgb4-fix-txq-restart-check-during-backpressure.patch b/queue-5.5/cxgb4-fix-txq-restart-check-during-backpressure.patch
new file mode 100644 (file)
index 0000000..342636d
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Date: Thu, 19 Mar 2020 23:08:10 +0530
+Subject: cxgb4: fix Txq restart check during backpressure
+
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+
+[ Upstream commit f1f20a8666c55cb534b8f3fc1130eebf01a06155 ]
+
+Driver reclaims descriptors in much smaller batches, even if hardware
+indicates more to reclaim, during backpressure. So, fix the check to
+restart the Txq during backpressure, by looking at how many
+descriptors hardware had indicated to reclaim, and not on how many
+descriptors that driver had actually reclaimed. Once the Txq is
+restarted, driver will reclaim even more descriptors when Tx path
+is entered again.
+
+Fixes: d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer")
+Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/sge.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct
+ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
+                                int maxreclaim)
+ {
++      unsigned int reclaimed, hw_cidx;
+       struct sge_txq *q = &eq->q;
+-      unsigned int reclaimed;
++      int hw_in_use;
+       if (!q->in_use || !__netif_tx_trylock(eq->txq))
+               return 0;
+@@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct
+       /* Reclaim pending completed TX Descriptors. */
+       reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
++      hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
++      hw_in_use = q->pidx - hw_cidx;
++      if (hw_in_use < 0)
++              hw_in_use += q->size;
++
+       /* If the TX Queue is currently stopped and there's now more than half
+        * the queue available, restart it.  Otherwise bail out since the rest
+        * of what we want do here is with the possibility of shipping any
+        * currently buffered Coalesced TX Work Request.
+        */
+-      if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
++      if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
+               netif_tx_wake_queue(eq->txq);
+               eq->q.restarts++;
+       }
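Editorial aside (not part of the queued patch above): the restart check now compares against how many descriptors the hardware still holds, which requires handling index wrap-around in the circular Tx queue. A minimal sketch of that arithmetic, assuming a queue of size entries with software producer index pidx and hardware consumer index cidx:

#include <stdio.h>

/* Illustrative only: descriptors still in use by hardware in a
 * circular queue, with wrap-around handled the same way the patch
 * computes hw_in_use.
 */
static int hw_in_use(int pidx, int hw_cidx, int size)
{
        int in_use = pidx - hw_cidx;

        if (in_use < 0)
                in_use += size;
        return in_use;
}

int main(void)
{
        printf("%d\n", hw_in_use(100, 90, 1024));  /* 10 */
        printf("%d\n", hw_in_use(5, 1010, 1024));  /* 19, wrapped */
        return 0;
}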
diff --git a/queue-5.5/geneve-move-debug-check-after-netdev-unregister.patch b/queue-5.5/geneve-move-debug-check-after-netdev-unregister.patch
new file mode 100644 (file)
index 0000000..853a5e2
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Florian Westphal <fw@strlen.de>
+Date: Sat, 14 Mar 2020 08:18:42 +0100
+Subject: geneve: move debug check after netdev unregister
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 0fda7600c2e174fe27e9cf02e78e345226e441fa ]
+
+The debug check must be done after unregister_netdevice_many() call --
+the list_del() for this is done inside .ndo_stop.
+
+Fixes: 2843a25348f8 ("geneve: speedup geneve tunnels dismantle")
+Reported-and-tested-by: <syzbot+68a8ed58e3d17c700de5@syzkaller.appspotmail.com>
+Cc: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/geneve.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struc
+               if (!net_eq(dev_net(geneve->dev), net))
+                       unregister_netdevice_queue(geneve->dev, head);
+       }
+-
+-      WARN_ON_ONCE(!list_empty(&gn->sock_list));
+ }
+ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch
+       /* unregister the devices gathered above */
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
++
++      list_for_each_entry(net, net_list, exit_list) {
++              const struct geneve_net *gn = net_generic(net, geneve_net_id);
++
++              WARN_ON_ONCE(!list_empty(&gn->sock_list));
++      }
+ }
+ static struct pernet_operations geneve_net_ops = {
diff --git a/queue-5.5/hsr-add-restart-routine-into-hsr_get_node_list.patch b/queue-5.5/hsr-add-restart-routine-into-hsr_get_node_list.patch
new file mode 100644 (file)
index 0000000..7d0f53b
--- /dev/null
@@ -0,0 +1,99 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:24 +0000
+Subject: hsr: add restart routine into hsr_get_node_list()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit ca19c70f5225771c05bcdcb832b4eb84d7271c5e ]
+
+The hsr_get_node_list() is to send node addresses to the userspace.
+If there are so many nodes, it could fail because of buffer size.
+In order to avoid this failure, the restart routine is added.
+
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_netlink.c |   38 ++++++++++++++++++++++++--------------
+ 1 file changed, 24 insertions(+), 14 deletions(-)
+
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -360,16 +360,14 @@ fail:
+  */
+ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
+ {
+-      /* For receiving */
+-      struct nlattr *na;
++      unsigned char addr[ETH_ALEN];
+       struct net_device *hsr_dev;
+-
+-      /* For sending */
+       struct sk_buff *skb_out;
+-      void *msg_head;
+       struct hsr_priv *hsr;
+-      void *pos;
+-      unsigned char addr[ETH_ALEN];
++      bool restart = false;
++      struct nlattr *na;
++      void *pos = NULL;
++      void *msg_head;
+       int res;
+       if (!info)
+@@ -387,8 +385,9 @@ static int hsr_get_node_list(struct sk_b
+       if (!is_hsr_master(hsr_dev))
+               goto rcu_unlock;
++restart:
+       /* Send reply */
+-      skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
++      skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!skb_out) {
+               res = -ENOMEM;
+               goto fail;
+@@ -402,17 +401,28 @@ static int hsr_get_node_list(struct sk_b
+               goto nla_put_failure;
+       }
+-      res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+-      if (res < 0)
+-              goto nla_put_failure;
++      if (!restart) {
++              res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
++              if (res < 0)
++                      goto nla_put_failure;
++      }
+       hsr = netdev_priv(hsr_dev);
+-      pos = hsr_get_next_node(hsr, NULL, addr);
++      if (!pos)
++              pos = hsr_get_next_node(hsr, NULL, addr);
+       while (pos) {
+               res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+-              if (res < 0)
++              if (res < 0) {
++                      if (res == -EMSGSIZE) {
++                              genlmsg_end(skb_out, msg_head);
++                              genlmsg_unicast(genl_info_net(info), skb_out,
++                                              info->snd_portid);
++                              restart = true;
++                              goto restart;
++                      }
+                       goto nla_put_failure;
++              }
+               pos = hsr_get_next_node(hsr, pos, addr);
+       }
+       rcu_read_unlock();
+@@ -429,7 +439,7 @@ invalid:
+       return 0;
+ nla_put_failure:
+-      kfree_skb(skb_out);
++      nlmsg_free(skb_out);
+       /* Fall through */
+ fail:
diff --git a/queue-5.5/hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch b/queue-5.5/hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch
new file mode 100644 (file)
index 0000000..8e6bf2e
--- /dev/null
@@ -0,0 +1,141 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Sat, 21 Mar 2020 06:46:50 +0000
+Subject: hsr: fix general protection fault in hsr_addr_is_self()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 3a303cfdd28d5f930a307c82e8a9d996394d5ebd ]
+
+The port->hsr is used in the hsr_handle_frame(), which is a
+callback of rx_handler.
+hsr master and slaves are initialized in hsr_add_port().
+This function initializes several pointers, which includes port->hsr after
+registering rx_handler.
+So, in the rx_handler routine, un-initialized pointer would be used.
+In order to fix this, pointers should be initialized before
+registering rx_handler.
+
+Test commands:
+    ip netns del left
+    ip netns del right
+    modprobe -rv veth
+    modprobe -rv hsr
+    killall ping
+    modprobe hsr
+    ip netns add left
+    ip netns add right
+    ip link add veth0 type veth peer name veth1
+    ip link add veth2 type veth peer name veth3
+    ip link add veth4 type veth peer name veth5
+    ip link set veth1 netns left
+    ip link set veth3 netns right
+    ip link set veth4 netns left
+    ip link set veth5 netns right
+    ip link set veth0 up
+    ip link set veth2 up
+    ip link set veth0 address fc:00:00:00:00:01
+    ip link set veth2 address fc:00:00:00:00:02
+    ip netns exec left ip link set veth1 up
+    ip netns exec left ip link set veth4 up
+    ip netns exec right ip link set veth3 up
+    ip netns exec right ip link set veth5 up
+    ip link add hsr0 type hsr slave1 veth0 slave2 veth2
+    ip a a 192.168.100.1/24 dev hsr0
+    ip link set hsr0 up
+    ip netns exec left ip link add hsr1 type hsr slave1 veth1 slave2 veth4
+    ip netns exec left ip a a 192.168.100.2/24 dev hsr1
+    ip netns exec left ip link set hsr1 up
+    ip netns exec left ip n a 192.168.100.1 dev hsr1 lladdr \
+           fc:00:00:00:00:01 nud permanent
+    ip netns exec left ip n r 192.168.100.1 dev hsr1 lladdr \
+           fc:00:00:00:00:01 nud permanent
+    for i in {1..100}
+    do
+        ip netns exec left ping 192.168.100.1 &
+    done
+    ip netns exec left hping3 192.168.100.1 -2 --flood &
+    ip netns exec right ip link add hsr2 type hsr slave1 veth3 slave2 veth5
+    ip netns exec right ip a a 192.168.100.3/24 dev hsr2
+    ip netns exec right ip link set hsr2 up
+    ip netns exec right ip n a 192.168.100.1 dev hsr2 lladdr \
+           fc:00:00:00:00:02 nud permanent
+    ip netns exec right ip n r 192.168.100.1 dev hsr2 lladdr \
+           fc:00:00:00:00:02 nud permanent
+    for i in {1..100}
+    do
+        ip netns exec right ping 192.168.100.1 &
+    done
+    ip netns exec right hping3 192.168.100.1 -2 --flood &
+    while :
+    do
+        ip link add hsr0 type hsr slave1 veth0 slave2 veth2
+       ip a a 192.168.100.1/24 dev hsr0
+       ip link set hsr0 up
+       ip link del hsr0
+    done
+
+Splat looks like:
+[  120.954938][    C0] general protection fault, probably for non-canonical address 0xdffffc0000000006: 0000 [#1]I
+[  120.957761][    C0] KASAN: null-ptr-deref in range [0x0000000000000030-0x0000000000000037]
+[  120.959064][    C0] CPU: 0 PID: 1511 Comm: hping3 Not tainted 5.6.0-rc5+ #460
+[  120.960054][    C0] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[  120.962261][    C0] RIP: 0010:hsr_addr_is_self+0x65/0x2a0 [hsr]
+[  120.963149][    C0] Code: 44 24 18 70 73 2f c0 48 c1 eb 03 48 8d 04 13 c7 00 f1 f1 f1 f1 c7 40 04 00 f2 f2 f2 4
+[  120.966277][    C0] RSP: 0018:ffff8880d9c09af0 EFLAGS: 00010206
+[  120.967293][    C0] RAX: 0000000000000006 RBX: 1ffff1101b38135f RCX: 0000000000000000
+[  120.968516][    C0] RDX: dffffc0000000000 RSI: ffff8880d17cb208 RDI: 0000000000000000
+[  120.969718][    C0] RBP: 0000000000000030 R08: ffffed101b3c0e3c R09: 0000000000000001
+[  120.972203][    C0] R10: 0000000000000001 R11: ffffed101b3c0e3b R12: 0000000000000000
+[  120.973379][    C0] R13: ffff8880aaf80100 R14: ffff8880aaf800f2 R15: ffff8880aaf80040
+[  120.974410][    C0] FS:  00007f58e693f740(0000) GS:ffff8880d9c00000(0000) knlGS:0000000000000000
+[  120.979794][    C0] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  120.980773][    C0] CR2: 00007ffcb8b38f29 CR3: 00000000afe8e001 CR4: 00000000000606f0
+[  120.981945][    C0] Call Trace:
+[  120.982411][    C0]  <IRQ>
+[  120.982848][    C0]  ? hsr_add_node+0x8c0/0x8c0 [hsr]
+[  120.983522][    C0]  ? rcu_read_lock_held+0x90/0xa0
+[  120.984159][    C0]  ? rcu_read_lock_sched_held+0xc0/0xc0
+[  120.984944][    C0]  hsr_handle_frame+0x1db/0x4e0 [hsr]
+[  120.985597][    C0]  ? hsr_nl_nodedown+0x2b0/0x2b0 [hsr]
+[  120.986289][    C0]  __netif_receive_skb_core+0x6bf/0x3170
+[  120.992513][    C0]  ? check_chain_key+0x236/0x5d0
+[  120.993223][    C0]  ? do_xdp_generic+0x1460/0x1460
+[  120.993875][    C0]  ? register_lock_class+0x14d0/0x14d0
+[  120.994609][    C0]  ? __netif_receive_skb_one_core+0x8d/0x160
+[  120.995377][    C0]  __netif_receive_skb_one_core+0x8d/0x160
+[  120.996204][    C0]  ? __netif_receive_skb_core+0x3170/0x3170
+[ ... ]
+
+Reported-by: syzbot+fcf5dd39282ceb27108d@syzkaller.appspotmail.com
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_slave.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, s
+       if (!port)
+               return -ENOMEM;
++      port->hsr = hsr;
++      port->dev = dev;
++      port->type = type;
++
+       if (type != HSR_PT_MASTER) {
+               res = hsr_portdev_setup(dev, port);
+               if (res)
+                       goto fail_dev_setup;
+       }
+-      port->hsr = hsr;
+-      port->dev = dev;
+-      port->type = type;
+-
+       list_add_tail_rcu(&port->port_list, &hsr->ports);
+       synchronize_rcu();
diff --git a/queue-5.5/hsr-set-.netnsok-flag.patch b/queue-5.5/hsr-set-.netnsok-flag.patch
new file mode 100644 (file)
index 0000000..ae22f73
--- /dev/null
@@ -0,0 +1,34 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:33 +0000
+Subject: hsr: set .netnsok flag
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 09e91dbea0aa32be02d8877bd50490813de56b9a ]
+
+The hsr module has been supporting the list and status command.
+(HSR_C_GET_NODE_LIST and HSR_C_GET_NODE_STATUS)
+These commands send node information to the user-space via generic netlink.
+But, in the non-init_net namespace, these commands are not allowed
+because .netnsok flag is false.
+So, there is no way to get node information in the non-init_net namespace.
+
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_netlink.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -470,6 +470,7 @@ static struct genl_family hsr_genl_famil
+       .version = 1,
+       .maxattr = HSR_A_MAX,
+       .policy = hsr_genl_policy,
++      .netnsok = true,
+       .module = THIS_MODULE,
+       .ops = hsr_ops,
+       .n_ops = ARRAY_SIZE(hsr_ops),
diff --git a/queue-5.5/hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch b/queue-5.5/hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch
new file mode 100644 (file)
index 0000000..99fbe90
--- /dev/null
@@ -0,0 +1,175 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Fri, 13 Mar 2020 06:50:14 +0000
+Subject: hsr: use rcu_read_lock() in hsr_get_node_{list/status}()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 173756b86803655d70af7732079b3aa935e6ab68 ]
+
+hsr_get_node_{list/status}() are not under rtnl_lock() because
+they are callback functions of generic netlink.
+But they use __dev_get_by_index() without rtnl_lock().
+So, it would use unsafe data.
+In order to fix it, rcu_read_lock() and dev_get_by_index_rcu()
+are used instead of __dev_get_by_index().
+
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/hsr/hsr_framereg.c |    9 ++-------
+ net/hsr/hsr_netlink.c  |   39 +++++++++++++++++++++------------------
+ 2 files changed, 23 insertions(+), 25 deletions(-)
+
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *h
+       struct hsr_port *port;
+       unsigned long tdiff;
+-      rcu_read_lock();
+       node = find_node_by_addr_A(&hsr->node_db, addr);
+-      if (!node) {
+-              rcu_read_unlock();
+-              return -ENOENT; /* No such entry */
+-      }
++      if (!node)
++              return -ENOENT;
+       ether_addr_copy(addr_b, node->macaddress_B);
+@@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *h
+               *addr_b_ifindex = -1;
+       }
+-      rcu_read_unlock();
+-
+       return 0;
+ }
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk
+       if (!na)
+               goto invalid;
+-      hsr_dev = __dev_get_by_index(genl_info_net(info),
+-                                   nla_get_u32(info->attrs[HSR_A_IFINDEX]));
++      rcu_read_lock();
++      hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
++                                     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       if (!hsr_dev)
+-              goto invalid;
++              goto rcu_unlock;
+       if (!is_hsr_master(hsr_dev))
+-              goto invalid;
++              goto rcu_unlock;
+       /* Send reply */
+-      skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++      skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+       if (!skb_out) {
+               res = -ENOMEM;
+               goto fail;
+@@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk
+       res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
+       if (res < 0)
+               goto nla_put_failure;
+-      rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (port)
+               res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
+                                 port->dev->ifindex);
+-      rcu_read_unlock();
+       if (res < 0)
+               goto nla_put_failure;
+@@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk
+       res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
+       if (res < 0)
+               goto nla_put_failure;
+-      rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+       if (port)
+               res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
+                                 port->dev->ifindex);
+-      rcu_read_unlock();
+       if (res < 0)
+               goto nla_put_failure;
++      rcu_read_unlock();
++
+       genlmsg_end(skb_out, msg_head);
+       genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+       return 0;
++rcu_unlock:
++      rcu_read_unlock();
+ invalid:
+       netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+       return 0;
+@@ -351,6 +352,7 @@ nla_put_failure:
+       /* Fall through */
+ fail:
++      rcu_read_unlock();
+       return res;
+ }
+@@ -377,15 +379,16 @@ static int hsr_get_node_list(struct sk_b
+       if (!na)
+               goto invalid;
+-      hsr_dev = __dev_get_by_index(genl_info_net(info),
+-                                   nla_get_u32(info->attrs[HSR_A_IFINDEX]));
++      rcu_read_lock();
++      hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
++                                     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       if (!hsr_dev)
+-              goto invalid;
++              goto rcu_unlock;
+       if (!is_hsr_master(hsr_dev))
+-              goto invalid;
++              goto rcu_unlock;
+       /* Send reply */
+-      skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++      skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+       if (!skb_out) {
+               res = -ENOMEM;
+               goto fail;
+@@ -405,14 +408,11 @@ static int hsr_get_node_list(struct sk_b
+       hsr = netdev_priv(hsr_dev);
+-      rcu_read_lock();
+       pos = hsr_get_next_node(hsr, NULL, addr);
+       while (pos) {
+               res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+-              if (res < 0) {
+-                      rcu_read_unlock();
++              if (res < 0)
+                       goto nla_put_failure;
+-              }
+               pos = hsr_get_next_node(hsr, pos, addr);
+       }
+       rcu_read_unlock();
+@@ -422,6 +422,8 @@ static int hsr_get_node_list(struct sk_b
+       return 0;
++rcu_unlock:
++      rcu_read_unlock();
+ invalid:
+       netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+       return 0;
+@@ -431,6 +433,7 @@ nla_put_failure:
+       /* Fall through */
+ fail:
++      rcu_read_unlock();
+       return res;
+ }
diff --git a/queue-5.5/ipv4-fix-a-rcu-list-lock-in-inet_dump_fib.patch b/queue-5.5/ipv4-fix-a-rcu-list-lock-in-inet_dump_fib.patch
new file mode 100644 (file)
index 0000000..22d5126
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Qian Cai <cai@lca.pw>
+Date: Thu, 19 Mar 2020 22:54:21 -0400
+Subject: ipv4: fix a RCU-list lock in inet_dump_fib()
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit dddeb30bfc43926620f954266fd12c65a7206f07 ]
+
+There is a place,
+
+inet_dump_fib()
+  fib_table_dump
+    fn_trie_dump_leaf()
+      hlist_for_each_entry_rcu()
+
+without rcu_read_lock() will trigger a warning,
+
+ WARNING: suspicious RCU usage
+ -----------------------------
+ net/ipv4/fib_trie.c:2216 RCU-list traversed in non-reader section!!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 2, debug_locks = 1
+ 1 lock held by ip/1923:
+  #0: ffffffff8ce76e40 (rtnl_mutex){+.+.}, at: netlink_dump+0xd6/0x840
+
+ Call Trace:
+  dump_stack+0xa1/0xea
+  lockdep_rcu_suspicious+0x103/0x10d
+  fn_trie_dump_leaf+0x581/0x590
+  fib_table_dump+0x15f/0x220
+  inet_dump_fib+0x4ad/0x5d0
+  netlink_dump+0x350/0x840
+  __netlink_dump_start+0x315/0x3e0
+  rtnetlink_rcv_msg+0x4d1/0x720
+  netlink_rcv_skb+0xf0/0x220
+  rtnetlink_rcv+0x15/0x20
+  netlink_unicast+0x306/0x460
+  netlink_sendmsg+0x44b/0x770
+  __sys_sendto+0x259/0x270
+  __x64_sys_sendto+0x80/0xa0
+  do_syscall_64+0x69/0xf4
+  entry_SYSCALL_64_after_hwframe+0x49/0xb3
+
+Fixes: 18a8021a7be3 ("net/ipv4: Plumb support for filtering route dumps")
+Signed-off-by: Qian Cai <cai@lca.pw>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/fib_frontend.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -997,7 +997,9 @@ static int inet_dump_fib(struct sk_buff
+                       return -ENOENT;
+               }
++              rcu_read_lock();
+               err = fib_table_dump(tb, skb, cb, &filter);
++              rcu_read_unlock();
+               return skb->len ? : err;
+       }
diff --git a/queue-5.5/macsec-restrict-to-ethernet-devices.patch b/queue-5.5/macsec-restrict-to-ethernet-devices.patch
new file mode 100644 (file)
index 0000000..cc208d1
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Willem de Bruijn <willemb@google.com>
+Date: Sun, 22 Mar 2020 13:51:13 -0400
+Subject: macsec: restrict to ethernet devices
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit b06d072ccc4b1acd0147b17914b7ad1caa1818bb ]
+
+Only attach macsec to ethernet devices.
+
+Syzbot was able to trigger a KMSAN warning in macsec_handle_frame
+by attaching to a phonet device.
+
+Macvlan has a similar check in macvlan_port_create.
+
+v1->v2
+  - fix commit message typo
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macsec.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -16,6 +16,7 @@
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+ #include <net/gro_cells.h>
++#include <linux/if_arp.h>
+ #include <uapi/linux/if_macsec.h>
+@@ -3236,6 +3237,8 @@ static int macsec_newlink(struct net *ne
+       real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+       if (!real_dev)
+               return -ENODEV;
++      if (real_dev->type != ARPHRD_ETHER)
++              return -EINVAL;
+       dev->priv_flags |= IFF_MACSEC;
diff --git a/queue-5.5/mlxsw-pci-only-issue-reset-when-system-is-ready.patch b/queue-5.5/mlxsw-pci-only-issue-reset-when-system-is-ready.patch
new file mode 100644 (file)
index 0000000..f32e6ba
--- /dev/null
@@ -0,0 +1,114 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Ido Schimmel <idosch@mellanox.com>
+Date: Thu, 19 Mar 2020 13:25:39 +0200
+Subject: mlxsw: pci: Only issue reset when system is ready
+
+From: Ido Schimmel <idosch@mellanox.com>
+
+[ Upstream commit 6002059d7882c3512e6ac52fa82424272ddfcd5c ]
+
+During initialization the driver issues a software reset command and
+then waits for the system status to change back to "ready" state.
+
+However, before issuing the reset command the driver does not check that
+the system is actually in "ready" state. On Spectrum-{1,2} systems this
+was always the case as the hardware initialization time is very short.
+On Spectrum-3 systems this is no longer the case. This results in the
+software reset command timing-out and the driver failing to load:
+
+[ 6.347591] mlxsw_spectrum3 0000:06:00.0: Cmd exec timed-out (opcode=40(ACCESS_REG),opcode_mod=0,in_mod=0)
+[ 6.358382] mlxsw_spectrum3 0000:06:00.0: Reg cmd access failed (reg_id=9023(mrsr),type=write)
+[ 6.368028] mlxsw_spectrum3 0000:06:00.0: cannot register bus device
+[ 6.375274] mlxsw_spectrum3: probe of 0000:06:00.0 failed with error -110
+
+Fix this by waiting for the system to become ready both before issuing
+the reset command and afterwards. In case of failure, print the last
+system status to aid in debugging.
+
+Fixes: da382875c616 ("mlxsw: spectrum: Extend to support Spectrum-3 ASIC")
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Reviewed-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/pci.c |   50 +++++++++++++++++++++++-------
+ 1 file changed, 39 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+@@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct m
+                           mbox->mapaddr);
+ }
+-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+-                            const struct pci_device_id *id)
++static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
++                                  const struct pci_device_id *id,
++                                  u32 *p_sys_status)
+ {
+       unsigned long end;
+-      char mrsr_pl[MLXSW_REG_MRSR_LEN];
+-      int err;
++      u32 val;
+-      mlxsw_reg_mrsr_pack(mrsr_pl);
+-      err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+-      if (err)
+-              return err;
+       if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+               msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+               return 0;
+       }
+-      /* We must wait for the HW to become responsive once again. */
++      /* We must wait for the HW to become responsive. */
+       msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+       end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+       do {
+-              u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
+-
++              val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
+               if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
+                       return 0;
+               cond_resched();
+       } while (time_before(jiffies, end));
++
++      *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
++
+       return -EBUSY;
+ }
++static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
++                            const struct pci_device_id *id)
++{
++      struct pci_dev *pdev = mlxsw_pci->pdev;
++      char mrsr_pl[MLXSW_REG_MRSR_LEN];
++      u32 sys_status;
++      int err;
++
++      err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
++      if (err) {
++              dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
++                      sys_status);
++              return err;
++      }
++
++      mlxsw_reg_mrsr_pack(mrsr_pl);
++      err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
++      if (err)
++              return err;
++
++      err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
++      if (err) {
++              dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
++                      sys_status);
++              return err;
++      }
++
++      return 0;
++}
++
+ static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+ {
+       int err;
diff --git a/queue-5.5/mlxsw-spectrum_mr-fix-list-iteration-in-error-path.patch b/queue-5.5/mlxsw-spectrum_mr-fix-list-iteration-in-error-path.patch
new file mode 100644 (file)
index 0000000..1e5dca2
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Ido Schimmel <idosch@mellanox.com>
+Date: Thu, 26 Mar 2020 16:17:33 +0200
+Subject: mlxsw: spectrum_mr: Fix list iteration in error path
+
+From: Ido Schimmel <idosch@mellanox.com>
+
+[ Upstream commit f6bf1bafdc2152bb22aff3a4e947f2441a1d49e2 ]
+
+list_for_each_entry_from_reverse() iterates backwards over the list from
+the current position, but in the error path we should start from the
+previous position.
+
+Fix this by using list_for_each_entry_continue_reverse() instead.
+
+This suppresses the following error from coccinelle:
+
+drivers/net/ethernet/mellanox/mlxsw//spectrum_mr.c:655:34-38: ERROR:
+invalid reference to the index variable of the iterator on line 636
+
+Fixes: c011ec1bbfd6 ("mlxsw: spectrum: Add the multicast routing offloading logic")
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Reviewed-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struc
+       return 0;
+ err_erif_unresolve:
+-      list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
+-                                       vif_node)
++      list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
++                                           vif_node)
+               mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
+ err_irif_unresolve:
+-      list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
+-                                       vif_node)
++      list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
++                                           vif_node)
+               mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
+       mr_vif->rif = NULL;
+       return err;
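
A minimal userspace illustration (not the mlxsw code) of the semantics at stake: when setup of entry i fails, the rollback must start at entry i-1, which is what the *_continue_reverse iterator expresses; starting from the failed entry itself would undo something that was never set up.

#include <stdio.h>

static int setup(int i)     { return i == 3 ? -1 : 0; }  /* pretend entry 3 fails */
static void teardown(int i) { printf("undo %d\n", i); }

int main(void)
{
	int i, n = 5;

	for (i = 0; i < n; i++) {
		if (setup(i) < 0)
			goto err;
	}
	return 0;
err:
	while (--i >= 0)	/* skip the failed entry, roll back the rest */
		teardown(i);
	return 1;
}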
diff --git a/queue-5.5/net-bcmgenet-keep-mac-in-reset-until-phy-is-up.patch b/queue-5.5/net-bcmgenet-keep-mac-in-reset-until-phy-is-up.patch
new file mode 100644 (file)
index 0000000..6654fcb
--- /dev/null
@@ -0,0 +1,100 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Doug Berger <opendmb@gmail.com>
+Date: Mon, 16 Mar 2020 14:44:56 -0700
+Subject: net: bcmgenet: keep MAC in reset until PHY is up
+
+From: Doug Berger <opendmb@gmail.com>
+
+[ Upstream commit 88f6c8bf1aaed5039923fb4c701cab4d42176275 ]
+
+As noted in commit 28c2d1a7a0bf ("net: bcmgenet: enable loopback
+during UniMAC sw_reset") the UniMAC must be clocked at least 5
+cycles while the sw_reset is asserted to ensure a clean reset.
+
+That commit enabled local loopback to provide an Rx clock from the
+GENET sourced Tx clk. However, when connected in MII mode the Tx
+clk is sourced by the PHY, so if an EPHY is not supplying clocks
+(e.g. when the link is down), the UniMAC does not receive the
+necessary clocks.
+
+This commit extends the sw_reset window until the PHY reports that
+the link is up thereby ensuring that the clocks are being provided
+to the MAC to produce a clean reset.
+
+One consequence is that if the system attempts to enter a Wake on
+LAN suspend state when the PHY link has not been active, the MAC
+may not have had a chance to initialize cleanly. In this case, we
+remove the sw_reset and enable the WoL reception path as normal
+with the hope that the PHY will provide the necessary clocks to
+drive the WoL blocks if the link becomes active after the system
+has entered suspend.
+
+Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c     |   10 ++++------
+ drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c |    6 +++++-
+ drivers/net/ethernet/broadcom/genet/bcmmii.c       |    6 ++++++
+ 3 files changed, 15 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1972,6 +1972,8 @@ static void umac_enable_set(struct bcmge
+       u32 reg;
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
++      if (reg & CMD_SW_RESET)
++              return;
+       if (enable)
+               reg |= mask;
+       else
+@@ -1991,13 +1993,9 @@ static void reset_umac(struct bcmgenet_p
+       bcmgenet_rbuf_ctrl_set(priv, 0);
+       udelay(10);
+-      /* disable MAC while updating its registers */
+-      bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+-
+-      /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
+-      bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
++      /* issue soft reset and disable MAC while updating its registers */
++      bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+       udelay(2);
+-      bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ }
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct b
+               return -EINVAL;
+       }
+-      /* disable RX */
++      /* Can't suspend with WoL if MAC is still in reset */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
++      if (reg & CMD_SW_RESET)
++              reg &= ~CMD_SW_RESET;
++
++      /* disable RX */
+       reg &= ~CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       mdelay(10);
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_devic
+                              CMD_HD_EN |
+                              CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+               reg |= cmd_bits;
++              if (reg & CMD_SW_RESET) {
++                      reg &= ~CMD_SW_RESET;
++                      bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++                      udelay(2);
++                      reg |= CMD_TX_EN | CMD_RX_EN;
++              }
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       } else {
+               /* done if nothing has changed */
diff --git a/queue-5.5/net-bpfilter-fix-dprintf-usage-for-dev-kmsg.patch b/queue-5.5/net-bpfilter-fix-dprintf-usage-for-dev-kmsg.patch
new file mode 100644 (file)
index 0000000..19b19c8
--- /dev/null
@@ -0,0 +1,73 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Bruno Meneguele <bmeneg@redhat.com>
+Date: Thu, 12 Mar 2020 20:08:20 -0300
+Subject: net/bpfilter: fix dprintf usage for /dev/kmsg
+
+From: Bruno Meneguele <bmeneg@redhat.com>
+
+[ Upstream commit 13d0f7b814d9b4c67e60d8c2820c86ea181e7d99 ]
+
+The bpfilter UMH code was recently changed to log its informative messages to
+/dev/kmsg; however, this interface doesn't support SEEK_CUR yet, which
+dprintf() relies on. As a result, dprintf() returns -EINVAL and doesn't log
+anything.
+
+There have been discussions in the past about supporting SEEK_CUR in the
+/dev/kmsg interface, but they were never concluded. Since the only userspace
+consumer of it inside the kernel is the bpfilter UMH module, it's better to
+correct it here instead of waiting for a conclusion on the interface.
+
+Fixes: 36c4357c63f3 ("net: bpfilter: print umh messages to /dev/kmsg")
+Signed-off-by: Bruno Meneguele <bmeneg@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bpfilter/main.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/net/bpfilter/main.c
++++ b/net/bpfilter/main.c
+@@ -10,7 +10,7 @@
+ #include <asm/unistd.h>
+ #include "msgfmt.h"
+-int debug_fd;
++FILE *debug_f;
+ static int handle_get_cmd(struct mbox_request *cmd)
+ {
+@@ -35,9 +35,10 @@ static void loop(void)
+               struct mbox_reply reply;
+               int n;
++              fprintf(debug_f, "testing the buffer\n");
+               n = read(0, &req, sizeof(req));
+               if (n != sizeof(req)) {
+-                      dprintf(debug_fd, "invalid request %d\n", n);
++                      fprintf(debug_f, "invalid request %d\n", n);
+                       return;
+               }
+@@ -47,7 +48,7 @@ static void loop(void)
+               n = write(1, &reply, sizeof(reply));
+               if (n != sizeof(reply)) {
+-                      dprintf(debug_fd, "reply failed %d\n", n);
++                      fprintf(debug_f, "reply failed %d\n", n);
+                       return;
+               }
+       }
+@@ -55,9 +56,10 @@ static void loop(void)
+ int main(void)
+ {
+-      debug_fd = open("/dev/kmsg", 00000002);
+-      dprintf(debug_fd, "Started bpfilter\n");
++      debug_f = fopen("/dev/kmsg", "w");
++      setvbuf(debug_f, 0, _IOLBF, 0);
++      fprintf(debug_f, "Started bpfilter\n");
+       loop();
+-      close(debug_fd);
++      fclose(debug_f);
+       return 0;
+ }
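
A small userspace sketch of the logging approach the patch switches to: buffered stdio with line buffering on /dev/kmsg, rather than dprintf(), which the commit notes fails with -EINVAL because the interface does not support SEEK_CUR. Running it requires permission to write to /dev/kmsg; the message text is illustrative.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/kmsg", "w");

	if (!f) {
		perror("fopen /dev/kmsg");
		return 1;
	}
	setvbuf(f, NULL, _IOLBF, 0);            /* flush on every newline */
	fprintf(f, "example: hello from userspace\n");
	fclose(f);
	return 0;
}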
diff --git a/queue-5.5/net-cbs-fix-software-cbs-to-consider-packet-sending-time.patch b/queue-5.5/net-cbs-fix-software-cbs-to-consider-packet-sending-time.patch
new file mode 100644 (file)
index 0000000..560921f
--- /dev/null
@@ -0,0 +1,65 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Zh-yuan Ye <ye.zh-yuan@socionext.com>
+Date: Tue, 24 Mar 2020 17:28:25 +0900
+Subject: net: cbs: Fix software cbs to consider packet sending time
+
+From: Zh-yuan Ye <ye.zh-yuan@socionext.com>
+
+[ Upstream commit 961d0e5b32946703125964f9f5b6321d60f4d706 ]
+
+Currently the software CBS does not consider the packet sending time when
+depleting the credits. This caused the throughput to be
+Idleslope[kbps] * (Port transmit rate[kbps] / |Sendslope[kbps]|), whereas
+Idleslope * (Port transmit rate / (Idleslope + |Sendslope|)) = Idleslope
+is expected. To fix this, the patch takes the time when the packet sending
+completes into account by moving the anchor time variable "last" ahead to
+the send completion time upon transmission, and by adding a wait when the
+next dequeue request comes before the send completion time of the previous
+packet.
+
+changelog:
+V2->V3:
+ - remove unnecessary whitespace cleanup
+ - add the checks if port_rate is 0 before division
+
+V1->V2:
+ - combine variable "send_completed" into "last"
+ - add the comment for estimate of the packet sending
+
+Fixes: 585d763af09c ("net/sched: Introduce Credit Based Shaper (CBS) qdisc")
+Signed-off-by: Zh-yuan Ye <ye.zh-yuan@socionext.com>
+Reviewed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cbs.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(
+       s64 credits;
+       int len;
++      /* The previous packet is still being sent */
++      if (now < q->last) {
++              qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
++              return NULL;
++      }
+       if (q->credits < 0) {
+               credits = timediff_to_credits(now - q->last, q->idleslope);
+@@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(
+       credits += q->credits;
+       q->credits = max_t(s64, credits, q->locredit);
+-      q->last = now;
++      /* Estimate of the transmission of the last byte of the packet in ns */
++      if (unlikely(atomic64_read(&q->port_rate) == 0))
++              q->last = now;
++      else
++              q->last = now + div64_s64(len * NSEC_PER_SEC,
++                                        atomic64_read(&q->port_rate));
+       return skb;
+ }
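
A plain arithmetic sketch (not the qdisc itself) of the send-completion estimate the fix introduces: the anchor time is pushed to "now" plus the wire time of the dequeued packet, so the next dequeue arriving earlier has to wait. Rate and length values below are illustrative.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t now = 0;                   /* ns, arbitrary reference point */
	int64_t port_rate = 125000000LL;   /* bytes per second, i.e. 1 Gb/s */
	int64_t len = 1500;                /* packet length in bytes */

	/* estimated time when the last byte of the packet leaves the port */
	int64_t last = now + (len * NSEC_PER_SEC) / port_rate;

	printf("send completion %lld ns after dequeue\n", (long long)last);
	return 0;
}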
diff --git a/queue-5.5/net-dsa-fix-duplicate-frames-flooded-by-learning.patch b/queue-5.5/net-dsa-fix-duplicate-frames-flooded-by-learning.patch
new file mode 100644 (file)
index 0000000..8eda43a
--- /dev/null
@@ -0,0 +1,34 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Sun, 22 Mar 2020 13:58:50 -0700
+Subject: net: dsa: Fix duplicate frames flooded by learning
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 0e62f543bed03a64495bd2651d4fe1aa4bcb7fe5 ]
+
+When both the switch and the bridge are learning about new addresses,
+switch ports attached to the bridge would see duplicate ARP frames
+because both entities would attempt to send them.
+
+Fixes: 5037d532b83d ("net: dsa: add Broadcom tag RX/TX handler")
+Reported-by: Maxime Bizon <mbizon@freebox.fr>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Vivien Didelot <vivien.didelot@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_brcm.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/dsa/tag_brcm.c
++++ b/net/dsa/tag_brcm.c
+@@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(s
+       /* Remove Broadcom tag and update checksum */
+       skb_pull_rcsum(skb, BRCM_TAG_LEN);
++      skb->offload_fwd_mark = 1;
++
+       return skb;
+ }
+ #endif
diff --git a/queue-5.5/net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch b/queue-5.5/net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch
new file mode 100644 (file)
index 0000000..f094b85
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: "René van Dorst" <opensource@vdorst.com>
+Date: Thu, 19 Mar 2020 14:47:56 +0100
+Subject: net: dsa: mt7530: Change the LINK bit to reflect the link status
+
+From: "René van Dorst" <opensource@vdorst.com>
+
+[ Upstream commit 22259471b51925353bd7b16f864c79fdd76e425e ]
+
+Andrew reported:
+
+After a number of network port link up/down changes, sometimes the switch
+port gets stuck in a state where it thinks it is still transmitting packets
+but the cpu port is not actually transmitting anymore. In this state you
+will see a message on the console
+"mtk_soc_eth 1e100000.ethernet eth0: transmit timed out" and the Tx counter
+in ifconfig will be incrementing on the virtual port, but not on the
+cpu port.
+
+The issue is that MAC TX/RX status has no impact on the link status or
+queue manager of the switch. So the queue manager just queues up packets
+of a disabled port and sends out pause frames when the queue is full.
+
+Change the LINK bit to reflect the link status.
+
+Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch")
+Reported-by: Andrew Smith <andrew.smith@digi.com>
+Signed-off-by: René van Dorst <opensource@vdorst.com>
+Reviewed-by: Vivien Didelot <vivien.didelot@gmail.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/mt7530.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
+ static void
+ mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
+ {
+-      u32 mask = PMCR_TX_EN | PMCR_RX_EN;
++      u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
+       if (enable)
+               mt7530_set(priv, MT7530_PMCR_P(port), mask);
+@@ -1443,7 +1443,7 @@ static void mt7530_phylink_mac_config(st
+       mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
+                    PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
+       mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
+-                 PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
++                 PMCR_BACKPR_EN | PMCR_FORCE_MODE;
+       /* Are we connected to external phy */
+       if (port == 5 && dsa_is_user_port(ds, 5))
diff --git a/queue-5.5/net-dsa-tag_8021q-replace-dsa_8021q_remove_header-with-__skb_vlan_pop.patch b/queue-5.5/net-dsa-tag_8021q-replace-dsa_8021q_remove_header-with-__skb_vlan_pop.patch
new file mode 100644 (file)
index 0000000..d608e24
--- /dev/null
@@ -0,0 +1,152 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+Date: Tue, 24 Mar 2020 11:45:34 +0200
+Subject: net: dsa: tag_8021q: replace dsa_8021q_remove_header with __skb_vlan_pop
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit e80f40cbe4dd51371818e967d40da8fe305db5e4 ]
+
+Not only did this wheel not need reinventing, but there is also an issue
+with it: it doesn't remove the VLAN header in a way that preserves the L2
+payload checksum when that is provided by the DSA master hw. It should
+recalculate the checksum both for the push, before removing the header, and
+for the pull afterwards. But the current implementation is quite dizzying,
+with pulls followed immediately by pushes, the memmove done before the push,
+etc. This makes a DSA master with RX checksumming offload print stack traces
+with the infamous 'hw csum failure' message.
+
+So remove the dsa_8021q_remove_header function and replace it with
+something that actually works with inet checksumming.
+
+Fixes: d461933638ae ("net: dsa: tag_8021q: Create helper function for removing VLAN header")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/dsa/8021q.h |    7 -------
+ net/dsa/tag_8021q.c       |   43 -------------------------------------------
+ net/dsa/tag_sja1105.c     |   19 +++++++++----------
+ 3 files changed, 9 insertions(+), 60 deletions(-)
+
+--- a/include/linux/dsa/8021q.h
++++ b/include/linux/dsa/8021q.h
+@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
+ int dsa_8021q_rx_source_port(u16 vid);
+-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
+-
+ #else
+ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
+@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
+       return 0;
+ }
+-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
+-{
+-      return NULL;
+-}
+-
+ #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
+ #endif /* _NET_DSA_8021Q_H */
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -298,47 +298,4 @@ struct sk_buff *dsa_8021q_xmit(struct sk
+ }
+ EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
+-/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
+- * tag, after tpid and before tci. This is because so far, ETH_HLEN
+- * (DMAC, SMAC, EtherType) bytes were pulled.
+- * There are 2 bytes of VLAN tag left in skb->data, and upper
+- * layers expect the 'real' EtherType to be consumed as well.
+- * Coincidentally, a VLAN header is also of the same size as
+- * the number of bytes that need to be pulled.
+- *
+- * skb_mac_header                                      skb->data
+- * |                                                       |
+- * v                                                       v
+- * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
+- * +-----------------------+-----------------------+-------+-------+-------+
+- * |    Destination MAC    |      Source MAC       |  TPID |  TCI  | EType |
+- * +-----------------------+-----------------------+-------+-------+-------+
+- * ^                                               |               |
+- * |<--VLAN_HLEN-->to                              <---VLAN_HLEN--->
+- * from            |
+- *       >>>>>>>   v
+- *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
+- *       >>>>>>>   +-----------------------+-----------------------+-------+
+- *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
+- *                 +-----------------------+-----------------------+-------+
+- *                 ^                                                       ^
+- * (now part of    |                                                       |
+- *  skb->head)     skb_mac_header                                  skb->data
+- */
+-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
+-{
+-      u8 *from = skb_mac_header(skb);
+-      u8 *dest = from + VLAN_HLEN;
+-
+-      memmove(dest, from, ETH_HLEN - VLAN_HLEN);
+-      skb_pull(skb, VLAN_HLEN);
+-      skb_push(skb, ETH_HLEN);
+-      skb_reset_mac_header(skb);
+-      skb_reset_mac_len(skb);
+-      skb_pull_rcsum(skb, ETH_HLEN);
+-
+-      return skb;
+-}
+-EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
+-
+ MODULE_LICENSE("GPL v2");
+--- a/net/dsa/tag_sja1105.c
++++ b/net/dsa/tag_sja1105.c
+@@ -238,14 +238,14 @@ static struct sk_buff *sja1105_rcv(struc
+ {
+       struct sja1105_meta meta = {0};
+       int source_port, switch_id;
+-      struct vlan_ethhdr *hdr;
++      struct ethhdr *hdr;
+       u16 tpid, vid, tci;
+       bool is_link_local;
+       bool is_tagged;
+       bool is_meta;
+-      hdr = vlan_eth_hdr(skb);
+-      tpid = ntohs(hdr->h_vlan_proto);
++      hdr = eth_hdr(skb);
++      tpid = ntohs(hdr->h_proto);
+       is_tagged = (tpid == ETH_P_SJA1105);
+       is_link_local = sja1105_is_link_local(skb);
+       is_meta = sja1105_is_meta_frame(skb);
+@@ -254,7 +254,12 @@ static struct sk_buff *sja1105_rcv(struc
+       if (is_tagged) {
+               /* Normal traffic path. */
+-              tci = ntohs(hdr->h_vlan_TCI);
++              skb_push_rcsum(skb, ETH_HLEN);
++              __skb_vlan_pop(skb, &tci);
++              skb_pull_rcsum(skb, ETH_HLEN);
++              skb_reset_network_header(skb);
++              skb_reset_transport_header(skb);
++
+               vid = tci & VLAN_VID_MASK;
+               source_port = dsa_8021q_rx_source_port(vid);
+               switch_id = dsa_8021q_rx_switch_id(vid);
+@@ -283,12 +288,6 @@ static struct sk_buff *sja1105_rcv(struc
+               return NULL;
+       }
+-      /* Delete/overwrite fake VLAN header, DSA expects to not find
+-       * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
+-       */
+-      if (is_tagged)
+-              skb = dsa_8021q_remove_header(skb);
+-
+       return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
+                                             is_meta);
+ }
diff --git a/queue-5.5/net-ena-add-pci-shutdown-handler-to-allow-safe-kexec.patch b/queue-5.5/net-ena-add-pci-shutdown-handler-to-allow-safe-kexec.patch
new file mode 100644 (file)
index 0000000..36279a8
--- /dev/null
@@ -0,0 +1,128 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: "Guilherme G. Piccoli" <gpiccoli@canonical.com>
+Date: Fri, 20 Mar 2020 09:55:34 -0300
+Subject: net: ena: Add PCI shutdown handler to allow safe kexec
+
+From: "Guilherme G. Piccoli" <gpiccoli@canonical.com>
+
+[ Upstream commit 428c491332bca498c8eb2127669af51506c346c7 ]
+
+Currently ENA only provides the PCI remove() handler, used during rmmod
+for example. This is not called on shutdown/kexec path; we are potentially
+creating a failure scenario on kexec:
+
+(a) Kexec is triggered, no shutdown() / remove() handler is called for ENA;
+instead pci_device_shutdown() clears the master bit of the PCI device,
+stopping all DMA transactions;
+
+(b) Kexec reboot happens and the device gets enabled again, likely having
+its FW with that DMA transaction buffered; then it may trigger the (now
+invalid) memory operation in the new kernel, corrupting a kernel memory area.
+
+This patch aims to prevent this, by implementing a shutdown() handler
+quite similar to the remove() one - the difference being the handling
+of the netdev, which is unregistered on remove(), but following the
+convention observed in other drivers, it's only detached on shutdown().
+
+This prevents an odd issue in AWS Nitro instances, in which after the 2nd
+kexec the next one will fail with an initrd corruption, caused by a wild
+DMA write to invalid kernel memory. The lspci output for the adapter
+present in my instance is:
+
+00:05.0 Ethernet controller [0200]: Amazon.com, Inc. Elastic Network
+Adapter (ENA) [1d0f:ec20]
+
+Suggested-by: Gavin Shan <gshan@redhat.com>
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@canonical.com>
+Acked-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c |   51 +++++++++++++++++++++------
+ 1 file changed, 41 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -3662,13 +3662,15 @@ err_disable_device:
+ /*****************************************************************************/
+-/* ena_remove - Device Removal Routine
++/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
+  * @pdev: PCI device information struct
++ * @shutdown: Is it a shutdown operation? If false, means it is a removal
+  *
+- * ena_remove is called by the PCI subsystem to alert the driver
+- * that it should release a PCI device.
++ * __ena_shutoff is a helper routine that does the real work on shutdown and
++ * removal paths; the difference between those paths is with regards to whether
++ * dettach or unregister the netdevice.
+  */
+-static void ena_remove(struct pci_dev *pdev)
++static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
+ {
+       struct ena_adapter *adapter = pci_get_drvdata(pdev);
+       struct ena_com_dev *ena_dev;
+@@ -3687,13 +3689,17 @@ static void ena_remove(struct pci_dev *p
+       cancel_work_sync(&adapter->reset_task);
+-      rtnl_lock();
++      rtnl_lock(); /* lock released inside the below if-else block */
+       ena_destroy_device(adapter, true);
+-      rtnl_unlock();
+-
+-      unregister_netdev(netdev);
+-
+-      free_netdev(netdev);
++      if (shutdown) {
++              netif_device_detach(netdev);
++              dev_close(netdev);
++              rtnl_unlock();
++      } else {
++              rtnl_unlock();
++              unregister_netdev(netdev);
++              free_netdev(netdev);
++      }
+       ena_com_rss_destroy(ena_dev);
+@@ -3708,6 +3714,30 @@ static void ena_remove(struct pci_dev *p
+       vfree(ena_dev);
+ }
++/* ena_remove - Device Removal Routine
++ * @pdev: PCI device information struct
++ *
++ * ena_remove is called by the PCI subsystem to alert the driver
++ * that it should release a PCI device.
++ */
++
++static void ena_remove(struct pci_dev *pdev)
++{
++      __ena_shutoff(pdev, false);
++}
++
++/* ena_shutdown - Device Shutdown Routine
++ * @pdev: PCI device information struct
++ *
++ * ena_shutdown is called by the PCI subsystem to alert the driver that
++ * a shutdown/reboot (or kexec) is happening and device must be disabled.
++ */
++
++static void ena_shutdown(struct pci_dev *pdev)
++{
++      __ena_shutoff(pdev, true);
++}
++
+ #ifdef CONFIG_PM
+ /* ena_suspend - PM suspend callback
+  * @pdev: PCI device information struct
+@@ -3757,6 +3787,7 @@ static struct pci_driver ena_pci_driver
+       .id_table       = ena_pci_tbl,
+       .probe          = ena_probe,
+       .remove         = ena_remove,
++      .shutdown       = ena_shutdown,
+ #ifdef CONFIG_PM
+       .suspend    = ena_suspend,
+       .resume     = ena_resume,
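
A hedged stand-in (plain C, not the driver) for the pattern the patch adopts: remove() and shutdown() funnel into one teardown helper, and only the handling of the network device differs -- detached on shutdown, unregistered and freed on removal.

#include <stdbool.h>
#include <stdio.h>

static void shutoff(bool shutdown)
{
	puts("stop reset work, destroy device, release DMA resources");
	if (shutdown)
		puts("detach netdev (left registered; sufficient for kexec/reboot)");
	else
		puts("unregister and free netdev");
}

static void drv_remove(void)   { shutoff(false); }
static void drv_shutdown(void) { shutoff(true); }

int main(void)
{
	drv_shutdown();   /* e.g. kexec/reboot path */
	drv_remove();     /* e.g. rmmod path */
	return 0;
}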
diff --git a/queue-5.5/net-ena-avoid-memory-access-violation-by-validating-req_id-properly.patch b/queue-5.5/net-ena-avoid-memory-access-violation-by-validating-req_id-properly.patch
new file mode 100644 (file)
index 0000000..07d4967
--- /dev/null
@@ -0,0 +1,69 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Arthur Kiyanovski <akiyano@amazon.com>
+Date: Tue, 17 Mar 2020 09:06:41 +0200
+Subject: net: ena: avoid memory access violation by validating req_id properly
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 30623e1ed116bcd1785217d0a98eec643687e091 ]
+
+Rx req_id is an index in struct ena_eth_io_rx_cdesc_base.
+The driver should validate that the Rx req_id it received from
+the device is in the range [0, ring_size - 1]. Failure to do so could
+lead to a potential memory access violation.
+The validation was mistakenly done when refilling
+the Rx submission queue and not in the Rx completion queue.
+
+Fixes: ad974baef2a1 ("net: ena: add support for out of order rx buffers refill")
+Signed-off-by: Noam Dagan <ndagan@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c |   15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -532,13 +532,9 @@ static int ena_refill_rx_bufs(struct ena
+               struct ena_rx_buffer *rx_info;
+               req_id = rx_ring->free_ids[next_to_use];
+-              rc = validate_rx_req_id(rx_ring, req_id);
+-              if (unlikely(rc < 0))
+-                      break;
+               rx_info = &rx_ring->rx_buffer_info[req_id];
+-
+               rc = ena_alloc_rx_page(rx_ring, rx_info,
+                                      GFP_ATOMIC | __GFP_COMP);
+               if (unlikely(rc < 0)) {
+@@ -868,9 +864,15 @@ static struct sk_buff *ena_rx_skb(struct
+       struct ena_rx_buffer *rx_info;
+       u16 len, req_id, buf = 0;
+       void *va;
++      int rc;
+       len = ena_bufs[buf].len;
+       req_id = ena_bufs[buf].req_id;
++
++      rc = validate_rx_req_id(rx_ring, req_id);
++      if (unlikely(rc < 0))
++              return NULL;
++
+       rx_info = &rx_ring->rx_buffer_info[req_id];
+       if (unlikely(!rx_info->page)) {
+@@ -943,6 +945,11 @@ static struct sk_buff *ena_rx_skb(struct
+               buf++;
+               len = ena_bufs[buf].len;
+               req_id = ena_bufs[buf].req_id;
++
++              rc = validate_rx_req_id(rx_ring, req_id);
++              if (unlikely(rc < 0))
++                      return NULL;
++
+               rx_info = &rx_ring->rx_buffer_info[req_id];
+       } while (1);
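
A minimal sketch of the check being relocated: an index handed back by the device must be validated against the ring size before it is used to index driver state. The function name and ring size below are illustrative, not the ENA driver's.

#include <stdio.h>

#define RING_SIZE 1024u

static int validate_req_id(unsigned int req_id)
{
	if (req_id >= RING_SIZE) {
		fprintf(stderr, "invalid req_id %u\n", req_id);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("req_id 5:    %d\n", validate_req_id(5));     /* accepted */
	printf("req_id 4096: %d\n", validate_req_id(4096));  /* rejected */
	return 0;
}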
diff --git a/queue-5.5/net-ena-fix-continuous-keep-alive-resets.patch b/queue-5.5/net-ena-fix-continuous-keep-alive-resets.patch
new file mode 100644 (file)
index 0000000..3a0b410
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Arthur Kiyanovski <akiyano@amazon.com>
+Date: Tue, 17 Mar 2020 09:06:42 +0200
+Subject: net: ena: fix continuous keep-alive resets
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit dfdde1345bc124816f0fd42fa91b8748051e758e ]
+
+last_keep_alive_jiffies is updated in probe and when a keep-alive
+event is received.  In case the driver times out on a keep-alive event,
+it is very likely to keep timing out on subsequent keep-alive events.
+This is because when the driver recovers from the keep-alive-timeout reset,
+the value of last_keep_alive_jiffies is very old, and if a keep-alive
+event is not received before the next timer expires, the value of
+last_keep_alive_jiffies will cause another keep-alive-timeout reset
+and so forth in a loop.
+
+Solution:
+Update last_keep_alive_jiffies whenever the device is restored after
+reset.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Noam Dagan <ndagan@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2832,6 +2832,7 @@ static int ena_restore_device(struct ena
+               netif_carrier_on(adapter->netdev);
+       mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
++      adapter->last_keep_alive_jiffies = jiffies;
+       dev_err(&pdev->dev,
+               "Device reset completed successfully, Driver info: %s\n",
+               version);
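
A toy model (not the driver) of the watchdog behaviour described above: if the last-seen timestamp is not refreshed when the device is restored, the next check still sees a stale value and triggers another reset; refreshing it on restore breaks the loop. The timeout and clock values are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define KEEP_ALIVE_TIMEOUT 100   /* in fake "jiffies" */

static long now;
static long last_keep_alive;

static bool keep_alive_expired(void)
{
	return now - last_keep_alive > KEEP_ALIVE_TIMEOUT;
}

int main(void)
{
	last_keep_alive = 0;
	now = 250;                   /* one timeout has already happened */
	printf("without refresh: %s\n", keep_alive_expired() ? "reset again" : "ok");

	last_keep_alive = now;       /* what the fix does after the device is restored */
	now += 50;
	printf("with refresh:    %s\n", keep_alive_expired() ? "reset again" : "ok");
	return 0;
}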
diff --git a/queue-5.5/net-ena-fix-incorrect-setting-of-the-number-of-msix-vectors.patch b/queue-5.5/net-ena-fix-incorrect-setting-of-the-number-of-msix-vectors.patch
new file mode 100644 (file)
index 0000000..ea00bc1
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Arthur Kiyanovski <akiyano@amazon.com>
+Date: Tue, 17 Mar 2020 09:06:39 +0200
+Subject: net: ena: fix incorrect setting of the number of msix vectors
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit ce1f352162828ba07470328828a32f47aa759020 ]
+
+Overview:
+We don't frequently change the msix vectors throughout the life cycle of
+the driver. We do so in two functions: ena_probe() and ena_restore().
+ena_probe() is only called when the driver is loaded. ena_restore() on the
+other hand is called during device reset / resume operations.
+
+We use num_io_queues for calculating and allocating the number of msix
+vectors. At ena_probe() this value is equal to max_num_io_queues, and thus
+this is not an issue; however, ena_restore() might be called after the
+number of io queues has changed.
+
+A possible bug scenario is as follows:
+
+* Change number of queues from 8 to 4.
+  (num_io_queues = 4, max_num_io_queues = 8, msix_vecs = 9)
+* Trigger reset occurs -> ena_restore is called.
+  (num_io_queues = 4, max_num_io_queues = 8, msix_vecs = 5)
+* Change number of queues from 4 to 6.
+  (num_io_queues = 6, max_num_io_queues = 8, msix_vecs = 5)
+* The driver will reset due to failure of check_for_rx_interrupt_queue()
+
+Fix:
+This can be easily fixed by always using max_num_io_queues to init the
+msix_vecs, since this number won't change as opposed to num_io_queues.
+
+Fixes: 4d19266022ec ("net: ena: multiple queue creation related cleanups")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1346,7 +1346,7 @@ static int ena_enable_msix(struct ena_ad
+       }
+       /* Reserved the max msix vectors we might need */
+-      msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
++      msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
+       netif_dbg(adapter, probe, adapter->netdev,
+                 "trying to enable MSI-X, vectors %d\n", msix_vecs);
diff --git a/queue-5.5/net-ena-fix-request-of-incorrect-number-of-irq-vectors.patch b/queue-5.5/net-ena-fix-request-of-incorrect-number-of-irq-vectors.patch
new file mode 100644 (file)
index 0000000..5744854
--- /dev/null
@@ -0,0 +1,109 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Arthur Kiyanovski <akiyano@amazon.com>
+Date: Tue, 17 Mar 2020 09:06:40 +0200
+Subject: net: ena: fix request of incorrect number of IRQ vectors
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit e02ae6ed51be3d28923bfd318ae57000f5643da5 ]
+
+Bug:
+In short, the main issue is caused by the fact that the number of queues
+is changed using ethtool after ena_probe() has been called and before
+ena_up() was executed. Here is the full scenario in detail:
+
+* ena_probe() is called when the driver is loaded, the driver is not up
+  yet at the end of ena_probe().
+* The number of queues is changed -> io_queue_count is changed as well -
+  ena_up() is not called since the "dev_was_up" boolean in
+  ena_update_queue_count() is false.
+* ena_up() is called by the kernel (it's called asynchronously some
+  time after ena_probe()). ena_setup_io_intr() is called by ena_up() and
+  it uses io_queue_count to get the suitable irq lines for each msix
+  vector. The function ena_request_io_irq() is called right after that
+  and it uses msix_vecs - This value only changes during ena_probe() and
+  ena_restore() - to request the irq vectors. This results in "Failed to
+  request I/O IRQ" error for i > io_queue_count.
+
+Numeric example:
+* After ena_probe() io_queue_count = 8, msix_vecs = 9.
+* The number of queues changes to 4 -> io_queue_count = 4, msix_vecs = 9.
+* ena_up() is executed for the first time:
+  ** ena_setup_io_intr() inits the vectors only up to io_queue_count.
+  ** ena_request_io_irq() calls request_irq() and fails for i = 5.
+
+How to reproduce:
+simply run the following commands:
+    sudo rmmod ena && sudo insmod ena.ko;
+    sudo ethtool -L eth1 combined 3;
+
+Fix:
+Use ENA_MAX_MSIX_VEC(adapter->num_io_queues + adapter->xdp_num_queues)
+instead of adapter->msix_vecs. We need to take XDP queues into
+consideration as they need to have msix vectors assigned to them as well.
+Note that the XDP cannot be attached before the driver is up and running
+but in XDP mode the issue might occur when the number of queues changes
+right after a reset trigger.
+The ENA_MAX_MSIX_VEC simply adds one to the argument since the first msix
+vector is reserved for management queue.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1444,6 +1444,7 @@ static int ena_request_mgmnt_irq(struct
+ static int ena_request_io_irq(struct ena_adapter *adapter)
+ {
++      u32 io_queue_count = adapter->num_io_queues;
+       unsigned long flags = 0;
+       struct ena_irq *irq;
+       int rc = 0, i, k;
+@@ -1454,7 +1455,7 @@ static int ena_request_io_irq(struct ena
+               return -EINVAL;
+       }
+-      for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
++      for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+               irq = &adapter->irq_tbl[i];
+               rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+                                irq->data);
+@@ -1495,6 +1496,7 @@ static void ena_free_mgmnt_irq(struct en
+ static void ena_free_io_irq(struct ena_adapter *adapter)
+ {
++      u32 io_queue_count = adapter->num_io_queues;
+       struct ena_irq *irq;
+       int i;
+@@ -1505,7 +1507,7 @@ static void ena_free_io_irq(struct ena_a
+       }
+ #endif /* CONFIG_RFS_ACCEL */
+-      for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
++      for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+               irq = &adapter->irq_tbl[i];
+               irq_set_affinity_hint(irq->vector, NULL);
+               free_irq(irq->vector, irq->data);
+@@ -1520,12 +1522,13 @@ static void ena_disable_msix(struct ena_
+ static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
+ {
++      u32 io_queue_count = adapter->num_io_queues;
+       int i;
+       if (!netif_running(adapter->netdev))
+               return;
+-      for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
++      for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
+               synchronize_irq(adapter->irq_tbl[i].vector);
+ }
diff --git a/queue-5.5/net-ip_gre-accept-ifla_info_data-less-configuration.patch b/queue-5.5/net-ip_gre-accept-ifla_info_data-less-configuration.patch
new file mode 100644 (file)
index 0000000..09a45c9
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Petr Machata <petrm@mellanox.com>
+Date: Mon, 16 Mar 2020 19:53:00 +0200
+Subject: net: ip_gre: Accept IFLA_INFO_DATA-less configuration
+
+From: Petr Machata <petrm@mellanox.com>
+
+[ Upstream commit 32ca98feab8c9076c89c0697c5a85e46fece809d ]
+
+The fix referenced below causes a crash when an ERSPAN tunnel is created
+without passing IFLA_INFO_DATA. Fix by validating passed-in data in the
+same way as ipgre does.
+
+Fixes: e1f8f78ffe98 ("net: ip_gre: Separate ERSPAN newlink / changelink callbacks")
+Reported-by: syzbot+1b4ebf4dae4e510dd219@syzkaller.appspotmail.com
+Signed-off-by: Petr Machata <petrm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_gre.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1168,6 +1168,8 @@ static int erspan_netlink_parms(struct n
+       err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
+       if (err)
+               return err;
++      if (!data)
++              return 0;
+       if (data[IFLA_GRE_ERSPAN_VER]) {
+               t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
diff --git a/queue-5.5/net-ip_gre-separate-erspan-newlink-changelink-callbacks.patch b/queue-5.5/net-ip_gre-separate-erspan-newlink-changelink-callbacks.patch
new file mode 100644 (file)
index 0000000..5cefb58
--- /dev/null
@@ -0,0 +1,187 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Petr Machata <petrm@mellanox.com>
+Date: Fri, 13 Mar 2020 13:39:36 +0200
+Subject: net: ip_gre: Separate ERSPAN newlink / changelink callbacks
+
+From: Petr Machata <petrm@mellanox.com>
+
+[ Upstream commit e1f8f78ffe9854308b9e12a73ebe4e909074fc33 ]
+
+ERSPAN shares most of the code path with GRE and gretap code. While that
+helps keep the code compact, it is also error prone. Currently a broken
+userspace can turn a gretap tunnel into a de facto ERSPAN one by passing
+IFLA_GRE_ERSPAN_VER. There has been a similar issue in ip6gretap in the
+past.
+
+To prevent these problems in future, split the newlink and changelink code
+paths. Split the ERSPAN code out of ipgre_netlink_parms() into a new
+function erspan_netlink_parms(). Extract a piece of common logic from
+ipgre_newlink() and ipgre_changelink() into ipgre_newlink_encap_setup().
+Add erspan_newlink() and erspan_changelink().
+
+Fixes: 84e54fe0a5ea ("gre: introduce native tunnel support for ERSPAN")
+Signed-off-by: Petr Machata <petrm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_gre.c |  103 ++++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 85 insertions(+), 18 deletions(-)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1153,6 +1153,22 @@ static int ipgre_netlink_parms(struct ne
+       if (data[IFLA_GRE_FWMARK])
+               *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
++      return 0;
++}
++
++static int erspan_netlink_parms(struct net_device *dev,
++                              struct nlattr *data[],
++                              struct nlattr *tb[],
++                              struct ip_tunnel_parm *parms,
++                              __u32 *fwmark)
++{
++      struct ip_tunnel *t = netdev_priv(dev);
++      int err;
++
++      err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
++      if (err)
++              return err;
++
+       if (data[IFLA_GRE_ERSPAN_VER]) {
+               t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+@@ -1276,45 +1292,70 @@ static void ipgre_tap_setup(struct net_d
+       ip_tunnel_setup(dev, gre_tap_net_id);
+ }
+-static int ipgre_newlink(struct net *src_net, struct net_device *dev,
+-                       struct nlattr *tb[], struct nlattr *data[],
+-                       struct netlink_ext_ack *extack)
++static int
++ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
+ {
+-      struct ip_tunnel_parm p;
+       struct ip_tunnel_encap ipencap;
+-      __u32 fwmark = 0;
+-      int err;
+       if (ipgre_netlink_encap_parms(data, &ipencap)) {
+               struct ip_tunnel *t = netdev_priv(dev);
+-              err = ip_tunnel_encap_setup(t, &ipencap);
++              int err = ip_tunnel_encap_setup(t, &ipencap);
+               if (err < 0)
+                       return err;
+       }
++      return 0;
++}
++
++static int ipgre_newlink(struct net *src_net, struct net_device *dev,
++                       struct nlattr *tb[], struct nlattr *data[],
++                       struct netlink_ext_ack *extack)
++{
++      struct ip_tunnel_parm p;
++      __u32 fwmark = 0;
++      int err;
++
++      err = ipgre_newlink_encap_setup(dev, data);
++      if (err)
++              return err;
++
+       err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err < 0)
+               return err;
+       return ip_tunnel_newlink(dev, tb, &p, fwmark);
+ }
++static int erspan_newlink(struct net *src_net, struct net_device *dev,
++                        struct nlattr *tb[], struct nlattr *data[],
++                        struct netlink_ext_ack *extack)
++{
++      struct ip_tunnel_parm p;
++      __u32 fwmark = 0;
++      int err;
++
++      err = ipgre_newlink_encap_setup(dev, data);
++      if (err)
++              return err;
++
++      err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
++      if (err)
++              return err;
++      return ip_tunnel_newlink(dev, tb, &p, fwmark);
++}
++
+ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
+                           struct nlattr *data[],
+                           struct netlink_ext_ack *extack)
+ {
+       struct ip_tunnel *t = netdev_priv(dev);
+-      struct ip_tunnel_encap ipencap;
+       __u32 fwmark = t->fwmark;
+       struct ip_tunnel_parm p;
+       int err;
+-      if (ipgre_netlink_encap_parms(data, &ipencap)) {
+-              err = ip_tunnel_encap_setup(t, &ipencap);
+-
+-              if (err < 0)
+-                      return err;
+-      }
++      err = ipgre_newlink_encap_setup(dev, data);
++      if (err)
++              return err;
+       err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err < 0)
+@@ -1327,8 +1368,34 @@ static int ipgre_changelink(struct net_d
+       t->parms.i_flags = p.i_flags;
+       t->parms.o_flags = p.o_flags;
+-      if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
+-              ipgre_link_update(dev, !tb[IFLA_MTU]);
++      ipgre_link_update(dev, !tb[IFLA_MTU]);
++
++      return 0;
++}
++
++static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
++                           struct nlattr *data[],
++                           struct netlink_ext_ack *extack)
++{
++      struct ip_tunnel *t = netdev_priv(dev);
++      __u32 fwmark = t->fwmark;
++      struct ip_tunnel_parm p;
++      int err;
++
++      err = ipgre_newlink_encap_setup(dev, data);
++      if (err)
++              return err;
++
++      err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
++      if (err < 0)
++              return err;
++
++      err = ip_tunnel_changelink(dev, tb, &p, fwmark);
++      if (err < 0)
++              return err;
++
++      t->parms.i_flags = p.i_flags;
++      t->parms.o_flags = p.o_flags;
+       return 0;
+ }
+@@ -1519,8 +1586,8 @@ static struct rtnl_link_ops erspan_link_
+       .priv_size      = sizeof(struct ip_tunnel),
+       .setup          = erspan_setup,
+       .validate       = erspan_validate,
+-      .newlink        = ipgre_newlink,
+-      .changelink     = ipgre_changelink,
++      .newlink        = erspan_newlink,
++      .changelink     = erspan_changelink,
+       .dellink        = ip_tunnel_dellink,
+       .get_size       = ipgre_get_size,
+       .fill_info      = ipgre_fill_info,
diff --git a/queue-5.5/net-mlx5-dr-fix-postsend-actions-write-length.patch b/queue-5.5/net-mlx5-dr-fix-postsend-actions-write-length.patch
new file mode 100644 (file)
index 0000000..49107f8
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Hamdan Igbaria <hamdani@mellanox.com>
+Date: Mon, 24 Feb 2020 14:41:29 +0200
+Subject: net/mlx5: DR, Fix postsend actions write length
+
+From: Hamdan Igbaria <hamdani@mellanox.com>
+
+[ Upstream commit 692b0399a22530b2de8490bea75a7d20d59391d0 ]
+
+Fix the send info write length to be (number of actions x action size) in bytes.
+
+Fixes: 297cccebdc5a ("net/mlx5: DR, Expose an internal API to issue RDMA operations")
+Signed-off-by: Hamdan Igbaria <hamdani@mellanox.com>
+Reviewed-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c |    1 -
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c   |    3 ++-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -930,7 +930,6 @@ static int dr_actions_l2_rewrite(struct
+       action->rewrite.data = (void *)ops;
+       action->rewrite.num_of_actions = i;
+-      action->rewrite.chunk->byte_size = i * sizeof(*ops);
+       ret = mlx5dr_send_postsend_action(dmn, action);
+       if (ret) {
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct m
+       int ret;
+       send_info.write.addr = (uintptr_t)action->rewrite.data;
+-      send_info.write.length = action->rewrite.chunk->byte_size;
++      send_info.write.length = action->rewrite.num_of_actions *
++                               DR_MODIFY_ACTION_SIZE;
+       send_info.write.lkey = 0;
+       send_info.remote_addr = action->rewrite.chunk->mr_addr;
+       send_info.rkey = action->rewrite.chunk->rkey;
diff --git a/queue-5.5/net-mlx5_core-set-ib-capability-mask1-to-fix-ib_srpt-connection-failure.patch b/queue-5.5/net-mlx5_core-set-ib-capability-mask1-to-fix-ib_srpt-connection-failure.patch
new file mode 100644 (file)
index 0000000..4db4999
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Mon, 16 Mar 2020 09:31:03 +0200
+Subject: net/mlx5_core: Set IB capability mask1 to fix ib_srpt connection failure
+
+From: Leon Romanovsky <leonro@mellanox.com>
+
+[ Upstream commit 306f354c67397b3138300cde875c5cab45b857f7 ]
+
+The cap_mask1 field isn't protected by field_select and isn't listed among
+the RW fields, but it must be written in order to properly initialize ports
+in IB virtualization mode.
+
+Link: https://lore.kernel.org/linux-rdma/88bab94d2fd72f3145835b4518bc63dda587add6.camel@redhat.com
+Fixes: ab118da4c10a ("net/mlx5: Don't write read-only fields in MODIFY_HCA_VPORT_CONTEXT command")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/vport.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(s
+               MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+       if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
+               MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
++      MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
++      MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
++               req->cap_mask1_perm);
+       err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ ex:
+       kfree(in);
diff --git a/queue-5.5/net-mlx5e-do-not-recover-from-a-non-fatal-syndrome.patch b/queue-5.5/net-mlx5e-do-not-recover-from-a-non-fatal-syndrome.patch
new file mode 100644 (file)
index 0000000..4ec82c4
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Aya Levin <ayal@mellanox.com>
+Date: Thu, 19 Mar 2020 13:25:17 +0200
+Subject: net/mlx5e: Do not recover from a non-fatal syndrome
+
+From: Aya Levin <ayal@mellanox.com>
+
+[ Upstream commit 187a9830c921d92c4a9a8e2921ecc4b35a97532c ]
+
+For non-fatal syndromes like LOCAL_LENGTH_ERR, recovery shouldn't be
+triggered. In these scenarios, the RQ is not actually in ERR state.
+This misleads the recovery flow, which assumes that the RQ is really in an
+error state and that no more completions will arrive, causing crashes on
+bad page state.
+
+Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/health.h |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+@@ -10,8 +10,7 @@
+ static inline bool cqe_syndrome_needs_recover(u8 syndrome)
+ {
+-      return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
+-             syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
++      return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+              syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
+              syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ }
diff --git a/queue-5.5/net-mlx5e-enhance-icosq-wqe-info-fields.patch b/queue-5.5/net-mlx5e-enhance-icosq-wqe-info-fields.patch
new file mode 100644 (file)
index 0000000..f8e44dd
--- /dev/null
@@ -0,0 +1,95 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Aya Levin <ayal@mellanox.com>
+Date: Mon, 9 Mar 2020 09:44:18 +0200
+Subject: net/mlx5e: Enhance ICOSQ WQE info fields
+
+From: Aya Levin <ayal@mellanox.com>
+
+[ Upstream commit 1de0306c3a05d305e45b1f1fabe2f4e94222eb6b ]
+
+Add the number of WQEBBs (WQE Basic Blocks) to the WQE info struct. Set the
+number of WQEBBs on WQE post, and increment the consumer counter (cc)
+on completion.
+
+In case of error completions, the cc was mistakenly not incremented,
+keeping a gap between cc and pc (producer counter). This failed the
+recovery flow on the ICOSQ from a CQE error, which timed out waiting for
+the cc and pc to meet.
+
+Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h      |    1 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c   |   11 +++++------
+ drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c |    1 +
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -371,6 +371,7 @@ enum {
+ struct mlx5e_sq_wqe_info {
+       u8  opcode;
++      u8 num_wqebbs;
+       /* Auxiliary data for different opcodes. */
+       union {
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag
+       /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+       for (; wi < edge_wi; wi++) {
+               wi->opcode = MLX5_OPCODE_NOP;
++              wi->num_wqebbs = 1;
+               mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+       }
+ }
+@@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct m
+       umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+       sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
++      sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
+       sq->db.ico_wqe[pi].umr.rq = rq;
+       sq->pc += MLX5E_UMR_WQEBBS;
+@@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *
+                       ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+                       wi = &sq->db.ico_wqe[ci];
++                      sqcc += wi->num_wqebbs;
+                       if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+                               netdev_WARN_ONCE(cq->channel->netdev,
+@@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *
+                               break;
+                       }
+-                      if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
+-                              sqcc += MLX5E_UMR_WQEBBS;
++                      if (likely(wi->opcode == MLX5_OPCODE_UMR))
+                               wi->umr.rq->mpwqe.umr_completed++;
+-                      } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
+-                              sqcc++;
+-                      } else {
++                      else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+                               netdev_WARN_ONCE(cq->channel->netdev,
+                                                "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+                                                wi->opcode);
+-                      }
+               } while (!last_wqe);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icos
+       u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
++      sq->db.ico_wqe[pi].num_wqebbs = 1;
+       nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+ }
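
As a rough illustration of the counter argument in the changelog above, the toy
program below (plain userspace C with made-up values, not the mlx5 code) shows
why the consumer counter must advance by the WQE's size even on error
completions before any wait for cc == pc can finish.

        #include <stdio.h>

        int main(void)
        {
                unsigned int pc = 0, cc = 0;        /* producer / consumer counters */
                const unsigned int umr_wqebbs = 4;  /* size of one posted UMR WQE */

                pc += umr_wqebbs;                   /* post the WQE */

                /* Old behaviour on an error completion: cc stays behind forever.
                 * Fixed behaviour: cc advances by the stored wi->num_wqebbs. */
                cc += umr_wqebbs;

                printf("recovery wait for cc == pc can finish: %s\n",
                       cc == pc ? "yes" : "no");
                return 0;
        }
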
diff --git a/queue-5.5/net-mlx5e-fix-endianness-handling-in-pedit-mask.patch b/queue-5.5/net-mlx5e-fix-endianness-handling-in-pedit-mask.patch
new file mode 100644 (file)
index 0000000..97c9968
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Sebastian Hense <sebastian.hense1@ibm.com>
+Date: Thu, 20 Feb 2020 08:11:36 +0100
+Subject: net/mlx5e: Fix endianness handling in pedit mask
+
+From: Sebastian Hense <sebastian.hense1@ibm.com>
+
+[ Upstream commit 404402abd5f90aa90a134eb9604b1750c1941529 ]
+
+The mask value is provided as 64 bits and has to be cast to
+either 32 or 16 bits. On big endian systems the wrong half was
+cast, which resulted in an all-zero mask.
+
+Fixes: 2b64beba0251 ("net/mlx5e: Support header re-write of partial fields in TC pedit offload")
+Signed-off-by: Sebastian Hense <sebastian.hense1@ibm.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2432,10 +2432,11 @@ static int offload_pedit_fields(struct p
+                       continue;
+               if (f->field_bsize == 32) {
+-                      mask_be32 = *(__be32 *)&mask;
++                      mask_be32 = (__be32)mask;
+                       mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+               } else if (f->field_bsize == 16) {
+-                      mask_be16 = *(__be16 *)&mask;
++                      mask_be32 = (__be32)mask;
++                      mask_be16 = *(__be16 *)&mask_be32;
+                       mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+               }
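
A minimal userspace sketch of the endianness pitfall described in the changelog
above: reinterpreting the first bytes of a 64-bit object picks up the low-order
half only on little-endian hosts; on big-endian hosts it yields zeros, matching
the all-zero-mask symptom. The values are hypothetical, not the driver code.

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                unsigned long mask = 0xffffUL;  /* mask for a 16-bit header field */
                uint16_t first_two;

                /* Old approach: reinterpret the first two bytes of the object. */
                memcpy(&first_two, &mask, sizeof(first_two));

                /* Little-endian: 0xffff.  Big-endian: 0x0000, i.e. the
                 * "all zero mask" symptom from the changelog. */
                printf("first two bytes of mask: 0x%04x\n", first_two);
                return 0;
        }
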
diff --git a/queue-5.5/net-mlx5e-fix-icosq-recovery-flow-with-striding-rq.patch b/queue-5.5/net-mlx5e-fix-icosq-recovery-flow-with-striding-rq.patch
new file mode 100644 (file)
index 0000000..b6c2ae4
--- /dev/null
@@ -0,0 +1,106 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Aya Levin <ayal@mellanox.com>
+Date: Mon, 16 Mar 2020 16:53:10 +0200
+Subject: net/mlx5e: Fix ICOSQ recovery flow with Striding RQ
+
+From: Aya Levin <ayal@mellanox.com>
+
+[ Upstream commit e239c6d686e1c37fb2ab143162dfb57471a8643f ]
+
+In striding RQ mode, the buffers of an RX WQE are first
+prepared and posted to the HW using UMR WQEs via the ICOSQ.
+We maintain the state of these in-progress WQEs in the RQ
+SW struct.
+
+In the flow of ICOSQ recovery, the corresponding RQ is not
+in error state, hence:
+
+- The buffers of the in-progress WQEs must be released
+  and the RQ metadata should reflect it.
+- Existing RX WQEs in the RQ should not be affected.
+
+For this, wrap the dealloc of the in-progress WQEs in
+a function, and use it in the ICOSQ recovery flow
+instead of mlx5e_free_rx_descs().
+
+Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h             |    1 
+ drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c |    2 
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c        |   31 +++++++++++----
+ 3 files changed, 26 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -1059,6 +1059,7 @@ int mlx5e_modify_rq_state(struct mlx5e_r
+ void mlx5e_activate_rq(struct mlx5e_rq *rq);
+ void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+ void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
+ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_c
+               goto out;
+       mlx5e_reset_icosq_cc_pc(icosq);
+-      mlx5e_free_rx_descs(rq);
++      mlx5e_free_rx_in_progress_descs(rq);
+       clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+       mlx5e_activate_icosq(icosq);
+       mlx5e_activate_rq(rq);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -822,6 +822,29 @@ int mlx5e_wait_for_min_rx_wqes(struct ml
+       return -ETIMEDOUT;
+ }
++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
++{
++      struct mlx5_wq_ll *wq;
++      u16 head;
++      int i;
++
++      if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++              return;
++
++      wq = &rq->mpwqe.wq;
++      head = wq->head;
++
++      /* Outstanding UMR WQEs (in progress) start at wq->head */
++      for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
++              rq->dealloc_wqe(rq, head);
++              head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
++      }
++
++      rq->mpwqe.actual_wq_head = wq->head;
++      rq->mpwqe.umr_in_progress = 0;
++      rq->mpwqe.umr_completed = 0;
++}
++
+ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+ {
+       __be16 wqe_ix_be;
+@@ -829,14 +852,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq
+       if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+               struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+-              u16 head = wq->head;
+-              int i;
+-              /* Outstanding UMR WQEs (in progress) start at wq->head */
+-              for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+-                      rq->dealloc_wqe(rq, head);
+-                      head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+-              }
++              mlx5e_free_rx_in_progress_descs(rq);
+               while (!mlx5_wq_ll_is_empty(wq)) {
+                       struct mlx5e_rx_wqe_ll *wqe;
diff --git a/queue-5.5/net-mlx5e-fix-missing-reset-of-sw-metadata-in-striding-rq-reset.patch b/queue-5.5/net-mlx5e-fix-missing-reset-of-sw-metadata-in-striding-rq-reset.patch
new file mode 100644 (file)
index 0000000..1a65699
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Aya Levin <ayal@mellanox.com>
+Date: Thu, 12 Mar 2020 12:35:32 +0200
+Subject: net/mlx5e: Fix missing reset of SW metadata in Striding RQ reset
+
+From: Aya Levin <ayal@mellanox.com>
+
+[ Upstream commit 39369fd536d485a99a59d8e357c0d4d3ce19a3b8 ]
+
+When resetting the RQ (moving RQ state from RST to RDY), the driver
+resets the WQ's SW metadata.
+In striding RQ mode, we maintain a field that reflects the actual
+expected WQ head (including in-progress WQEs posted to the ICOSQ).
+It was mistakenly not reset together with the WQ. Fix this here.
+
+Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev,
+ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
+ {
+-      if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++      if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+               mlx5_wq_ll_reset(&rq->mpwqe.wq);
+-      else
++              rq->mpwqe.actual_wq_head = 0;
++      } else {
+               mlx5_wq_cyc_reset(&rq->wqe.wq);
++      }
+ }
+ /* SW parser related functions */
diff --git a/queue-5.5/net-mlx5e-ktls-fix-tcp-seq-off-by-1-issue-in-tx-resync-flow.patch b/queue-5.5/net-mlx5e-ktls-fix-tcp-seq-off-by-1-issue-in-tx-resync-flow.patch
new file mode 100644 (file)
index 0000000..58860e0
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Thu, 20 Feb 2020 13:40:24 +0200
+Subject: net/mlx5e: kTLS, Fix TCP seq off-by-1 issue in TX resync flow
+
+From: Tariq Toukan <tariqt@mellanox.com>
+
+[ Upstream commit 56917766def72f5afdf4235adb91b6897ff26d9d ]
+
+We have an off-by-1 issue in the TCP seq comparison.
+The last sequence number that belongs to the TCP packet's payload
+is not "start_seq + len", but one byte before it.
+Fix it so the 'ends_before' is evaluated properly.
+
+This fixes a bug that results in error completions in the
+kTLS HW offload flows.
+
+Fixes: ffbd9ca94e2e ("net/mlx5e: kTLS, Fix corner-case checks in TX resync flow")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Boris Pismenny <borisp@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offlo
+        *    this packet was already acknowledged and its record info
+        *    was released.
+        */
+-      ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
++      ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));
+       if (unlikely(tls_record_is_start_marker(record))) {
+               ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
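
The sequence-range reasoning behind the one-byte change above can be checked
with a small standalone program; the before() helper below mirrors the kernel's
wrap-safe comparison, and the numbers are made up for illustration.

        #include <stdint.h>
        #include <stdio.h>

        /* Wrap-safe "a is sequence-wise earlier than b", like the kernel helper. */
        static int before(uint32_t a, uint32_t b)
        {
                return (int32_t)(a - b) < 0;
        }

        int main(void)
        {
                uint32_t tcp_seq = 1000, datalen = 100;
                uint32_t record_start = 1100;   /* record begins right after the packet */

                /* Old check: compares the byte *after* the payload (1100 vs 1100). */
                printf("old ends_before = %d\n", before(tcp_seq + datalen, record_start));
                /* Fixed check: compares the last payload byte (1099 vs 1100). */
                printf("new ends_before = %d\n", before(tcp_seq + datalen - 1, record_start));
                return 0;
        }
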
diff --git a/queue-5.5/net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch b/queue-5.5/net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch
new file mode 100644 (file)
index 0000000..da60297
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Date: Mon, 16 Mar 2020 22:56:36 +0800
+Subject: net: mvneta: Fix the case where the last poll did not process all rx
+
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+
+[ Upstream commit 065fd83e1be2e1ba0d446a257fd86a3cc7bddb51 ]
+
+For the case where the last mvneta_poll did not process all
+RX packets, we need to OR in the saved pp->cause_rx_tx or
+port->cause_rx_tx before calculating the rx_queue.
+
+Fixes: 2dcf75e2793c ("net: mvneta: Associate RX queues with each CPU")
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struc
+       /* For the case where the last mvneta_poll did not process all
+        * RX packets
+        */
+-      rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+-
+       cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
+               port->cause_rx_tx;
++      rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+       if (rx_queue) {
+               rx_queue = rx_queue - 1;
+               if (pp->bm_priv)
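
A standalone sketch of why the ordering matters, assuming made-up cause bits
(bit 9 standing for a pending RX queue): computing the queue index before
merging the saved cause bits loses the leftover work. Userspace C, not the
driver itself.

        #include <stdio.h>

        /* fls()-like helper: index of the highest set bit, 0 if none.
         * __builtin_clz is a GCC/Clang builtin, used here for brevity. */
        static int fls32(unsigned int x)
        {
                return x ? 32 - __builtin_clz(x) : 0;
        }

        int main(void)
        {
                unsigned int cause_rx_tx = 0;        /* nothing new in this interrupt */
                unsigned int saved_cause = 1u << 9;  /* RX work left from the last poll */
                int rx_queue_old, rx_queue_new;

                /* Old order: queue index computed before the saved bits are merged. */
                rx_queue_old = fls32((cause_rx_tx >> 8) & 0xff);

                /* New order: merge first, then compute the queue index. */
                cause_rx_tx |= saved_cause;
                rx_queue_new = fls32((cause_rx_tx >> 8) & 0xff);

                printf("old=%d new=%d\n", rx_queue_old, rx_queue_new);  /* 0 vs 2 */
                return 0;
        }
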
diff --git a/queue-5.5/net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch b/queue-5.5/net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch
new file mode 100644 (file)
index 0000000..97f7d45
--- /dev/null
@@ -0,0 +1,157 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 13 Mar 2020 12:18:09 -0400
+Subject: net/packet: tpacket_rcv: avoid a producer race condition
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 61fad6816fc10fb8793a925d5c1256d1c3db0cd2 ]
+
+PACKET_RX_RING can cause multiple writers to access the same slot if a
+fast writer wraps the ring while a slow writer is still copying. This
+is particularly likely with few, large, slots (e.g., GSO packets).
+
+Synchronize kernel thread ownership of rx ring slots with a bitmap.
+
+Writers acquire a slot race-free by testing tp_status TP_STATUS_KERNEL
+while holding the sk receive queue lock. They release this lock before
+copying and set tp_status to TP_STATUS_USER to release to userspace
+when done. During copying, another writer may take the lock, also see
+TP_STATUS_KERNEL, and start writing to the same slot.
+
+Introduce a new rx_owner_map bitmap with a bit per slot. To acquire a
+slot, test and set with the lock held. To release race-free, update
+tp_status and owner bit as a transaction, so take the lock again.
+
+This is one of a variety of options discussed (see Link below):
+
+* instead of a shadow ring, embed the data in the slot itself, such as
+in tp_padding. But any test for this field may match a value left by
+userspace, causing deadlock.
+
+* avoid the lock on release. This leaves a small race if releasing the
+shadow slot before setting TP_STATUS_USER. The below reproducer showed
+that this race is not academic. If releasing the slot after tp_status,
+the race is more subtle. See the first link for details.
+
+* add a new tp_status TP_KERNEL_OWNED to avoid the transactional store
+of two fields. But, legacy applications may interpret all non-zero
+tp_status as owned by the user, as libpcap does. So this is possible
+only as an opt-in for newer processes. It can be added as an optional mode.
+
+* embed the struct at the tail of pg_vec to avoid extra allocation.
+The implementation proved no less complex than a separate field.
+
+The additional locking cost on release adds contention, no different
+from scaling on multicore or multiqueue h/w. In practice, neither the
+reproducer below nor a small-packet tcpdump showed a noticeable change
+in the perf report in cycles spent in the spinlock. Where contention is
+problematic, packet sockets support mitigation through PACKET_FANOUT.
+And we can consider adding opt-in state TP_KERNEL_OWNED.
+
+Easy to reproduce by running multiple netperf or similar TCP_STREAM
+flows concurrently with `tcpdump -B 129 -n greater 60000`.
+
+Based on an earlier patchset by Jon Rosen. See links below.
+
+I believe this issue goes back to the introduction of tpacket_rcv,
+which predates git history.
+
+Link: https://www.mail-archive.com/netdev@vger.kernel.org/msg237222.html
+Suggested-by: Jon Rosen <jrosen@cisco.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Jon Rosen <jrosen@cisco.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c |   21 +++++++++++++++++++++
+ net/packet/internal.h  |    5 ++++-
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2172,6 +2172,7 @@ static int tpacket_rcv(struct sk_buff *s
+       struct timespec ts;
+       __u32 ts_status;
+       bool is_drop_n_account = false;
++      unsigned int slot_id = 0;
+       bool do_vnet = false;
+       /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
+@@ -2274,6 +2275,13 @@ static int tpacket_rcv(struct sk_buff *s
+       if (!h.raw)
+               goto drop_n_account;
++      if (po->tp_version <= TPACKET_V2) {
++              slot_id = po->rx_ring.head;
++              if (test_bit(slot_id, po->rx_ring.rx_owner_map))
++                      goto drop_n_account;
++              __set_bit(slot_id, po->rx_ring.rx_owner_map);
++      }
++
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+@@ -2379,7 +2387,10 @@ static int tpacket_rcv(struct sk_buff *s
+ #endif
+       if (po->tp_version <= TPACKET_V2) {
++              spin_lock(&sk->sk_receive_queue.lock);
+               __packet_set_status(po, h.raw, status);
++              __clear_bit(slot_id, po->rx_ring.rx_owner_map);
++              spin_unlock(&sk->sk_receive_queue.lock);
+               sk->sk_data_ready(sk);
+       } else {
+               prb_clear_blk_fill_status(&po->rx_ring);
+@@ -4276,6 +4287,7 @@ static int packet_set_ring(struct sock *
+ {
+       struct pgv *pg_vec = NULL;
+       struct packet_sock *po = pkt_sk(sk);
++      unsigned long *rx_owner_map = NULL;
+       int was_running, order = 0;
+       struct packet_ring_buffer *rb;
+       struct sk_buff_head *rb_queue;
+@@ -4361,6 +4373,12 @@ static int packet_set_ring(struct sock *
+                       }
+                       break;
+               default:
++                      if (!tx_ring) {
++                              rx_owner_map = bitmap_alloc(req->tp_frame_nr,
++                                      GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
++                              if (!rx_owner_map)
++                                      goto out_free_pg_vec;
++                      }
+                       break;
+               }
+       }
+@@ -4390,6 +4408,8 @@ static int packet_set_ring(struct sock *
+               err = 0;
+               spin_lock_bh(&rb_queue->lock);
+               swap(rb->pg_vec, pg_vec);
++              if (po->tp_version <= TPACKET_V2)
++                      swap(rb->rx_owner_map, rx_owner_map);
+               rb->frame_max = (req->tp_frame_nr - 1);
+               rb->head = 0;
+               rb->frame_size = req->tp_frame_size;
+@@ -4421,6 +4441,7 @@ static int packet_set_ring(struct sock *
+       }
+ out_free_pg_vec:
++      bitmap_free(rx_owner_map);
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -70,7 +70,10 @@ struct packet_ring_buffer {
+       unsigned int __percpu   *pending_refcnt;
+-      struct tpacket_kbdq_core        prb_bdqc;
++      union {
++              unsigned long                   *rx_owner_map;
++              struct tpacket_kbdq_core        prb_bdqc;
++      };
+ };
+ extern struct mutex fanout_mutex;
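
A simplified userspace model of the slot-ownership scheme described above,
using a pthread mutex in place of the receive-queue spinlock and plain arrays
in place of the ring; it only shows the acquire/release discipline, not the
af_packet implementation, and all names here are invented.

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define SLOTS 8

        static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
        static bool owner_map[SLOTS];   /* stands in for rx_owner_map */
        static int  status[SLOTS];      /* 0 = kernel owns, 1 = handed to user space */

        /* Acquire a slot race-free: test-and-set the owner bit under the lock. */
        static int acquire_slot(int head)
        {
                int ret = -1;

                pthread_mutex_lock(&queue_lock);
                if (!owner_map[head] && status[head] == 0) {
                        owner_map[head] = true;
                        ret = head;
                }
                pthread_mutex_unlock(&queue_lock);
                return ret;     /* -1 means drop: another writer still owns it */
        }

        /* Release: publish the user status and clear the owner bit as one step. */
        static void release_slot(int slot)
        {
                pthread_mutex_lock(&queue_lock);
                status[slot] = 1;
                owner_map[slot] = false;
                pthread_mutex_unlock(&queue_lock);
        }

        int main(void)
        {
                int s = acquire_slot(0);

                if (s >= 0) {
                        /* ...copy packet data into the slot here... */
                        release_slot(s);
                }
                printf("slot 0: status=%d owner=%d\n", status[0], (int)owner_map[0]);
                return 0;
        }
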
diff --git a/queue-5.5/net-phy-dp83867-w-a-for-fld-detect-threshold-bootstrapping-issue.patch b/queue-5.5/net-phy-dp83867-w-a-for-fld-detect-threshold-bootstrapping-issue.patch
new file mode 100644 (file)
index 0000000..ba0c37f
--- /dev/null
@@ -0,0 +1,76 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Grygorii Strashko <grygorii.strashko@ti.com>
+Date: Tue, 17 Mar 2020 20:04:54 +0200
+Subject: net: phy: dp83867: w/a for fld detect threshold bootstrapping issue
+
+From: Grygorii Strashko <grygorii.strashko@ti.com>
+
+[ Upstream commit 749f6f6843115b424680f1aada3c0dd613ad807c ]
+
+When the DP83867 PHY is strapped to enable the Fast Link Drop (FLD) feature
+via STRAP_STS2.STRAP_FLD (reg 0x006F bit 10), the Energy Lost Threshold for
+FLD Energy Lost Mode, FLD_THR_CFG.ENERGY_LOST_FLD_THR (reg 0x002e bits 2:0),
+defaults to 0x2. This may cause the PHY link to be unstable. The new
+DP83867 DM recommends always restoring ENERGY_LOST_FLD_THR to 0x1.
+
+Hence, restore the default value of FLD_THR_CFG.ENERGY_LOST_FLD_THR to 0x1
+when FLD is enabled by bootstrapping, as recommended by the DM.
+
+Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83867.c |   21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -28,7 +28,8 @@
+ #define DP83867_CTRL          0x1f
+ /* Extended Registers */
+-#define DP83867_CFG4            0x0031
++#define DP83867_FLD_THR_CFG   0x002e
++#define DP83867_CFG4          0x0031
+ #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
+ #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS   (3 << 5)
+ #define DP83867_CFG4_SGMII_ANEG_TIMER_800US  (2 << 5)
+@@ -91,6 +92,7 @@
+ #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK   GENMASK(2, 0)
+ #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT  0
+ #define DP83867_STRAP_STS2_CLK_SKEW_NONE      BIT(2)
++#define DP83867_STRAP_STS2_STRAP_FLD          BIT(10)
+ /* PHY CTRL bits */
+ #define DP83867_PHYCR_FIFO_DEPTH_SHIFT                14
+@@ -123,6 +125,9 @@
+ /* CFG4 bits */
+ #define DP83867_CFG4_PORT_MIRROR_EN              BIT(0)
++/* FLD_THR_CFG */
++#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK      0x7
++
+ enum {
+       DP83867_PORT_MIRROING_KEEP,
+       DP83867_PORT_MIRROING_EN,
+@@ -459,6 +464,20 @@ static int dp83867_config_init(struct ph
+               phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+                                  BIT(7));
++      bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
++      if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
++              /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
++               * be set to 0x2. This may causes the PHY link to be unstable -
++               * the default value 0x1 need to be restored.
++               */
++              ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
++                                   DP83867_FLD_THR_CFG,
++                                   DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
++                                   0x1);
++              if (ret)
++                      return ret;
++      }
++
+       if (phy_interface_is_rgmii(phydev)) {
+               val = phy_read(phydev, MII_DP83867_PHYCTRL);
+               if (val < 0)
diff --git a/queue-5.5/net-phy-mdio-bcm-unimac-fix-clock-handling.patch b/queue-5.5/net-phy-mdio-bcm-unimac-fix-clock-handling.patch
new file mode 100644 (file)
index 0000000..a5deb66
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Tue, 24 Mar 2020 16:10:10 +0000
+Subject: net: phy: mdio-bcm-unimac: Fix clock handling
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+[ Upstream commit c312c7818b86b663d32ec5d4b512abf06b23899a ]
+
+The DT binding for this PHY describes an *optional* clock property.
+Due to a bug in the error handling logic, we are actually ignoring this
+clock *all* of the time so far.
+
+Fix this by using devm_clk_get_optional() to handle this clock properly.
+
+Fixes: b78ac6ecd1b6b ("net: phy: mdio-bcm-unimac: Allow configuring MDIO clock divider")
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/mdio-bcm-unimac.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/phy/mdio-bcm-unimac.c
++++ b/drivers/net/phy/mdio-bcm-unimac.c
+@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct plat
+               return -ENOMEM;
+       }
+-      priv->clk = devm_clk_get(&pdev->dev, NULL);
+-      if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
++      priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
++      if (IS_ERR(priv->clk))
+               return PTR_ERR(priv->clk);
+-      else
+-              priv->clk = NULL;
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
diff --git a/queue-5.5/net-phy-mdio-mux-bcm-iproc-check-clk_prepare_enable-return-value.patch b/queue-5.5/net-phy-mdio-mux-bcm-iproc-check-clk_prepare_enable-return-value.patch
new file mode 100644 (file)
index 0000000..f1cfada
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+Date: Tue, 17 Mar 2020 10:24:35 +0530
+Subject: net: phy: mdio-mux-bcm-iproc: check clk_prepare_enable() return value
+
+From: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+
+[ Upstream commit 872307abbd0d9afd72171929806c2fa33dc34179 ]
+
+Check clk_prepare_enable() return value.
+
+Fixes: 2c7230446bc9 ("net: phy: Add pm support to Broadcom iProc mdio mux driver")
+Signed-off-by: Rayagonda Kokatanur <rayagonda.kokatanur@broadcom.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/mdio-mux-bcm-iproc.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
+@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct
+ static int mdio_mux_iproc_resume(struct device *dev)
+ {
+       struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
++      int rc;
+-      clk_prepare_enable(md->core_clk);
++      rc = clk_prepare_enable(md->core_clk);
++      if (rc) {
++              dev_err(md->dev, "failed to enable core clk\n");
++              return rc;
++      }
+       mdio_mux_iproc_config(md);
+       return 0;
diff --git a/queue-5.5/net-qmi_wwan-add-support-for-askey-wwhc050.patch b/queue-5.5/net-qmi_wwan-add-support-for-askey-wwhc050.patch
new file mode 100644 (file)
index 0000000..9702c99
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Fri, 20 Mar 2020 21:46:14 +0100
+Subject: net: qmi_wwan: add support for ASKEY WWHC050
+
+From: Pawel Dembicki <paweldembicki@gmail.com>
+
+[ Upstream commit 12a5ba5a1994568d4ceaff9e78c6b0329d953386 ]
+
+ASKEY WWHC050 is a mcie LTE modem.
+The oem configuration states:
+
+T:  Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  2 Spd=480  MxCh= 0
+D:  Ver= 2.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=1690 ProdID=7588 Rev=ff.ff
+S:  Manufacturer=Android
+S:  Product=Android
+S:  SerialNumber=813f0eef6e6e
+C:* #Ifs= 6 Cfg#= 1 Atr=80 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+E:  Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E:  Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=84(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+E:  Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E:  Ad=86(I) Atr=03(Int.) MxPS=  10 Ivl=32ms
+E:  Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E:  Ad=88(I) Atr=03(Int.) MxPS=   8 Ivl=32ms
+E:  Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 5 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none)
+E:  Ad=89(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E:  Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=125us
+
+Tested on openwrt distribution.
+
+Signed-off-by: Cezary Jackiewicz <cezary@eko.one.pl>
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1210,6 +1210,7 @@ static const struct usb_device_id produc
+       {QMI_FIXED_INTF(0x1435, 0xd182, 5)},    /* Wistron NeWeb D18 */
+       {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
+       {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
++      {QMI_FIXED_INTF(0x1690, 0x7588, 4)},    /* ASKEY WWHC050 */
+       {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
+       {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
+       {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
diff --git a/queue-5.5/net-sched-act_ct-fix-leak-of-ct-zone-template-on-replace.patch b/queue-5.5/net-sched-act_ct-fix-leak-of-ct-zone-template-on-replace.patch
new file mode 100644 (file)
index 0000000..2482549
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Paul Blakey <paulb@mellanox.com>
+Date: Wed, 18 Mar 2020 12:50:33 +0200
+Subject: net/sched: act_ct: Fix leak of ct zone template on replace
+
+From: Paul Blakey <paulb@mellanox.com>
+
+[ Upstream commit dd2af10402684cb5840a127caec9e7cdcff6d167 ]
+
+Currently, on replace, the previous action instance params
+is swapped with a newly allocated params. The old params is
+only freed (via kfree_rcu), without releasing the allocated
+ct zone template related to it.
+
+Call tcf_ct_params_free (via call_rcu) for the old params,
+so it will release it.
+
+Fixes: b57dc7c13ea9 ("net/sched: Introduce action ct")
+Signed-off-by: Paul Blakey <paulb@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_ct.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net,
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+       if (params)
+-              kfree_rcu(params, rcu);
++              call_rcu(&params->rcu, tcf_ct_params_free);
+       if (res == ACT_P_CREATED)
+               tcf_idr_insert(tn, *a);
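
A rough userspace stand-in for the leak pattern being fixed above: freeing only
the parameter block (the kfree_rcu analogue) leaks the referenced template,
while running a full destructor (the call_rcu + tcf_ct_params_free analogue)
releases it too. The structures and refcounting here are invented for
illustration, with no real RCU involved.

        #include <stdio.h>
        #include <stdlib.h>

        struct zone_tmpl { int refcnt; };

        struct ct_params {
                struct zone_tmpl *tmpl;         /* owned reference */
        };

        /* Full destructor: drops the template reference before freeing params. */
        static void params_free(struct ct_params *p)
        {
                if (p->tmpl && --p->tmpl->refcnt == 0)
                        free(p->tmpl);
                free(p);
        }

        int main(void)
        {
                struct zone_tmpl *t = calloc(1, sizeof(*t));
                struct ct_params *old = calloc(1, sizeof(*old));

                if (!t || !old)
                        return 1;
                t->refcnt = 1;
                old->tmpl = t;

                /* Leak: free(old) alone would leave t referenced forever.
                 * Fix:  retire the old params through the full destructor. */
                params_free(old);
                return 0;
        }
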
diff --git a/queue-5.5/net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch b/queue-5.5/net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch
new file mode 100644 (file)
index 0000000..66964f9
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Emil Renner Berthing <kernel@esmil.dk>
+Date: Sat, 21 Mar 2020 15:36:19 +0100
+Subject: net: stmmac: dwmac-rk: fix error path in rk_gmac_probe
+
+From: Emil Renner Berthing <kernel@esmil.dk>
+
+[ Upstream commit 9de9aa487daff7a5c73434c24269b44ed6a428e6 ]
+
+Make sure we clean up devicetree related configuration
+also when clock init fails.
+
+Fixes: fecd4d7eef8b ("net: stmmac: dwmac-rk: Add integrated PHY support")
+Signed-off-by: Emil Renner Berthing <kernel@esmil.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform
+       ret = rk_gmac_clk_init(plat_dat);
+       if (ret)
+-              return ret;
++              goto err_remove_config_dt;
+       ret = rk_gmac_powerup(plat_dat->bsp_priv);
+       if (ret)
diff --git a/queue-5.5/net_sched-cls_route-remove-the-right-filter-from-hashtable.patch b/queue-5.5/net_sched-cls_route-remove-the-right-filter-from-hashtable.patch
new file mode 100644 (file)
index 0000000..f2851fb
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Fri, 13 Mar 2020 22:29:54 -0700
+Subject: net_sched: cls_route: remove the right filter from hashtable
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit ef299cc3fa1a9e1288665a9fdc8bff55629fd359 ]
+
+route4_change() allocates a new filter and copies values from
+the old one. After the new filter is inserted into the hash
+table, the old filter should be removed and freed, as the final
+step of the update.
+
+However, the current code mistakenly removes the new one. This is
+apparently wrong, and it causes a double free and a use-after-free too,
+as reported by syzbot.
+
+Reported-and-tested-by: syzbot+f9b32aaacd60305d9687@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+2f8c233f131943d6056d@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+9c2df9fd5e9445b74e01@syzkaller.appspotmail.com
+Fixes: 1109c00547fc ("net: sched: RCU cls_route")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_route.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sched/cls_route.c
++++ b/net/sched/cls_route.c
+@@ -534,8 +534,8 @@ static int route4_change(struct net *net
+                       fp = &b->ht[h];
+                       for (pfp = rtnl_dereference(*fp); pfp;
+                            fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
+-                              if (pfp == f) {
+-                                      *fp = f->next;
++                              if (pfp == fold) {
++                                      rcu_assign_pointer(*fp, fold->next);
+                                       break;
+                               }
+                       }
diff --git a/queue-5.5/net_sched-hold-rtnl-lock-in-tcindex_partial_destroy_work.patch b/queue-5.5/net_sched-hold-rtnl-lock-in-tcindex_partial_destroy_work.patch
new file mode 100644 (file)
index 0000000..c58418d
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 11 Mar 2020 22:42:27 -0700
+Subject: net_sched: hold rtnl lock in tcindex_partial_destroy_work()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit b1be2e8cd290f620777bfdb8aa00890cd2fa02b5 ]
+
+syzbot reported a use-after-free in tcindex_dump(). This is due to
+the lack of RTNL in the deferred rcu work. We queue this work with
+RTNL in tcindex_change(), later, tcindex_dump() is called:
+
+        fh = tp->ops->get(tp, t->tcm_handle);
+       ...
+        err = tp->ops->change(..., &fh, ...);
+        tfilter_notify(..., fh, ...);
+
+but there is nothing to serialize the pending
+tcindex_partial_destroy_work() with tcindex_dump().
+
+Fix this by simply holding RTNL in tcindex_partial_destroy_work(),
+so that it won't be called until RTNL is released after
+tc_new_tfilter() is completed.
+
+Reported-and-tested-by: syzbot+653090db2562495901dc@syzkaller.appspotmail.com
+Fixes: 3d210534cc93 ("net_sched: fix a race condition in tcindex_destroy()")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_tcindex.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work
+                                             struct tcindex_data,
+                                             rwork);
++      rtnl_lock();
+       kfree(p->perfect);
+       kfree(p);
++      rtnl_unlock();
+ }
+ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
diff --git a/queue-5.5/net_sched-keep-alloc_hash-updated-after-hash-allocation.patch b/queue-5.5/net_sched-keep-alloc_hash-updated-after-hash-allocation.patch
new file mode 100644 (file)
index 0000000..abfeb60
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 11 Mar 2020 22:42:28 -0700
+Subject: net_sched: keep alloc_hash updated after hash allocation
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 0d1c3530e1bd38382edef72591b78e877e0edcd3 ]
+
+In commit 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex")
+I moved cp->hash calculation before the first
+tcindex_alloc_perfect_hash(), but cp->alloc_hash is left untouched.
+This difference could lead to another out-of-bounds access.
+
+cp->alloc_hash should always be the size actually allocated, so we should
+update it after this tcindex_alloc_perfect_hash().
+
+Reported-and-tested-by: syzbot+dcc34d54d68ef7d2d53d@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+c72da7b9ed57cde6fca2@syzkaller.appspotmail.com
+Fixes: 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_tcindex.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -359,6 +359,7 @@ tcindex_set_parms(struct net *net, struc
+               if (tcindex_alloc_perfect_hash(net, cp) < 0)
+                       goto errout;
++              cp->alloc_hash = cp->hash;
+               for (i = 0; i < min(cp->hash, p->hash); i++)
+                       cp->perfect[i].res = p->perfect[i].res;
+               balloc = 1;
diff --git a/queue-5.5/nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch b/queue-5.5/nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch
new file mode 100644 (file)
index 0000000..ff4d5b1
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 20 Mar 2020 16:21:17 +0300
+Subject: NFC: fdp: Fix a signedness bug in fdp_nci_send_patch()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 0dcdf9f64028ec3b75db6b691560f8286f3898bf ]
+
+The nci_conn_max_data_pkt_payload_size() function sometimes returns
+-EPROTO so "max_size" needs to be signed for the error handling to
+work.  We can make "payload_size" an int as well.
+
+Fixes: a06347c04c13 ("NFC: Add Intel Fields Peak NFC solution driver")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nfc/fdp/fdp.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/nfc/fdp/fdp.c
++++ b/drivers/nfc/fdp/fdp.c
+@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci
+       const struct firmware *fw;
+       struct sk_buff *skb;
+       unsigned long len;
+-      u8 max_size, payload_size;
++      int max_size, payload_size;
+       int rc = 0;
+       if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
+@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci
+       while (len) {
+-              payload_size = min_t(unsigned long, (unsigned long) max_size,
+-                                   len);
++              payload_size = min_t(unsigned long, max_size, len);
+               skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
+                                   GFP_KERNEL);
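
A tiny standalone demonstration of the signedness problem described above:
storing a negative error code in a u8 turns it into a large positive number, so
a later '< 0' check can never trigger. The error value and helper are
hypothetical, not the driver code.

        #include <stdint.h>
        #include <stdio.h>

        #define EPROTO 71

        static int query_max_payload(void)
        {
                return -EPROTO;                 /* pretend the size query failed */
        }

        int main(void)
        {
                uint8_t max_u8  = query_max_payload();  /* wraps around to 185 */
                int     max_int = query_max_payload();  /* stays -71 */

                printf("u8:  %u  ('< 0' error check can never fire)\n",
                       (unsigned int)max_u8);
                printf("int: %d  (error is detected)\n", max_int);
                return 0;
        }
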
diff --git a/queue-5.5/r8169-re-enable-msi-on-rtl8168c.patch b/queue-5.5/r8169-re-enable-msi-on-rtl8168c.patch
new file mode 100644 (file)
index 0000000..243b99f
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Tue, 24 Mar 2020 20:58:29 +0100
+Subject: r8169: re-enable MSI on RTL8168c
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit f13bc68131b0c0d67a77fb43444e109828a983bf ]
+
+The original change fixed an issue on RTL8168b by mimicking the vendor
+driver behavior to disable MSI on chip versions before RTL8168d.
+This however now caused an issue on a system with RTL8168c, see [0].
+Therefore leave MSI disabled on RTL8168b, but re-enable it on RTL8168c.
+
+[0] https://bugzilla.redhat.com/show_bug.cgi?id=1792839
+
+Fixes: 003bd5b4a7b4 ("r8169: don't use MSI before RTL8168d")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -6579,7 +6579,7 @@ static int rtl_alloc_irq(struct rtl8169_
+               RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
+               rtl_lock_config_regs(tp);
+               /* fall through */
+-      case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
++      case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
+               flags = PCI_IRQ_LEGACY;
+               break;
+       default:
diff --git a/queue-5.5/revert-net-bcmgenet-use-rgmii-loopback-for-mac-reset.patch b/queue-5.5/revert-net-bcmgenet-use-rgmii-loopback-for-mac-reset.patch
new file mode 100644 (file)
index 0000000..05a4ae3
--- /dev/null
@@ -0,0 +1,89 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Doug Berger <opendmb@gmail.com>
+Date: Mon, 16 Mar 2020 14:44:55 -0700
+Subject: Revert "net: bcmgenet: use RGMII loopback for MAC reset"
+
+From: Doug Berger <opendmb@gmail.com>
+
+[ Upstream commit 612eb1c3b9e504de24136c947ed7c07bc342f3aa ]
+
+This reverts commit 3a55402c93877d291b0a612d25edb03d1b4b93ac.
+
+This is not a good solution when connecting to an external switch
+that may not support the isolation of the TXC signal, resulting in
+output driver contention on the pin.
+
+A different solution is necessary.
+
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c |    2 +
+ drivers/net/ethernet/broadcom/genet/bcmmii.c   |   34 -------------------------
+ 2 files changed, 2 insertions(+), 34 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1996,6 +1996,8 @@ static void reset_umac(struct bcmgenet_p
+       /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
+       bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
++      udelay(2);
++      bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ }
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -181,38 +181,8 @@ int bcmgenet_mii_config(struct net_devic
+       const char *phy_name = NULL;
+       u32 id_mode_dis = 0;
+       u32 port_ctrl;
+-      int bmcr = -1;
+-      int ret;
+       u32 reg;
+-      /* MAC clocking workaround during reset of umac state machines */
+-      reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+-      if (reg & CMD_SW_RESET) {
+-              /* An MII PHY must be isolated to prevent TXC contention */
+-              if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+-                      ret = phy_read(phydev, MII_BMCR);
+-                      if (ret >= 0) {
+-                              bmcr = ret;
+-                              ret = phy_write(phydev, MII_BMCR,
+-                                              bmcr | BMCR_ISOLATE);
+-                      }
+-                      if (ret) {
+-                              netdev_err(dev, "failed to isolate PHY\n");
+-                              return ret;
+-                      }
+-              }
+-              /* Switch MAC clocking to RGMII generated clock */
+-              bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+-              /* Ensure 5 clks with Rx disabled
+-               * followed by 5 clks with Reset asserted
+-               */
+-              udelay(4);
+-              reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
+-              bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+-              /* Ensure 5 more clocks before Rx is enabled */
+-              udelay(2);
+-      }
+-
+       switch (priv->phy_interface) {
+       case PHY_INTERFACE_MODE_INTERNAL:
+               phy_name = "internal PHY";
+@@ -282,10 +252,6 @@ int bcmgenet_mii_config(struct net_devic
+       bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+-      /* Restore the MII PHY after isolation */
+-      if (bmcr >= 0)
+-              phy_write(phydev, MII_BMCR, bmcr);
+-
+       priv->ext_phy = !priv->internal_phy &&
+                       (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
diff --git a/queue-5.5/series b/queue-5.5/series
index d6f23cedb34d863c821ccea9bbdcbc5586930304..eab38a260f359b86c27434568b2ccd27b50a0cc8 100644 (file)
@@ -5,3 +5,59 @@ mmc-sdhci-omap-fix-busy-detection-by-enabling-mmc_ca.patch
 mmc-sdhci-tegra-fix-busy-detection-by-enabling-mmc_c.patch
 crypto-chacha20poly1305-add-back-missing-test-vectors-and-test-chunking.patch
 crypto-arm64-chacha-correctly-walk-through-blocks.patch
+cxgb4-fix-throughput-drop-during-tx-backpressure.patch
+cxgb4-fix-txq-restart-check-during-backpressure.patch
+geneve-move-debug-check-after-netdev-unregister.patch
+hsr-fix-general-protection-fault-in-hsr_addr_is_self.patch
+ipv4-fix-a-rcu-list-lock-in-inet_dump_fib.patch
+macsec-restrict-to-ethernet-devices.patch
+mlxsw-pci-only-issue-reset-when-system-is-ready.patch
+mlxsw-spectrum_mr-fix-list-iteration-in-error-path.patch
+net-bpfilter-fix-dprintf-usage-for-dev-kmsg.patch
+net-cbs-fix-software-cbs-to-consider-packet-sending-time.patch
+net-dsa-fix-duplicate-frames-flooded-by-learning.patch
+net-dsa-mt7530-change-the-link-bit-to-reflect-the-link-status.patch
+net-dsa-tag_8021q-replace-dsa_8021q_remove_header-with-__skb_vlan_pop.patch
+net-ena-add-pci-shutdown-handler-to-allow-safe-kexec.patch
+net-mvneta-fix-the-case-where-the-last-poll-did-not-process-all-rx.patch
+net-packet-tpacket_rcv-avoid-a-producer-race-condition.patch
+net-phy-dp83867-w-a-for-fld-detect-threshold-bootstrapping-issue.patch
+net-phy-mdio-bcm-unimac-fix-clock-handling.patch
+net-phy-mdio-mux-bcm-iproc-check-clk_prepare_enable-return-value.patch
+net-qmi_wwan-add-support-for-askey-wwhc050.patch
+net-sched-act_ct-fix-leak-of-ct-zone-template-on-replace.patch
+net_sched-cls_route-remove-the-right-filter-from-hashtable.patch
+net_sched-hold-rtnl-lock-in-tcindex_partial_destroy_work.patch
+net_sched-keep-alloc_hash-updated-after-hash-allocation.patch
+net-stmmac-dwmac-rk-fix-error-path-in-rk_gmac_probe.patch
+nfc-fdp-fix-a-signedness-bug-in-fdp_nci_send_patch.patch
+r8169-re-enable-msi-on-rtl8168c.patch
+slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch
+tcp-also-null-skb-dev-when-copy-was-needed.patch
+tcp-ensure-skb-dev-is-null-before-leaving-tcp-stack.patch
+tcp-repair-fix-tcp_queue_seq-implementation.patch
+vxlan-check-return-value-of-gro_cells_init.patch
+revert-net-bcmgenet-use-rgmii-loopback-for-mac-reset.patch
+net-bcmgenet-keep-mac-in-reset-until-phy-is-up.patch
+bnxt_en-fix-priority-bytes-and-packets-counters-in-ethtool-s.patch
+bnxt_en-fix-memory-leaks-in-bnxt_dcbnl_ieee_getets.patch
+bnxt_en-return-error-if-bnxt_alloc_ctx_mem-fails.patch
+bnxt_en-free-context-memory-after-disabling-pci-in-probe-error-path.patch
+bnxt_en-reset-rings-if-ring-reservation-fails-during-open.patch
+net-ena-fix-incorrect-setting-of-the-number-of-msix-vectors.patch
+net-ena-fix-request-of-incorrect-number-of-irq-vectors.patch
+net-ena-avoid-memory-access-violation-by-validating-req_id-properly.patch
+net-ena-fix-continuous-keep-alive-resets.patch
+net-ip_gre-separate-erspan-newlink-changelink-callbacks.patch
+net-ip_gre-accept-ifla_info_data-less-configuration.patch
+hsr-use-rcu_read_lock-in-hsr_get_node_-list-status.patch
+hsr-add-restart-routine-into-hsr_get_node_list.patch
+hsr-set-.netnsok-flag.patch
+net-mlx5-dr-fix-postsend-actions-write-length.patch
+net-mlx5e-enhance-icosq-wqe-info-fields.patch
+net-mlx5e-fix-missing-reset-of-sw-metadata-in-striding-rq-reset.patch
+net-mlx5e-fix-icosq-recovery-flow-with-striding-rq.patch
+net-mlx5e-do-not-recover-from-a-non-fatal-syndrome.patch
+net-mlx5_core-set-ib-capability-mask1-to-fix-ib_srpt-connection-failure.patch
+net-mlx5e-ktls-fix-tcp-seq-off-by-1-issue-in-tx-resync-flow.patch
+net-mlx5e-fix-endianness-handling-in-pedit-mask.patch
diff --git a/queue-5.5/slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch b/queue-5.5/slcan-not-call-free_netdev-before-rtnl_unlock-in-slcan_open.patch
new file mode 100644 (file)
index 0000000..3da2218
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Sat, 21 Mar 2020 14:08:29 +0100
+Subject: slcan: not call free_netdev before rtnl_unlock in slcan_open
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit 2091a3d42b4f339eaeed11228e0cbe9d4f92f558 ]
+
+As the description before netdev_run_todo states, we cannot call
+free_netdev before rtnl_unlock; fix it by reordering the code.
+
+This patch is a 1:1 copy of upstream slip.c commit f596c87005f7
+("slip: not call free_netdev before rtnl_unlock in slip_open").
+
+Reported-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/slcan.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -625,7 +625,10 @@ err_free_chan:
+       tty->disc_data = NULL;
+       clear_bit(SLF_INUSE, &sl->flags);
+       slc_free_netdev(sl->dev);
++      /* do not call free_netdev before rtnl_unlock */
++      rtnl_unlock();
+       free_netdev(sl->dev);
++      return err;
+ err_exit:
+       rtnl_unlock();
diff --git a/queue-5.5/tcp-also-null-skb-dev-when-copy-was-needed.patch b/queue-5.5/tcp-also-null-skb-dev-when-copy-was-needed.patch
new file mode 100644 (file)
index 0000000..00e4720
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Florian Westphal <fw@strlen.de>
+Date: Fri, 20 Mar 2020 16:52:02 +0100
+Subject: tcp: also NULL skb->dev when copy was needed
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 07f8e4d0fddbf2f87e4cefb551278abc38db8cdd ]
+
+In rare cases retransmit logic will make a full skb copy, which will not
+trigger the zeroing added in the recent change
+b738a185beaa ("tcp: ensure skb->dev is NULL before leaving TCP stack").
+
+Cc: Eric Dumazet <edumazet@google.com>
+Fixes: 75c119afe14f ("tcp: implement rb-tree based retransmit queue")
+Fixes: 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2976,8 +2976,12 @@ int __tcp_retransmit_skb(struct sock *sk
+               tcp_skb_tsorted_save(skb) {
+                       nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+-                      err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+-                                   -ENOBUFS;
++                      if (nskb) {
++                              nskb->dev = NULL;
++                              err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
++                      } else {
++                              err = -ENOBUFS;
++                      }
+               } tcp_skb_tsorted_restore(skb);
+               if (!err) {
diff --git a/queue-5.5/tcp-ensure-skb-dev-is-null-before-leaving-tcp-stack.patch b/queue-5.5/tcp-ensure-skb-dev-is-null-before-leaving-tcp-stack.patch
new file mode 100644 (file)
index 0000000..8877e0b
--- /dev/null
@@ -0,0 +1,55 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 19 Mar 2020 12:49:55 -0700
+Subject: tcp: ensure skb->dev is NULL before leaving TCP stack
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b738a185beaab8728943acdb3e67371b8a88185e ]
+
+skb->rbnode shares three skb fields: next, prev, dev
+
+When a packet is sent, TCP keeps the original skb (master)
+in a rtx queue, which was converted to an rbtree a while back.
+
+__tcp_transmit_skb() is responsible for cloning the master skb,
+and add the TCP header to the clone before sending it
+to network layer.
+
+skb_clone() already clears skb->next and skb->prev, but copies
+the master oskb->dev into the clone.
+
+We need to clear skb->dev, otherwise lower layers could interpret
+the value as a pointer to a netdev.
+
+This old bug surfaced recently when commit 28f8bfd1ac94
+("netfilter: Support iif matches in POSTROUTING") was merged.
+
+Before this netfilter commit, the skb->dev value was ignored and
+changed before reaching dev_queue_xmit().
+
+Fixes: 75c119afe14f ("tcp: implement rb-tree based retransmit queue")
+Fixes: 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Martin Zaharinov <micron10@gmail.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1048,6 +1048,10 @@ static int __tcp_transmit_skb(struct soc
+               if (unlikely(!skb))
+                       return -ENOBUFS;
++              /* retransmit skbs might have a non zero value in skb->dev
++               * because skb->dev is aliased with skb->rbnode.rb_left
++               */
++              skb->dev = NULL;
+       }
+       inet = inet_sk(sk);
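
The field aliasing described in the changelog can be reproduced with a cut-down
layout; the struct below (C11 anonymous members) only mimics the idea that the
third pointer of the rbtree node overlays the dev pointer. It is not the real
sk_buff definition.

        #include <stdio.h>

        struct rb_node_like {
                void *rb_parent;
                void *rb_right;
                void *rb_left;
        };

        struct skb_like {
                union {
                        struct {
                                struct skb_like *next;
                                struct skb_like *prev;
                                void            *dev;   /* overlays rb_left */
                        };
                        struct rb_node_like rbnode;
                };
        };

        int main(void)
        {
                struct skb_like skb = { 0 };
                int tree_neighbour;

                /* The retransmit queue links the skb into an rbtree... */
                skb.rbnode.rb_left = &tree_neighbour;

                /* ...so anything reading ->dev later sees a tree pointer, which
                 * is why the clone's ->dev must be cleared explicitly. */
                printf("skb.dev = %p (not a net_device)\n", skb.dev);
                return 0;
        }
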
diff --git a/queue-5.5/tcp-repair-fix-tcp_queue_seq-implementation.patch b/queue-5.5/tcp-repair-fix-tcp_queue_seq-implementation.patch
new file mode 100644 (file)
index 0000000..c86fea7
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 18 Mar 2020 19:21:02 -0700
+Subject: tcp: repair: fix TCP_QUEUE_SEQ implementation
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6cd6cbf593bfa3ae6fc3ed34ac21da4d35045425 ]
+
+When an application uses the TCP_QUEUE_SEQ socket option to
+change tp->rcv_nxt, we must also update tp->copied_seq.
+
+Otherwise, stuff relying on tcp_inq() being precise can
+eventually be confused.
+
+For example, tcp_zerocopy_receive() might crash because
+it does not expect tcp_recv_skb() to return NULL.
+
+We could add tests in various places to fix the issue,
+or simply make sure tcp_inq() won't return a random value,
+and leave the fast path as it is.
+
+Note that this fixes ioctl(fd, SIOCINQ, &val) at the same
+time.
+
+Fixes: ee9952831cfd ("tcp: Initial repair mode")
+Fixes: 05255b823a61 ("tcp: add TCP_ZEROCOPY_RECEIVE support for zerocopy receive")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2947,8 +2947,10 @@ static int do_tcp_setsockopt(struct sock
+                       err = -EPERM;
+               else if (tp->repair_queue == TCP_SEND_QUEUE)
+                       WRITE_ONCE(tp->write_seq, val);
+-              else if (tp->repair_queue == TCP_RECV_QUEUE)
++              else if (tp->repair_queue == TCP_RECV_QUEUE) {
+                       WRITE_ONCE(tp->rcv_nxt, val);
++                      WRITE_ONCE(tp->copied_seq, val);
++              }
+               else
+                       err = -EINVAL;
+               break;
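
The invariant behind the fix is easiest to see as arithmetic: the pending-byte
count reported via SIOCINQ is roughly rcv_nxt - copied_seq, so rewriting only
rcv_nxt leaves a huge bogus difference. The numbers below are simplified
placeholders, not the kernel code.

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t rcv_nxt = 1000, copied_seq = 1000;  /* receive queue is empty */
                uint32_t new_seq = 5000;                     /* value set via TCP_QUEUE_SEQ */

                rcv_nxt = new_seq;                           /* old behaviour */
                printf("inq without fix: %u\n", rcv_nxt - copied_seq);  /* 4000, bogus */

                copied_seq = new_seq;                        /* the fix */
                printf("inq with fix:    %u\n", rcv_nxt - copied_seq);  /* 0 */
                return 0;
        }
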
diff --git a/queue-5.5/vxlan-check-return-value-of-gro_cells_init.patch b/queue-5.5/vxlan-check-return-value-of-gro_cells_init.patch
new file mode 100644 (file)
index 0000000..dc3698a
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Sat 28 Mar 2020 09:05:48 AM CET
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Wed, 18 Mar 2020 13:28:09 +0000
+Subject: vxlan: check return value of gro_cells_init()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 384d91c267e621e0926062cfb3f20cb72dc16928 ]
+
+gro_cells_init() returns an error if memory allocation fails.
+But the vxlan module doesn't check the return value of gro_cells_init().
+
+Fixes: 58ce31cca1ff ("vxlan: GRO support at tunnel layer")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2778,10 +2778,19 @@ static void vxlan_vs_add_dev(struct vxla
+ /* Setup stats when device is created */
+ static int vxlan_init(struct net_device *dev)
+ {
++      struct vxlan_dev *vxlan = netdev_priv(dev);
++      int err;
++
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!dev->tstats)
+               return -ENOMEM;
++      err = gro_cells_init(&vxlan->gro_cells, dev);
++      if (err) {
++              free_percpu(dev->tstats);
++              return err;
++      }
++
+       return 0;
+ }
+@@ -3042,8 +3051,6 @@ static void vxlan_setup(struct net_devic
+       vxlan->dev = dev;
+-      gro_cells_init(&vxlan->gro_cells, dev);
+-
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               spin_lock_init(&vxlan->hash_lock[h]);
+               INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
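
A generic sketch of the init/unwind pattern the fix above follows: if a later
allocation in an init callback fails, the earlier allocations must be released
before the error is returned. The structure, names and sizes below are
placeholders, not the vxlan driver.

        #include <stdlib.h>
        #include <stdio.h>

        struct dev_priv {
                void *tstats;           /* allocated first */
                void *gro_state;        /* allocated second, may fail */
        };

        static int dev_init(struct dev_priv *p)
        {
                p->tstats = calloc(1, 64);
                if (!p->tstats)
                        return -1;

                p->gro_state = calloc(1, 64);
                if (!p->gro_state) {
                        free(p->tstats);        /* unwind the earlier allocation */
                        p->tstats = NULL;
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                struct dev_priv p = { 0 };

                if (dev_init(&p) == 0)
                        printf("init ok\n");
                free(p.gro_state);
                free(p.tstats);
                return 0;
        }
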