5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 30 Oct 2021 13:18:06 +0000 (15:18 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 30 Oct 2021 13:18:06 +0000 (15:18 +0200)
added patches:
arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch
ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch
ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch
net-batman-adv-fix-error-handling.patch
net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch
net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch
net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch
net-prevent-infinite-while-loop-in-skb_tx_hash.patch
net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch
nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch
nvme-tcp-fix-data-digest-pointer-calculation.patch
nvmet-tcp-fix-data-digest-pointer-calculation.patch
phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch
phy-phy_start_aneg-add-an-unlocked-version.patch
rdma-mlx5-set-user-priority-for-dct.patch
rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch
regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch
tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch

19 files changed:
queue-5.4/arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch [new file with mode: 0644]
queue-5.4/ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch [new file with mode: 0644]
queue-5.4/ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch [new file with mode: 0644]
queue-5.4/net-batman-adv-fix-error-handling.patch [new file with mode: 0644]
queue-5.4/net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch [new file with mode: 0644]
queue-5.4/net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch [new file with mode: 0644]
queue-5.4/net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch [new file with mode: 0644]
queue-5.4/net-prevent-infinite-while-loop-in-skb_tx_hash.patch [new file with mode: 0644]
queue-5.4/net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch [new file with mode: 0644]
queue-5.4/nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch [new file with mode: 0644]
queue-5.4/nvme-tcp-fix-data-digest-pointer-calculation.patch [new file with mode: 0644]
queue-5.4/nvmet-tcp-fix-data-digest-pointer-calculation.patch [new file with mode: 0644]
queue-5.4/phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch [new file with mode: 0644]
queue-5.4/phy-phy_start_aneg-add-an-unlocked-version.patch [new file with mode: 0644]
queue-5.4/rdma-mlx5-set-user-priority-for-dct.patch [new file with mode: 0644]
queue-5.4/rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch [new file with mode: 0644]
queue-5.4/regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch [new file with mode: 0644]

diff --git a/queue-5.4/arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch b/queue-5.4/arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch
new file mode 100644 (file)
index 0000000..b925d5f
--- /dev/null
@@ -0,0 +1,37 @@
+From 0764e365dacd0b8f75c1736f9236be280649bd18 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?= <u@pkh.me>
+Date: Sun, 5 Sep 2021 02:20:27 +0200
+Subject: arm64: dts: allwinner: h5: NanoPI Neo 2: Fix ethernet node
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Clément Bœsch <u@pkh.me>
+
+commit 0764e365dacd0b8f75c1736f9236be280649bd18 upstream.
+
+RX and TX delays are provided by the ethernet PHY. Reflect that in the
+ethernet node.
+
+Fixes: 44a94c7ef989 ("arm64: dts: allwinner: H5: Restore EMAC changes")
+Signed-off-by: Clément Bœsch <u@pkh.me>
+Reviewed-by: Jernej Skrabec <jernej.skrabec@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://lore.kernel.org/r/20210905002027.171984-1-u@pkh.me
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+@@ -114,7 +114,7 @@
+       pinctrl-0 = <&emac_rgmii_pins>;
+       phy-supply = <&reg_gmac_3v3>;
+       phy-handle = <&ext_rgmii_phy>;
+-      phy-mode = "rgmii";
++      phy-mode = "rgmii-id";
+       status = "okay";
+ };
diff --git a/queue-5.4/ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch b/queue-5.4/ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch
new file mode 100644 (file)
index 0000000..4df33fc
--- /dev/null
@@ -0,0 +1,65 @@
+From 13bac861952a78664907a0f927d3e874e9a59034 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Date: Wed, 13 Oct 2021 10:18:52 -0400
+Subject: IB/hfi1: Fix abba locking issue with sc_disable()
+
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+
+commit 13bac861952a78664907a0f927d3e874e9a59034 upstream.
+
+sc_disable(), after having disabled the send context, wakes up any waiters
+by calling hfi1_qp_wakeup() while holding the waitlock for the sc.
+
+This is contrary to the model for all other calls to hfi1_qp_wakeup()
+where the waitlock is dropped and a local is used to drive calls to
+hfi1_qp_wakeup().
+
+Fix by moving the sc->piowait into a local list and driving the wakeup
+calls from the list.
+
+Fixes: 099a884ba4c0 ("IB/hfi1: Handle wakeup of orphaned QPs for pio")
+Link: https://lore.kernel.org/r/20211013141852.128104.2682.stgit@awfm-01.cornelisnetworks.com
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hfi1/pio.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -920,6 +920,7 @@ void sc_disable(struct send_context *sc)
+ {
+       u64 reg;
+       struct pio_buf *pbuf;
++      LIST_HEAD(wake_list);
+       if (!sc)
+               return;
+@@ -954,19 +955,21 @@ void sc_disable(struct send_context *sc)
+       spin_unlock(&sc->release_lock);
+       write_seqlock(&sc->waitlock);
+-      while (!list_empty(&sc->piowait)) {
++      if (!list_empty(&sc->piowait))
++              list_move(&sc->piowait, &wake_list);
++      write_sequnlock(&sc->waitlock);
++      while (!list_empty(&wake_list)) {
+               struct iowait *wait;
+               struct rvt_qp *qp;
+               struct hfi1_qp_priv *priv;
+-              wait = list_first_entry(&sc->piowait, struct iowait, list);
++              wait = list_first_entry(&wake_list, struct iowait, list);
+               qp = iowait_to_qp(wait);
+               priv = qp->priv;
+               list_del_init(&priv->s_iowait.list);
+               priv->s_iowait.lock = NULL;
+               hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+       }
+-      write_sequnlock(&sc->waitlock);
+       spin_unlock_irq(&sc->alloc_lock);
+ }
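
The fix above is an instance of a generic detach-then-wake pattern: splice the
waiters onto a local list while holding the lock, drop the lock, then run the
wakeups. A minimal sketch with made-up types and names (not the hfi1 code,
which uses a seqlock and its own iowait structures):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_waiter {
        struct list_head node;
        void (*wake)(struct my_waiter *w);
};

struct my_ctx {
        spinlock_t waitlock;
        struct list_head waiters;               /* protected by waitlock */
};

static void wake_all_waiters(struct my_ctx *ctx)
{
        LIST_HEAD(wake_list);
        struct my_waiter *w, *tmp;

        spin_lock(&ctx->waitlock);
        list_splice_init(&ctx->waiters, &wake_list);    /* detach under lock */
        spin_unlock(&ctx->waitlock);

        /* The wake callbacks may take other locks; doing this after
         * dropping waitlock avoids the ABBA ordering described above.
         */
        list_for_each_entry_safe(w, tmp, &wake_list, node) {
                list_del_init(&w->node);
                w->wake(w);
        }
}
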
diff --git a/queue-5.4/ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch b/queue-5.4/ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch
new file mode 100644 (file)
index 0000000..a313868
--- /dev/null
@@ -0,0 +1,115 @@
+From d39bf40e55e666b5905fdbd46a0dced030ce87be Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Date: Tue, 12 Oct 2021 13:55:19 -0400
+Subject: IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields
+
+From: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+
+commit d39bf40e55e666b5905fdbd46a0dced030ce87be upstream.
+
+Overflowing either addrlimit or bytes_togo can allow userspace to trigger
+a buffer overflow of kernel memory. Check for overflows in all the places
+doing math on user controlled buffers.
+
+Fixes: f931551bafe1 ("IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters")
+Link: https://lore.kernel.org/r/20211012175519.7298.77738.stgit@awfm-01.cornelisnetworks.com
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/qib/qib_user_sdma.c |   33 ++++++++++++++++++++----------
+ 1 file changed, 23 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -602,7 +602,7 @@ done:
+ /*
+  * How many pages in this iovec element?
+  */
+-static int qib_user_sdma_num_pages(const struct iovec *iov)
++static size_t qib_user_sdma_num_pages(const struct iovec *iov)
+ {
+       const unsigned long addr  = (unsigned long) iov->iov_base;
+       const unsigned long  len  = iov->iov_len;
+@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(
+ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+                                  struct qib_user_sdma_queue *pq,
+                                  struct qib_user_sdma_pkt *pkt,
+-                                 unsigned long addr, int tlen, int npages)
++                                 unsigned long addr, int tlen, size_t npages)
+ {
+       struct page *pages[8];
+       int i, j;
+@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const s
+       unsigned long idx;
+       for (idx = 0; idx < niov; idx++) {
+-              const int npages = qib_user_sdma_num_pages(iov + idx);
++              const size_t npages = qib_user_sdma_num_pages(iov + idx);
+               const unsigned long addr = (unsigned long) iov[idx].iov_base;
+               ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
+@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(cons
+               unsigned pktnw;
+               unsigned pktnwc;
+               int nfrags = 0;
+-              int npages = 0;
+-              int bytes_togo = 0;
++              size_t npages = 0;
++              size_t bytes_togo = 0;
+               int tiddma = 0;
+               int cfur;
+@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(cons
+                       npages += qib_user_sdma_num_pages(&iov[idx]);
+-                      bytes_togo += slen;
++                      if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
++                          bytes_togo > type_max(typeof(pkt->bytes_togo))) {
++                              ret = -EINVAL;
++                              goto free_pbc;
++                      }
+                       pktnwc += slen >> 2;
+                       idx++;
+                       nfrags++;
+@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(cons
+               }
+               if (frag_size) {
+-                      int tidsmsize, n;
+-                      size_t pktsize;
++                      size_t tidsmsize, n, pktsize, sz, addrlimit;
+                       n = npages*((2*PAGE_SIZE/frag_size)+1);
+                       pktsize = struct_size(pkt, addr, n);
+@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(cons
+                       else
+                               tidsmsize = 0;
+-                      pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
++                      if (check_add_overflow(pktsize, tidsmsize, &sz)) {
++                              ret = -EINVAL;
++                              goto free_pbc;
++                      }
++                      pkt = kmalloc(sz, GFP_KERNEL);
+                       if (!pkt) {
+                               ret = -ENOMEM;
+                               goto free_pbc;
+                       }
+                       pkt->largepkt = 1;
+                       pkt->frag_size = frag_size;
+-                      pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
++                      if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
++                                             &addrlimit) ||
++                          addrlimit > type_max(typeof(pkt->addrlimit))) {
++                              ret = -EINVAL;
++                              goto free_pbc;
++                      }
++                      pkt->addrlimit = addrlimit;
+                       if (tiddma) {
+                               char *tidsm = (char *)pkt + pktsize;
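
The pattern used above is checked arithmetic on user-influenced sizes:
check_add_overflow() catches wraparound, and type_max() guards against
truncation when the sum is stored in a narrower struct field. An illustrative
sketch (the structure and helper are made up, not the qib code):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct pkt_hdr {
        u16 bytes_togo;                 /* narrower than size_t */
};

static int account_len(size_t *total, size_t slen, struct pkt_hdr *pkt)
{
        /* Reject both size_t wraparound and values that would be
         * truncated when written into the u16 field.
         */
        if (check_add_overflow(*total, slen, total) ||
            *total > type_max(typeof(pkt->bytes_togo)))
                return -EINVAL;

        pkt->bytes_togo = *total;
        return 0;
}
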
diff --git a/queue-5.4/net-batman-adv-fix-error-handling.patch b/queue-5.4/net-batman-adv-fix-error-handling.patch
new file mode 100644 (file)
index 0000000..41a090a
--- /dev/null
@@ -0,0 +1,173 @@
+From 6f68cd634856f8ca93bafd623ba5357e0f648c68 Mon Sep 17 00:00:00 2001
+From: Pavel Skripkin <paskripkin@gmail.com>
+Date: Sun, 24 Oct 2021 16:13:56 +0300
+Subject: net: batman-adv: fix error handling
+
+From: Pavel Skripkin <paskripkin@gmail.com>
+
+commit 6f68cd634856f8ca93bafd623ba5357e0f648c68 upstream.
+
+Syzbot reported ODEBUG warning in batadv_nc_mesh_free(). The problem was
+in wrong error handling in batadv_mesh_init().
+
+Before this patch, batadv_mesh_init() was calling batadv_mesh_free() if
+any of the batadv_*_init() calls failed. This approach may work well when
+there is some kind of indicator which can tell which parts of batadv are
+initialized; but there isn't any.
+
+All of the above leads to cleaning up uninitialized fields. Even if we hide
+the ODEBUG warning by initializing bat_priv->nc.work, syzbot was able to hit
+a GPF in batadv_nc_purge_paths(), because the hash pointer is still NULL. [1]
+
+To fix these bugs we can unwind the batadv_*_init() calls one by one.
+It is a good approach for 2 reasons: 1) it fixes the bugs on the error
+handling path; 2) it improves performance, since we won't call unneeded
+batadv_*_free() functions.
+
+So, this patch makes all batadv_*_init() functions clean up all allocated
+memory before returning with an error, so that the corresponding
+batadv_*_free() does not need to be called, and open-codes batadv_mesh_free()
+with the proper order to avoid touching uninitialized fields.
+
+Link: https://lore.kernel.org/netdev/000000000000c87fbd05cef6bcb0@google.com/ [1]
+Reported-and-tested-by: syzbot+28b0702ada0bf7381f58@syzkaller.appspotmail.com
+Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
+Signed-off-by: Pavel Skripkin <paskripkin@gmail.com>
+Acked-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/bridge_loop_avoidance.c |    8 +++-
+ net/batman-adv/main.c                  |   56 +++++++++++++++++++++++----------
+ net/batman-adv/network-coding.c        |    4 +-
+ net/batman-adv/translation-table.c     |    4 +-
+ 4 files changed, 52 insertions(+), 20 deletions(-)
+
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -1561,10 +1561,14 @@ int batadv_bla_init(struct batadv_priv *
+               return 0;
+       bat_priv->bla.claim_hash = batadv_hash_new(128);
+-      bat_priv->bla.backbone_hash = batadv_hash_new(32);
++      if (!bat_priv->bla.claim_hash)
++              return -ENOMEM;
+-      if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
++      bat_priv->bla.backbone_hash = batadv_hash_new(32);
++      if (!bat_priv->bla.backbone_hash) {
++              batadv_hash_destroy(bat_priv->bla.claim_hash);
+               return -ENOMEM;
++      }
+       batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
+                                  &batadv_claim_hash_lock_class_key);
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -197,29 +197,41 @@ int batadv_mesh_init(struct net_device *
+       bat_priv->gw.generation = 0;
+-      ret = batadv_v_mesh_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
+-
+       ret = batadv_originator_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_orig;
++      }
+       ret = batadv_tt_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_tt;
++      }
++
++      ret = batadv_v_mesh_init(bat_priv);
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_v;
++      }
+       ret = batadv_bla_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_bla;
++      }
+       ret = batadv_dat_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_dat;
++      }
+       ret = batadv_nc_mesh_init(bat_priv);
+-      if (ret < 0)
+-              goto err;
++      if (ret < 0) {
++              atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++              goto err_nc;
++      }
+       batadv_gw_init(bat_priv);
+       batadv_mcast_init(bat_priv);
+@@ -229,8 +241,20 @@ int batadv_mesh_init(struct net_device *
+       return 0;
+-err:
+-      batadv_mesh_free(soft_iface);
++err_nc:
++      batadv_dat_free(bat_priv);
++err_dat:
++      batadv_bla_free(bat_priv);
++err_bla:
++      batadv_v_mesh_free(bat_priv);
++err_v:
++      batadv_tt_free(bat_priv);
++err_tt:
++      batadv_originator_free(bat_priv);
++err_orig:
++      batadv_purge_outstanding_packets(bat_priv, NULL);
++      atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
++
+       return ret;
+ }
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -155,8 +155,10 @@ int batadv_nc_mesh_init(struct batadv_pr
+                                  &batadv_nc_coding_hash_lock_class_key);
+       bat_priv->nc.decoding_hash = batadv_hash_new(128);
+-      if (!bat_priv->nc.decoding_hash)
++      if (!bat_priv->nc.decoding_hash) {
++              batadv_hash_destroy(bat_priv->nc.coding_hash);
+               goto err;
++      }
+       batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
+                                  &batadv_nc_decoding_hash_lock_class_key);
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -4405,8 +4405,10 @@ int batadv_tt_init(struct batadv_priv *b
+               return ret;
+       ret = batadv_tt_global_init(bat_priv);
+-      if (ret < 0)
++      if (ret < 0) {
++              batadv_tt_local_table_free(bat_priv);
+               return ret;
++      }
+       batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+                                    batadv_tt_tvlv_unicast_handler_v1,
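
The restructuring above follows the standard kernel unwind ladder: each failing
init jumps to a label that frees only what has already been set up, in reverse
order. A generic sketch with placeholder init/free pairs (not batman-adv code):

struct priv;

int a_init(struct priv *p);  void a_free(struct priv *p);
int b_init(struct priv *p);  void b_free(struct priv *p);
int c_init(struct priv *p);  void c_free(struct priv *p);

static int subsys_init(struct priv *p)
{
        int ret;

        ret = a_init(p);
        if (ret < 0)
                return ret;             /* nothing to unwind yet */

        ret = b_init(p);
        if (ret < 0)
                goto err_b;

        ret = c_init(p);
        if (ret < 0)
                goto err_c;

        return 0;

err_c:
        b_free(p);                      /* undo only what succeeded ... */
err_b:
        a_free(p);                      /* ... in reverse order */
        return ret;
}
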
diff --git a/queue-5.4/net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch b/queue-5.4/net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch
new file mode 100644 (file)
index 0000000..c23d802
--- /dev/null
@@ -0,0 +1,58 @@
+From 95a359c9553342d36d408d35331ff0bfce75272f Mon Sep 17 00:00:00 2001
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+Date: Fri, 22 Oct 2021 11:53:43 -0400
+Subject: net: ethernet: microchip: lan743x: Fix dma allocation failure by using dma_set_mask_and_coherent
+
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+
+commit 95a359c9553342d36d408d35331ff0bfce75272f upstream.
+
+The dma failure was reported in the raspberry pi github (issue #4117).
+https://github.com/raspberrypi/linux/issues/4117
+The use of dma_set_mask_and_coherent fixes the issue.
+Tested on 32/64-bit raspberry pi CM4 and 64-bit ubuntu x86 PC with EVB-LAN7430.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Yuiko Oshino <yuiko.oshino@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1706,6 +1706,16 @@ static int lan743x_tx_ring_init(struct l
+               ret = -EINVAL;
+               goto cleanup;
+       }
++      if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++                                    DMA_BIT_MASK(64))) {
++              if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++                                            DMA_BIT_MASK(32))) {
++                      dev_warn(&tx->adapter->pdev->dev,
++                               "lan743x_: No suitable DMA available\n");
++                      ret = -ENOMEM;
++                      goto cleanup;
++              }
++      }
+       ring_allocation_size = ALIGN(tx->ring_size *
+                                    sizeof(struct lan743x_tx_descriptor),
+                                    PAGE_SIZE);
+@@ -2256,6 +2266,16 @@ static int lan743x_rx_ring_init(struct l
+               ret = -EINVAL;
+               goto cleanup;
+       }
++      if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++                                    DMA_BIT_MASK(64))) {
++              if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++                                            DMA_BIT_MASK(32))) {
++                      dev_warn(&rx->adapter->pdev->dev,
++                               "lan743x_: No suitable DMA available\n");
++                      ret = -ENOMEM;
++                      goto cleanup;
++              }
++      }
+       ring_allocation_size = ALIGN(rx->ring_size *
+                                    sizeof(struct lan743x_rx_descriptor),
+                                    PAGE_SIZE);
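
The added hunks are the usual probe-time DMA-mask negotiation: try a 64-bit
mask, fall back to 32-bit, and give up otherwise. A condensed sketch (the
helper name is illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int negotiate_dma_mask(struct device *dev)
{
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;               /* 64-bit streaming + coherent DMA */
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return 0;               /* fall back to 32-bit addressing */

        dev_warn(dev, "no suitable DMA mask available\n");
        return -ENOMEM;
}
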
diff --git a/queue-5.4/net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch b/queue-5.4/net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch
new file mode 100644 (file)
index 0000000..80d9dc2
--- /dev/null
@@ -0,0 +1,30 @@
+From d6423d2ec39cce2bfca418c81ef51792891576bc Mon Sep 17 00:00:00 2001
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+Date: Fri, 22 Oct 2021 11:13:53 -0400
+Subject: net: ethernet: microchip: lan743x: Fix driver crash when lan743x_pm_resume fails
+
+From: Yuiko Oshino <yuiko.oshino@microchip.com>
+
+commit d6423d2ec39cce2bfca418c81ef51792891576bc upstream.
+
+The driver needs to clean up and return when the initialization fails on resume.
+
+Fixes: 23f0703c125b ("lan743x: Add main source files for new lan743x driver")
+Signed-off-by: Yuiko Oshino <yuiko.oshino@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microchip/lan743x_main.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3001,6 +3001,8 @@ static int lan743x_pm_resume(struct devi
+       if (ret) {
+               netif_err(adapter, probe, adapter->netdev,
+                         "lan743x_hardware_init returned %d\n", ret);
++              lan743x_pci_cleanup(adapter);
++              return ret;
+       }
+       /* open netdev when netdev is at running state while resume.
diff --git a/queue-5.4/net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch b/queue-5.4/net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch
new file mode 100644 (file)
index 0000000..aaced0f
--- /dev/null
@@ -0,0 +1,44 @@
+From ace19b992436a257d9a793672e57abc28fe83e2e Mon Sep 17 00:00:00 2001
+From: Trevor Woerner <twoerner@gmail.com>
+Date: Sun, 24 Oct 2021 13:50:02 -0400
+Subject: net: nxp: lpc_eth.c: avoid hang when bringing interface down
+
+From: Trevor Woerner <twoerner@gmail.com>
+
+commit ace19b992436a257d9a793672e57abc28fe83e2e upstream.
+
+A hard hang is observed whenever the ethernet interface is brought
+down. If the PHY is stopped before the LPC core block is reset,
+the SoC will hang. Comparing lpc_eth_close() and lpc_eth_open(), I
+re-arranged the ordering of the function calls in lpc_eth_close() to
+reset the hardware before stopping the PHY.
+Fixes: b7370112f519 ("lpc32xx: Added ethernet driver")
+Signed-off-by: Trevor Woerner <twoerner@gmail.com>
+Acked-by: Vladimir Zapolskiy <vz@mleia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/nxp/lpc_eth.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1007,9 +1007,6 @@ static int lpc_eth_close(struct net_devi
+       napi_disable(&pldat->napi);
+       netif_stop_queue(ndev);
+-      if (ndev->phydev)
+-              phy_stop(ndev->phydev);
+-
+       spin_lock_irqsave(&pldat->lock, flags);
+       __lpc_eth_reset(pldat);
+       netif_carrier_off(ndev);
+@@ -1017,6 +1014,8 @@ static int lpc_eth_close(struct net_devi
+       writel(0, LPC_ENET_MAC2(pldat->net_base));
+       spin_unlock_irqrestore(&pldat->lock, flags);
++      if (ndev->phydev)
++              phy_stop(ndev->phydev);
+       clk_disable_unprepare(pldat->clk);
+       return 0;
diff --git a/queue-5.4/net-prevent-infinite-while-loop-in-skb_tx_hash.patch b/queue-5.4/net-prevent-infinite-while-loop-in-skb_tx_hash.patch
new file mode 100644 (file)
index 0000000..22bd5c3
--- /dev/null
@@ -0,0 +1,43 @@
+From 0c57eeecc559ca6bc18b8c4e2808bc78dbe769b0 Mon Sep 17 00:00:00 2001
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Mon, 25 Oct 2021 05:05:28 -0400
+Subject: net: Prevent infinite while loop in skb_tx_hash()
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+commit 0c57eeecc559ca6bc18b8c4e2808bc78dbe769b0 upstream.
+
+Drivers call netdev_set_num_tc() and then netdev_set_tc_queue()
+to set the queue count and offset for each TC.  So the queue count
+and offset for the TCs may be zero for a short period after dev->num_tc
+has been set.  If a TX packet is being transmitted at this time in the
+code path netdev_pick_tx() -> skb_tx_hash(), skb_tx_hash() may see
+nonzero dev->num_tc but zero qcount for the TC.  The while loop that
+keeps looping while hash >= qcount will not end.
+
+Fix it by checking the TC's qcount to be nonzero before using it.
+
+Fixes: eadec877ce9c ("net: Add support for subordinate traffic classes to netdev_pick_tx")
+Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2787,6 +2787,12 @@ static u16 skb_tx_hash(const struct net_
+               qoffset = sb_dev->tc_to_txq[tc].offset;
+               qcount = sb_dev->tc_to_txq[tc].count;
++              if (unlikely(!qcount)) {
++                      net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
++                                           sb_dev->name, qoffset, tc);
++                      qoffset = 0;
++                      qcount = dev->real_num_tx_queues;
++              }
+       }
+       if (skb_rx_queue_recorded(skb)) {
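
The hazard is generic: reducing a hash into a queue range whose size can
transiently be zero, which here turns the "while (hash >= qcount)" reduction
into an endless loop. Validating the count before using it avoids that (and,
with a modulo as below, a division by zero). A simplified sketch, not the
net core code:

#include <linux/compiler.h>

/* Map 'hash' onto [qoffset, qoffset + qcount); fall back to the full
 * queue range if the per-TC count has not been populated yet.
 */
static unsigned int pick_tx_queue(unsigned int hash, unsigned int qoffset,
                                  unsigned int qcount,
                                  unsigned int real_num_tx_queues)
{
        if (unlikely(!qcount)) {
                qoffset = 0;
                qcount = real_num_tx_queues;    /* >= 1 for a live device */
        }

        return qoffset + (hash % qcount);
}
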
diff --git a/queue-5.4/net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch b/queue-5.4/net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch
new file mode 100644 (file)
index 0000000..733ce36
--- /dev/null
@@ -0,0 +1,32 @@
+From 1d9d6fd21ad4a28b16ed9ee5432ae738b9dc58aa Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Wed, 27 Oct 2021 17:59:21 -0400
+Subject: net/tls: Fix flipped sign in async_wait.err assignment
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 1d9d6fd21ad4a28b16ed9ee5432ae738b9dc58aa upstream.
+
+sk->sk_err contains a positive number, yet async_wait.err wants the
+opposite.  Fix the missed sign flip, which Jakub caught by inspection.
+
+Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -456,7 +456,7 @@ static void tls_encrypt_done(struct cryp
+               /* If err is already set on socket, return the same code */
+               if (sk->sk_err) {
+-                      ctx->async_wait.err = sk->sk_err;
++                      ctx->async_wait.err = -sk->sk_err;
+               } else {
+                       ctx->async_wait.err = err;
+                       tls_err_abort(sk, err);
diff --git a/queue-5.4/nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch b/queue-5.4/nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch
new file mode 100644 (file)
index 0000000..fc96d2f
--- /dev/null
@@ -0,0 +1,39 @@
+From 4a089e95b4d6bb625044d47aed0c442a8f7bd093 Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Wed, 20 Oct 2021 12:11:16 -0700
+Subject: nios2: Make NIOS2_DTB_SOURCE_BOOL depend on !COMPILE_TEST
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit 4a089e95b4d6bb625044d47aed0c442a8f7bd093 upstream.
+
+nios2:allmodconfig builds fail with
+
+make[1]: *** No rule to make target 'arch/nios2/boot/dts/""',
+       needed by 'arch/nios2/boot/dts/built-in.a'.  Stop.
+make: [Makefile:1868: arch/nios2/boot/dts] Error 2 (ignored)
+
+This is seen with compile tests since those enable NIOS2_DTB_SOURCE_BOOL,
+which in turn enables NIOS2_DTB_SOURCE. This causes the build error
+because the default value for NIOS2_DTB_SOURCE is an empty string.
+Disable NIOS2_DTB_SOURCE_BOOL for compile tests to avoid the error.
+
+Fixes: 2fc8483fdcde ("nios2: Build infrastructure")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Dinh Nguyen <dinguyen@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/nios2/platform/Kconfig.platform |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/nios2/platform/Kconfig.platform
++++ b/arch/nios2/platform/Kconfig.platform
+@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
+ config NIOS2_DTB_SOURCE_BOOL
+       bool "Compile and link device tree into kernel image"
++      depends on !COMPILE_TEST
+       help
+         This allows you to specify a dts (device tree source) file
+         which will be compiled and linked into the kernel image.
diff --git a/queue-5.4/nvme-tcp-fix-data-digest-pointer-calculation.patch b/queue-5.4/nvme-tcp-fix-data-digest-pointer-calculation.patch
new file mode 100644 (file)
index 0000000..24ea99b
--- /dev/null
@@ -0,0 +1,33 @@
+From d89b9f3bbb58e9e378881209756b0723694f22ff Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Mon, 25 Oct 2021 22:47:30 +0530
+Subject: nvme-tcp: fix data digest pointer calculation
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit d89b9f3bbb58e9e378881209756b0723694f22ff upstream.
+
+ddgst is of type __le32, so &req->ddgst + req->offset
+increases &req->ddgst by 4 * req->offset. Fix this by
+type casting &req->ddgst to u8 *.
+
+Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/tcp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -962,7 +962,7 @@ static int nvme_tcp_try_send_ddgst(struc
+       int ret;
+       struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+       struct kvec iov = {
+-              .iov_base = &req->ddgst + req->offset,
++              .iov_base = (u8 *)&req->ddgst + req->offset,
+               .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
+       };
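
This fix and the nvmet-tcp one below come down to the same C rule: adding an
offset to a __le32 * advances in 4-byte steps, so a byte offset must be applied
through a u8 * view of the object. A two-line illustration with generic names:

#include <linux/types.h>

static void *digest_ptr_at(__le32 *ddgst, size_t byte_offset)
{
        /* (void *)(ddgst + byte_offset) would advance 4 * byte_offset bytes. */
        return (u8 *)ddgst + byte_offset;       /* byte-granular resume point */
}
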
diff --git a/queue-5.4/nvmet-tcp-fix-data-digest-pointer-calculation.patch b/queue-5.4/nvmet-tcp-fix-data-digest-pointer-calculation.patch
new file mode 100644 (file)
index 0000000..3ffe397
--- /dev/null
@@ -0,0 +1,33 @@
+From e790de54e94a7a15fb725b34724d41d41cbaa60c Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Mon, 25 Oct 2021 22:46:54 +0530
+Subject: nvmet-tcp: fix data digest pointer calculation
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit e790de54e94a7a15fb725b34724d41d41cbaa60c upstream.
+
+exp_ddgst is of type __le32, so &cmd->exp_ddgst + cmd->offset increases
+&cmd->exp_ddgst by 4 * cmd->offset. Fix this by type casting
+&cmd->exp_ddgst to u8 *.
+
+Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/tcp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -633,7 +633,7 @@ static int nvmet_try_send_ddgst(struct n
+       struct nvmet_tcp_queue *queue = cmd->queue;
+       struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+       struct kvec iov = {
+-              .iov_base = &cmd->exp_ddgst + cmd->offset,
++              .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+               .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+       };
+       int ret;
diff --git a/queue-5.4/phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch b/queue-5.4/phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch
new file mode 100644 (file)
index 0000000..ebe1011
--- /dev/null
@@ -0,0 +1,39 @@
+From c10a485c3de5ccbf1fff65a382cebcb2730c6b06 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:02 +0200
+Subject: phy: phy_ethtool_ksettings_get: Lock the phy for consistency
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit c10a485c3de5ccbf1fff65a382cebcb2730c6b06 upstream.
+
+The PHY structure should be locked while copying information out of
+it, otherwise there is no guarantee of self consistency. Without the
+lock, the PHY state machine could be updating the structure.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -367,6 +367,7 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set)
+ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+                              struct ethtool_link_ksettings *cmd)
+ {
++      mutex_lock(&phydev->lock);
+       linkmode_copy(cmd->link_modes.supported, phydev->supported);
+       linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+       linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
+@@ -383,6 +384,7 @@ void phy_ethtool_ksettings_get(struct ph
+       cmd->base.autoneg = phydev->autoneg;
+       cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
+       cmd->base.eth_tp_mdix = phydev->mdix;
++      mutex_unlock(&phydev->lock);
+ }
+ EXPORT_SYMBOL(phy_ethtool_ksettings_get);
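
The rule applied above is general: when several related fields are copied out
of a structure that a state machine may update concurrently, hold the
structure's lock for the whole copy so the caller sees a self-consistent
snapshot. A tiny sketch with made-up types:

#include <linux/mutex.h>

struct link_state {
        struct mutex lock;
        int speed;
        int duplex;
};

struct link_snapshot {
        int speed;
        int duplex;
};

static void link_state_snapshot(struct link_state *st, struct link_snapshot *out)
{
        /* Both fields are read under the same lock acquisition, so the
         * snapshot cannot mix values from two different updates.
         */
        mutex_lock(&st->lock);
        out->speed = st->speed;
        out->duplex = st->duplex;
        mutex_unlock(&st->lock);
}
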
diff --git a/queue-5.4/phy-phy_start_aneg-add-an-unlocked-version.patch b/queue-5.4/phy-phy_start_aneg-add-an-unlocked-version.patch
new file mode 100644 (file)
index 0000000..16911a3
--- /dev/null
@@ -0,0 +1,81 @@
+From 707293a56f95f8e7e0cfae008010c7933fb68973 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sun, 24 Oct 2021 21:48:04 +0200
+Subject: phy: phy_start_aneg: Add an unlocked version
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+commit 707293a56f95f8e7e0cfae008010c7933fb68973 upstream.
+
+Split phy_start_aneg into a wrapper which takes the PHY lock, and a
+helper doing the real work. This will be needed when
+phy_ethtool_ksettings_set takes the lock.
+
+Fixes: 2d55173e71b0 ("phy: add generic function to support ksetting support")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c |   30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -555,7 +555,7 @@ static int phy_check_link_status(struct
+ }
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * _phy_start_aneg - start auto-negotiation for this PHY device
+  * @phydev: the phy_device struct
+  *
+  * Description: Sanitizes the settings (if we're not autonegotiating
+@@ -563,25 +563,43 @@ static int phy_check_link_status(struct
+  *   If the PHYCONTROL Layer is operating, we change the state to
+  *   reflect the beginning of Auto-negotiation or forcing.
+  */
+-int phy_start_aneg(struct phy_device *phydev)
++static int _phy_start_aneg(struct phy_device *phydev)
+ {
+       int err;
++      lockdep_assert_held(&phydev->lock);
++
+       if (!phydev->drv)
+               return -EIO;
+-      mutex_lock(&phydev->lock);
+-
+       if (AUTONEG_DISABLE == phydev->autoneg)
+               phy_sanitize_settings(phydev);
+       err = phy_config_aneg(phydev);
+       if (err < 0)
+-              goto out_unlock;
++              return err;
+       if (phy_is_started(phydev))
+               err = phy_check_link_status(phydev);
+-out_unlock:
++
++      return err;
++}
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ *   them), and then calls the driver's config_aneg function.
++ *   If the PHYCONTROL Layer is operating, we change the state to
++ *   reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++      int err;
++
++      mutex_lock(&phydev->lock);
++      err = _phy_start_aneg(phydev);
+       mutex_unlock(&phydev->lock);
+       return err;
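
The refactoring above is the familiar locked-wrapper/unlocked-helper split: the
underscore-prefixed helper asserts the lock with lockdep and does the work, and
the public wrapper takes and releases the lock around it. A generic sketch
(illustrative names, not the phylib code):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct dev_ctx {
        struct mutex lock;
        int state;
};

/* Callers must already hold ctx->lock. */
static int _do_config(struct dev_ctx *ctx)
{
        lockdep_assert_held(&ctx->lock);
        ctx->state++;                   /* stand-in for the real work */
        return 0;
}

/* Public entry point: wraps the helper with the lock. */
static int do_config(struct dev_ctx *ctx)
{
        int err;

        mutex_lock(&ctx->lock);
        err = _do_config(ctx);
        mutex_unlock(&ctx->lock);

        return err;
}
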
diff --git a/queue-5.4/rdma-mlx5-set-user-priority-for-dct.patch b/queue-5.4/rdma-mlx5-set-user-priority-for-dct.patch
new file mode 100644 (file)
index 0000000..aee545a
--- /dev/null
@@ -0,0 +1,37 @@
+From 1ab52ac1e9bc9391f592c9fa8340a6e3e9c36286 Mon Sep 17 00:00:00 2001
+From: Patrisious Haddad <phaddad@nvidia.com>
+Date: Wed, 6 Oct 2021 12:31:53 +0300
+Subject: RDMA/mlx5: Set user priority for DCT
+
+From: Patrisious Haddad <phaddad@nvidia.com>
+
+commit 1ab52ac1e9bc9391f592c9fa8340a6e3e9c36286 upstream.
+
+Currently, the driver doesn't set the PCP-based priority for DCT, hence
+DCT response packets are transmitted without user priority.
+
+Fix it by setting user provided priority in the eth_prio field in the DCT
+context, which in turn sets the value in the transmitted packet.
+
+Fixes: 776a3906b692 ("IB/mlx5: Add support for DC target QP")
+Link: https://lore.kernel.org/r/5fd2d94a13f5742d8803c218927322257d53205c.1633512672.git.leonro@nvidia.com
+Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/qp.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3865,6 +3865,8 @@ static int mlx5_ib_modify_dct(struct ib_
+               MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+               MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+               MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
++              if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
++                      MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+               err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+                                          MLX5_ST_SZ_BYTES(create_dct_in), out,
diff --git a/queue-5.4/rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch b/queue-5.4/rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch
new file mode 100644 (file)
index 0000000..d9a8210
--- /dev/null
@@ -0,0 +1,84 @@
+From 64733956ebba7cc629856f4a6ee35a52bc9c023f Mon Sep 17 00:00:00 2001
+From: Mark Zhang <markzhang@nvidia.com>
+Date: Sun, 24 Oct 2021 09:08:20 +0300
+Subject: RDMA/sa_query: Use strscpy_pad instead of memcpy to copy a string
+
+From: Mark Zhang <markzhang@nvidia.com>
+
+commit 64733956ebba7cc629856f4a6ee35a52bc9c023f upstream.
+
+When copying the device name, the length of the data copied by memcpy
+exceeds the length of the source buffer, which causes the KASAN issue
+below.  Use strscpy_pad() instead.
+
+ BUG: KASAN: slab-out-of-bounds in ib_nl_set_path_rec_attrs+0x136/0x320 [ib_core]
+ Read of size 64 at addr ffff88811a10f5e0 by task rping/140263
+ CPU: 3 PID: 140263 Comm: rping Not tainted 5.15.0-rc1+ #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+  dump_stack_lvl+0x57/0x7d
+  print_address_description.constprop.0+0x1d/0xa0
+  kasan_report+0xcb/0x110
+  kasan_check_range+0x13d/0x180
+  memcpy+0x20/0x60
+  ib_nl_set_path_rec_attrs+0x136/0x320 [ib_core]
+  ib_nl_make_request+0x1c6/0x380 [ib_core]
+  send_mad+0x20a/0x220 [ib_core]
+  ib_sa_path_rec_get+0x3e3/0x800 [ib_core]
+  cma_query_ib_route+0x29b/0x390 [rdma_cm]
+  rdma_resolve_route+0x308/0x3e0 [rdma_cm]
+  ucma_resolve_route+0xe1/0x150 [rdma_ucm]
+  ucma_write+0x17b/0x1f0 [rdma_ucm]
+  vfs_write+0x142/0x4d0
+  ksys_write+0x133/0x160
+  do_syscall_64+0x43/0x90
+  entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7f26499aa90f
+ Code: 89 54 24 18 48 89 74 24 10 89 7c 24 08 e8 29 fd ff ff 48 8b 54 24 18 48 8b 74 24 10 41 89 c0 8b 7c 24 08 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 31 44 89 c7 48 89 44 24 08 e8 5c fd ff ff 48
+ RSP: 002b:00007f26495f2dc0 EFLAGS: 00000293 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 00000000000007d0 RCX: 00007f26499aa90f
+ RDX: 0000000000000010 RSI: 00007f26495f2e00 RDI: 0000000000000003
+ RBP: 00005632a8315440 R08: 0000000000000000 R09: 0000000000000001
+ R10: 0000000000000000 R11: 0000000000000293 R12: 00007f26495f2e00
+ R13: 00005632a83154e0 R14: 00005632a8315440 R15: 00005632a830a810
+
+ Allocated by task 131419:
+  kasan_save_stack+0x1b/0x40
+  __kasan_kmalloc+0x7c/0x90
+  proc_self_get_link+0x8b/0x100
+  pick_link+0x4f1/0x5c0
+  step_into+0x2eb/0x3d0
+  walk_component+0xc8/0x2c0
+  link_path_walk+0x3b8/0x580
+  path_openat+0x101/0x230
+  do_filp_open+0x12e/0x240
+  do_sys_openat2+0x115/0x280
+  __x64_sys_openat+0xce/0x140
+  do_syscall_64+0x43/0x90
+  entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+Fixes: 2ca546b92a02 ("IB/sa: Route SA pathrecord query through netlink")
+Link: https://lore.kernel.org/r/72ede0f6dab61f7f23df9ac7a70666e07ef314b0.1635055496.git.leonro@nvidia.com
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/sa_query.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -760,8 +760,9 @@ static void ib_nl_set_path_rec_attrs(str
+       /* Construct the family header first */
+       header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+-      memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
+-             LS_DEVICE_NAME_MAX);
++      strscpy_pad(header->device_name,
++                  dev_name(&query->port->agent->device->dev),
++                  LS_DEVICE_NAME_MAX);
+       header->port_num = query->port->port_num;
+       if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
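
The general rule behind this fix: copying a NUL-terminated string into a
fixed-size field with memcpy(dst, src, sizeof(dst)) reads sizeof(dst) bytes
from the source even when it is shorter, while strscpy_pad() is bounded by the
destination, NUL-terminates it and zero-fills the tail. Sketch with an
illustrative structure:

#include <linux/string.h>

#define DEVICE_NAME_MAX 64              /* illustrative fixed field size */

struct name_header {
        char device_name[DEVICE_NAME_MAX];
};

static void set_device_name(struct name_header *hdr, const char *name)
{
        /* memcpy(hdr->device_name, name, DEVICE_NAME_MAX) would read
         * past the end of a shorter 'name'; strscpy_pad() does not.
         */
        strscpy_pad(hdr->device_name, name, sizeof(hdr->device_name));
}
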
diff --git a/queue-5.4/regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch b/queue-5.4/regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch
new file mode 100644 (file)
index 0000000..43bc91d
--- /dev/null
@@ -0,0 +1,70 @@
+From 55e6d8037805b3400096d621091dfbf713f97e83 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Tue, 12 Oct 2021 10:37:35 +0800
+Subject: regmap: Fix possible double-free in regcache_rbtree_exit()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 55e6d8037805b3400096d621091dfbf713f97e83 upstream.
+
+In regcache_rbtree_insert_to_block(), when the 'present' realloc fails,
+the 'blk' which is supposed to be assigned to 'rbnode->block' will be freed,
+so 'rbnode->block' points to freed memory. In the error handling path of
+regcache_rbtree_init(), 'rbnode->block' will be freed again in
+regcache_rbtree_exit(), and KASAN will report a double-free as follows:
+
+BUG: KASAN: double-free or invalid-free in kfree+0xce/0x390
+Call Trace:
+ slab_free_freelist_hook+0x10d/0x240
+ kfree+0xce/0x390
+ regcache_rbtree_exit+0x15d/0x1a0
+ regcache_rbtree_init+0x224/0x2c0
+ regcache_init+0x88d/0x1310
+ __regmap_init+0x3151/0x4a80
+ __devm_regmap_init+0x7d/0x100
+ madera_spi_probe+0x10f/0x333 [madera_spi]
+ spi_probe+0x183/0x210
+ really_probe+0x285/0xc30
+
+To fix this, move the assignment of rbnode->block up to immediately after
+the reallocation has succeeded, so that the data structure stays valid even
+if the second reallocation fails.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 3f4ff561bc88b ("regmap: rbtree: Make cache_present bitmap per node")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20211012023735.1632786-1-yangyingliang@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/regmap/regcache-rbtree.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_blo
+       if (!blk)
+               return -ENOMEM;
++      rbnode->block = blk;
++
+       if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+               present = krealloc(rbnode->cache_present,
+                                  BITS_TO_LONGS(blklen) * sizeof(*present),
+                                  GFP_KERNEL);
+-              if (!present) {
+-                      kfree(blk);
++              if (!present)
+                       return -ENOMEM;
+-              }
+               memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+                      (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_blo
+       }
+       /* update the rbnode block, its size and the base register */
+-      rbnode->block = blk;
+       rbnode->blklen = blklen;
+       rbnode->base_reg = base_reg;
+       rbnode->cache_present = present;
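
Two rules are at work above: krealloc() leaves the original buffer untouched on
failure, and a resized buffer that a longer-lived structure will own should be
stored there immediately so every cleanup path frees it exactly once. A
condensed sketch with a made-up structure:

#include <linux/errno.h>
#include <linux/slab.h>

struct cache_node {
        void *block;
        unsigned long *present;
};

static int cache_node_grow(struct cache_node *n, size_t blk_size,
                           size_t present_size)
{
        void *blk;
        unsigned long *present;

        blk = krealloc(n->block, blk_size, GFP_KERNEL);
        if (!blk)
                return -ENOMEM;         /* n->block is still the old buffer */
        n->block = blk;                 /* hand over ownership right away */

        present = krealloc(n->present, present_size, GFP_KERNEL);
        if (!present)
                return -ENOMEM;         /* n->block already owned, freed once */
        n->present = present;

        return 0;
}
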
diff --git a/queue-5.4/series b/queue-5.4/series
index 1480e89ed9309aaeeb3d5414281b584b8a6f923e..fe3785c02e2fe33a23c9f5a359e06849af1027a2 100644 (file)
--- a/queue-5.4/series
@@ -20,3 +20,21 @@ mmc-sdhci-esdhc-imx-clear-the-buffer_read_ready-to-reset-standard-tuning-circuit
 cfg80211-scan-fix-rcu-in-cfg80211_add_nontrans_list.patch
 net-lan78xx-fix-division-by-zero-in-send-path.patch
 drm-ttm-fix-memleak-in-ttm_transfered_destroy.patch
+tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch
+ib-qib-protect-from-buffer-overflow-in-struct-qib_user_sdma_pkt-fields.patch
+ib-hfi1-fix-abba-locking-issue-with-sc_disable.patch
+nvmet-tcp-fix-data-digest-pointer-calculation.patch
+nvme-tcp-fix-data-digest-pointer-calculation.patch
+rdma-mlx5-set-user-priority-for-dct.patch
+arm64-dts-allwinner-h5-nanopi-neo-2-fix-ethernet-node.patch
+regmap-fix-possible-double-free-in-regcache_rbtree_exit.patch
+net-batman-adv-fix-error-handling.patch
+net-prevent-infinite-while-loop-in-skb_tx_hash.patch
+rdma-sa_query-use-strscpy_pad-instead-of-memcpy-to-copy-a-string.patch
+nios2-make-nios2_dtb_source_bool-depend-on-compile_test.patch
+net-ethernet-microchip-lan743x-fix-driver-crash-when-lan743x_pm_resume-fails.patch
+net-ethernet-microchip-lan743x-fix-dma-allocation-failure-by-using-dma_set_mask_and_coherent.patch
+net-nxp-lpc_eth.c-avoid-hang-when-bringing-interface-down.patch
+net-tls-fix-flipped-sign-in-async_wait.err-assignment.patch
+phy-phy_ethtool_ksettings_get-lock-the-phy-for-consistency.patch
+phy-phy_start_aneg-add-an-unlocked-version.patch
diff --git a/queue-5.4/tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch b/queue-5.4/tcp_bpf-fix-one-concurrency-problem-in-the-tcp_bpf_send_verdict-function.patch
new file mode 100644 (file)
index 0000000..3ea43b6
--- /dev/null
@@ -0,0 +1,82 @@
+From cd9733f5d75c94a32544d6ce5be47e14194cf137 Mon Sep 17 00:00:00 2001
+From: Liu Jian <liujian56@huawei.com>
+Date: Tue, 12 Oct 2021 13:20:19 +0800
+Subject: tcp_bpf: Fix one concurrency problem in the tcp_bpf_send_verdict function
+
+From: Liu Jian <liujian56@huawei.com>
+
+commit cd9733f5d75c94a32544d6ce5be47e14194cf137 upstream.
+
+With two Msgs, msgA and msgB and a user doing nonblocking sendmsg calls (or
+multiple cores) on a single socket 'sk' we could get the following flow.
+
+ msgA, sk                               msgB, sk
+ -----------                            ---------------
+ tcp_bpf_sendmsg()
+ lock(sk)
+ psock = sk->psock
+                                        tcp_bpf_sendmsg()
+                                        lock(sk) ... blocking
+tcp_bpf_send_verdict
+if (psock->eval == NONE)
+   psock->eval = sk_psock_msg_verdict
+ ..
+ < handle SK_REDIRECT case >
+   release_sock(sk)                     < lock dropped so grab here >
+   ret = tcp_bpf_sendmsg_redir
+                                        psock = sk->psock
+                                        tcp_bpf_send_verdict
+ lock_sock(sk) ... blocking on B
+                                        if (psock->eval == NONE) <- boom.
+                                         psock->eval will have msgA state
+
+The problem here is we dropped the lock on msgA and grabbed it with msgB.
+Now we have old state in psock and importantly psock->eval has not been
+cleared. So msgB will run whatever action was done on A and the verdict
+program may never see it.
+
+Fixes: 604326b41a6fb ("bpf, sockmap: convert to generic sk_msg interface")
+Signed-off-by: Liu Jian <liujian56@huawei.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20211012052019.184398-1-liujian56@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_bpf.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -313,6 +313,7 @@ static int tcp_bpf_send_verdict(struct s
+       bool cork = false, enospc = sk_msg_full(msg);
+       struct sock *sk_redir;
+       u32 tosend, delta = 0;
++      u32 eval = __SK_NONE;
+       int ret;
+ more_data:
+@@ -356,13 +357,24 @@ more_data:
+       case __SK_REDIRECT:
+               sk_redir = psock->sk_redir;
+               sk_msg_apply_bytes(psock, tosend);
++              if (!psock->apply_bytes) {
++                      /* Clean up before releasing the sock lock. */
++                      eval = psock->eval;
++                      psock->eval = __SK_NONE;
++                      psock->sk_redir = NULL;
++              }
+               if (psock->cork) {
+                       cork = true;
+                       psock->cork = NULL;
+               }
+               sk_msg_return(sk, msg, tosend);
+               release_sock(sk);
++
+               ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
++
++              if (eval == __SK_REDIRECT)
++                      sock_put(sk_redir);
++
+               lock_sock(sk);
+               if (unlikely(ret < 0)) {
+                       int free = sk_msg_free_nocharge(sk, msg);