]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
authorSasha Levin <sashal@kernel.org>
Sun, 2 Jan 2022 21:57:04 +0000 (16:57 -0500)
committerSasha Levin <sashal@kernel.org>
Sun, 2 Jan 2022 21:57:04 +0000 (16:57 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
23 files changed:
queue-5.10/fsl-fman-fix-missing-put_device-call-in-fman_port_pr.patch [new file with mode: 0644]
queue-5.10/igc-fix-tx-timestamp-support-for-non-msi-x-platforms.patch [new file with mode: 0644]
queue-5.10/ionic-initialize-the-lif-dbid_inuse-bitmap.patch [new file with mode: 0644]
queue-5.10/net-ag71xx-fix-a-potential-double-free-in-error-hand.patch [new file with mode: 0644]
queue-5.10/net-lantiq_xrx200-fix-statistics-of-received-bytes.patch [new file with mode: 0644]
queue-5.10/net-mlx5-dr-fix-null-vs-is_err-checking-in-dr_domain.patch [new file with mode: 0644]
queue-5.10/net-mlx5e-fix-icosq-recovery-flow-for-xsk.patch [new file with mode: 0644]
queue-5.10/net-mlx5e-fix-wrong-features-assignment-in-case-of-e.patch [new file with mode: 0644]
queue-5.10/net-mlx5e-wrap-the-tx-reporter-dump-callback-to-extr.patch [new file with mode: 0644]
queue-5.10/net-ncsi-check-for-error-return-from-call-to-nla_put.patch [new file with mode: 0644]
queue-5.10/net-phy-fixed_phy-fix-null-vs-is_err-checking-in-__f.patch [new file with mode: 0644]
queue-5.10/net-smc-don-t-send-cdc-llc-message-if-link-not-ready.patch [new file with mode: 0644]
queue-5.10/net-smc-fix-kernel-panic-caused-by-race-of-smc_sock.patch [new file with mode: 0644]
queue-5.10/net-smc-fix-using-of-uninitialized-completions.patch [new file with mode: 0644]
queue-5.10/net-smc-improved-fix-wait-on-already-cleared-link.patch [new file with mode: 0644]
queue-5.10/net-usb-pegasus-do-not-drop-long-ethernet-frames.patch [new file with mode: 0644]
queue-5.10/nfc-st21nfca-fix-memory-leak-in-device-probe-and-rem.patch [new file with mode: 0644]
queue-5.10/scsi-lpfc-terminate-string-in-lpfc_debugfs_nvmeio_tr.patch [new file with mode: 0644]
queue-5.10/sctp-use-call_rcu-to-free-endpoint.patch [new file with mode: 0644]
queue-5.10/selftests-calculate-udpgso-segment-count-without-hea.patch [new file with mode: 0644]
queue-5.10/selftests-net-udpgso_bench_tx-fix-dst-ip-argument.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/udp-using-datalen-to-cap-ipv6-udp-max-gso-segments.patch [new file with mode: 0644]

diff --git a/queue-5.10/fsl-fman-fix-missing-put_device-call-in-fman_port_pr.patch b/queue-5.10/fsl-fman-fix-missing-put_device-call-in-fman_port_pr.patch
new file mode 100644 (file)
index 0000000..7664307
--- /dev/null
@@ -0,0 +1,82 @@
+From cee861fbd395fe79835b1d840496903836ec7aa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Dec 2021 12:26:27 +0000
+Subject: fsl/fman: Fix missing put_device() call in fman_port_probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit bf2b09fedc17248b315f80fb249087b7d28a69a6 ]
+
+The reference taken by 'of_find_device_by_node()' must be released when
+not needed anymore.
+Add the corresponding 'put_device()' in the error handling paths.
+
+Fixes: 18a6c85fcc78 ("fsl/fman: Add FMan Port Support")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fman/fman_port.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
+index d9baac0dbc7d0..4c9d05c45c033 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_port.c
++++ b/drivers/net/ethernet/freescale/fman/fman_port.c
+@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+       fman = dev_get_drvdata(&fm_pdev->dev);
+       if (!fman) {
+               err = -EINVAL;
+-              goto return_err;
++              goto put_device;
+       }
+       err = of_property_read_u32(port_node, "cell-index", &val);
+@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+               dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
+                       __func__, port_node);
+               err = -EINVAL;
+-              goto return_err;
++              goto put_device;
+       }
+       port_id = (u8)val;
+       port->dts_params.id = port_id;
+@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+       }  else {
+               dev_err(port->dev, "%s: Illegal port type\n", __func__);
+               err = -EINVAL;
+-              goto return_err;
++              goto put_device;
+       }
+       port->dts_params.type = port_type;
+@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+                       dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+                               __func__);
+                       err = -EINVAL;
+-                      goto return_err;
++                      goto put_device;
+               }
+               port->dts_params.qman_channel_id = qman_channel_id;
+       }
+@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+               dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+                       __func__);
+               err = -ENOMEM;
+-              goto return_err;
++              goto put_device;
+       }
+       port->dts_params.fman = fman;
+@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
+       return 0;
++put_device:
++      put_device(&fm_pdev->dev);
+ return_err:
+       of_node_put(port_node);
+ free_port:
+-- 
+2.34.1
+
diff --git a/queue-5.10/igc-fix-tx-timestamp-support-for-non-msi-x-platforms.patch b/queue-5.10/igc-fix-tx-timestamp-support-for-non-msi-x-platforms.patch
new file mode 100644 (file)
index 0000000..699697a
--- /dev/null
@@ -0,0 +1,48 @@
+From 19245d142469b1621c5668924efaee86804629a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Dec 2021 16:49:33 -0700
+Subject: igc: Fix TX timestamp support for non-MSI-X platforms
+
+From: James McLaughlin <james.mclaughlin@qsc.com>
+
+[ Upstream commit f85846bbf43de38fb2c89fe7d2a085608c4eb25a ]
+
+Time synchronization was not properly enabled on non-MSI-X platforms.
+
+Fixes: 2c344ae24501 ("igc: Add support for TX timestamping")
+Signed-off-by: James McLaughlin <james.mclaughlin@qsc.com>
+Reviewed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Tested-by: Nechama Kraus <nechamax.kraus@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index cae090a072524..61cebb7df6bcb 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -4422,6 +4422,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
+       }
++      if (icr & IGC_ICR_TS)
++              igc_tsync_interrupt(adapter);
++
+       napi_schedule(&q_vector->napi);
+       return IRQ_HANDLED;
+@@ -4465,6 +4468,9 @@ static irqreturn_t igc_intr(int irq, void *data)
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
+       }
++      if (icr & IGC_ICR_TS)
++              igc_tsync_interrupt(adapter);
++
+       napi_schedule(&q_vector->napi);
+       return IRQ_HANDLED;
+-- 
+2.34.1
+
diff --git a/queue-5.10/ionic-initialize-the-lif-dbid_inuse-bitmap.patch b/queue-5.10/ionic-initialize-the-lif-dbid_inuse-bitmap.patch
new file mode 100644 (file)
index 0000000..f6efd82
--- /dev/null
@@ -0,0 +1,40 @@
+From f01c164b38b4421e8210e1e5ce4ef639f24bec75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Dec 2021 15:06:17 +0100
+Subject: ionic: Initialize the 'lif->dbid_inuse' bitmap
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 140c7bc7d1195750342ea0e6ab76179499ae7cd7 ]
+
+When allocated, this bitmap is not initialized. Only the first bit is set a
+few lines below.
+
+Use bitmap_zalloc() to make sure that it is cleared before being used.
+
+Fixes: 6461b446f2a0 ("ionic: Add interrupts and doorbells")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Link: https://lore.kernel.org/r/6a478eae0b5e6c63774e1f0ddb1a3f8c38fa8ade.1640527506.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 1b44155fa24b2..e95c09dc2c30d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -2836,7 +2836,7 @@ int ionic_lif_init(struct ionic_lif *lif)
+               return -EINVAL;
+       }
+-      lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
++      lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
+       if (!lif->dbid_inuse) {
+               dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
+               return -ENOMEM;
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-ag71xx-fix-a-potential-double-free-in-error-hand.patch b/queue-5.10/net-ag71xx-fix-a-potential-double-free-in-error-hand.patch
new file mode 100644 (file)
index 0000000..68057fd
--- /dev/null
@@ -0,0 +1,98 @@
+From 10778e286468d06f1d1b86dd044070c1cecb6435 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Dec 2021 18:51:44 +0100
+Subject: net: ag71xx: Fix a potential double free in error handling paths
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 1cd5384c88af5b59bf9f3b6c1a151bc14b88c2cd ]
+
+'ndev' is a managed resource allocated with devm_alloc_etherdev(), so there
+is no need to call free_netdev() explicitly or there will be a double
+free().
+
+Simplify all error handling paths accordingly.
+
+Fixes: d51b6ce441d3 ("net: ethernet: add ag71xx driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/atheros/ag71xx.c | 23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index a60ce90305819..c26c9b0c00d8f 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -1904,15 +1904,12 @@ static int ag71xx_probe(struct platform_device *pdev)
+       ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
+       if (IS_ERR(ag->mac_reset)) {
+               netif_err(ag, probe, ndev, "missing mac reset\n");
+-              err = PTR_ERR(ag->mac_reset);
+-              goto err_free;
++              return PTR_ERR(ag->mac_reset);
+       }
+       ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+-      if (!ag->mac_base) {
+-              err = -ENOMEM;
+-              goto err_free;
+-      }
++      if (!ag->mac_base)
++              return -ENOMEM;
+       ndev->irq = platform_get_irq(pdev, 0);
+       err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
+@@ -1920,7 +1917,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+       if (err) {
+               netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
+                         ndev->irq);
+-              goto err_free;
++              return err;
+       }
+       ndev->netdev_ops = &ag71xx_netdev_ops;
+@@ -1948,10 +1945,8 @@ static int ag71xx_probe(struct platform_device *pdev)
+       ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
+                                           sizeof(struct ag71xx_desc),
+                                           &ag->stop_desc_dma, GFP_KERNEL);
+-      if (!ag->stop_desc) {
+-              err = -ENOMEM;
+-              goto err_free;
+-      }
++      if (!ag->stop_desc)
++              return -ENOMEM;
+       ag->stop_desc->data = 0;
+       ag->stop_desc->ctrl = 0;
+@@ -1968,7 +1963,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+       err = of_get_phy_mode(np, &ag->phy_if_mode);
+       if (err) {
+               netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
+-              goto err_free;
++              return err;
+       }
+       netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+@@ -1976,7 +1971,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+       err = clk_prepare_enable(ag->clk_eth);
+       if (err) {
+               netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
+-              goto err_free;
++              return err;
+       }
+       ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
+@@ -2012,8 +2007,6 @@ err_mdio_remove:
+       ag71xx_mdio_remove(ag);
+ err_put_clk:
+       clk_disable_unprepare(ag->clk_eth);
+-err_free:
+-      free_netdev(ndev);
+       return err;
+ }
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-lantiq_xrx200-fix-statistics-of-received-bytes.patch b/queue-5.10/net-lantiq_xrx200-fix-statistics-of-received-bytes.patch
new file mode 100644 (file)
index 0000000..33b3515
--- /dev/null
@@ -0,0 +1,36 @@
+From e5e22cbcb54854fc1abbd982d9f96b872a0fd6f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Dec 2021 17:22:03 +0100
+Subject: net: lantiq_xrx200: fix statistics of received bytes
+
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+
+[ Upstream commit 5be60a945329d82f06fc755a43eeefbfc5f77d72 ]
+
+Received frames have FCS truncated. There is no need
+to subtract FCS length from the statistics.
+
+Fixes: fe1a56420cf2 ("net: lantiq: Add Lantiq / Intel VRX200 Ethernet driver")
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 072075bc60ee9..500511b72ac60 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -209,7 +209,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+       skb->protocol = eth_type_trans(skb, net_dev);
+       netif_receive_skb(skb);
+       net_dev->stats.rx_packets++;
+-      net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
++      net_dev->stats.rx_bytes += len;
+       return 0;
+ }
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-mlx5-dr-fix-null-vs-is_err-checking-in-dr_domain.patch b/queue-5.10/net-mlx5-dr-fix-null-vs-is_err-checking-in-dr_domain.patch
new file mode 100644 (file)
index 0000000..37f8241
--- /dev/null
@@ -0,0 +1,47 @@
+From 057304af5f6d5016631657a3ce2ad9b70e644ad4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Dec 2021 06:54:53 +0000
+Subject: net/mlx5: DR, Fix NULL vs IS_ERR checking in dr_domain_init_resources
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 6b8b42585886c59a008015083282aae434349094 ]
+
+The mlx5_get_uars_page() function returns error pointers.
+Use IS_ERR() to check the return value to fix this.
+
+Fixes: 4ec9e7b02697 ("net/mlx5: DR, Expose steering domain functionality")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+index 00d861361428f..16a7c7ec5e138 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2019 Mellanox Technologies. */
+ #include <linux/mlx5/eswitch.h>
++#include <linux/err.h>
+ #include "dr_types.h"
+ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)        \
+@@ -69,9 +70,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
+       }
+       dmn->uar = mlx5_get_uars_page(dmn->mdev);
+-      if (!dmn->uar) {
++      if (IS_ERR(dmn->uar)) {
+               mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+-              ret = -ENOMEM;
++              ret = PTR_ERR(dmn->uar);
+               goto clean_pd;
+       }
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-mlx5e-fix-icosq-recovery-flow-for-xsk.patch b/queue-5.10/net-mlx5e-fix-icosq-recovery-flow-for-xsk.patch
new file mode 100644 (file)
index 0000000..2d38038
--- /dev/null
@@ -0,0 +1,124 @@
+From 569da777707f3713868733e4c351bdda23718086 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jul 2020 16:32:44 +0300
+Subject: net/mlx5e: Fix ICOSQ recovery flow for XSK
+
+From: Maxim Mikityanskiy <maximmi@mellanox.com>
+
+[ Upstream commit 19c4aba2d4e23997061fb11aed8a3e41334bfa14 ]
+
+There are two ICOSQs per channel: one is needed for RX, and the other
+for async operations (XSK TX, kTLS offload). Currently, the recovery
+flow for both is the same, and async ICOSQ is mistakenly treated like
+the regular ICOSQ.
+
+This patch prevents running the regular ICOSQ recovery on async ICOSQ.
+The purpose of async ICOSQ is to handle XSK wakeup requests and post
+kTLS offload RX parameters, it has nothing to do with RQ and XSKRQ UMRs,
+so the regular recovery sequence is not applicable here.
+
+Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ")
+Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
+Reviewed-by: Aya Levin <ayal@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h  |  3 --
+ .../net/ethernet/mellanox/mlx5/core/en_main.c | 30 ++++++++++++++-----
+ 2 files changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 9da34f82d4668..73060b30fece3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -916,9 +916,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+ void mlx5e_close_rq(struct mlx5e_rq *rq);
+ struct mlx5e_sq_param;
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+-                   struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
+ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+                    struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
+                    struct mlx5e_xdpsq *sq, bool is_redirect);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 6ec4b96497ffb..3f5a2bb9b3c0b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1051,9 +1051,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+       mlx5e_reporter_icosq_cqe_err(sq);
+ }
++static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
++{
++      struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
++                                            recover_work);
++
++      /* Not implemented yet. */
++
++      netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
++}
++
+ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+                            struct mlx5e_sq_param *param,
+-                           struct mlx5e_icosq *sq)
++                           struct mlx5e_icosq *sq,
++                           work_func_t recover_work_func)
+ {
+       void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
+       struct mlx5_core_dev *mdev = c->mdev;
+@@ -1073,7 +1084,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+       if (err)
+               goto err_sq_wq_destroy;
+-      INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
++      INIT_WORK(&sq->recover_work, recover_work_func);
+       return 0;
+@@ -1423,13 +1434,14 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
+       mlx5e_reporter_tx_err_cqe(sq);
+ }
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+-                   struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
++static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
++                          struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
++                          work_func_t recover_work_func)
+ {
+       struct mlx5e_create_sq_param csp = {};
+       int err;
+-      err = mlx5e_alloc_icosq(c, param, sq);
++      err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
+       if (err)
+               return err;
+@@ -1459,7 +1471,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+       synchronize_net(); /* Sync with NAPI. */
+ }
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
++static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+ {
+       struct mlx5e_channel *c = sq->channel;
+@@ -1862,11 +1874,13 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
+       spin_lock_init(&c->async_icosq_lock);
+-      err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
++      err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
++                             mlx5e_async_icosq_err_cqe_work);
+       if (err)
+               goto err_disable_napi;
+-      err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
++      err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
++                             mlx5e_icosq_err_cqe_work);
+       if (err)
+               goto err_close_async_icosq;
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-mlx5e-fix-wrong-features-assignment-in-case-of-e.patch b/queue-5.10/net-mlx5e-fix-wrong-features-assignment-in-case-of-e.patch
new file mode 100644 (file)
index 0000000..b96a96c
--- /dev/null
@@ -0,0 +1,86 @@
+From 5fc12915ecd057781ac5e00f331afedf438c5eec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Nov 2021 11:08:41 +0200
+Subject: net/mlx5e: Fix wrong features assignment in case of error
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit 992d8a4e38f0527f24e273ce3a9cd6dea1a6a436 ]
+
+In case of an error in mlx5e_set_features(), 'netdev->features' must be
+updated with the correct state of the device to indicate which features
+were updated successfully.
+To do that we maintain a copy of 'netdev->features' and update it after
+successful feature changes, so we can assign it to back to
+'netdev->features' if needed.
+
+However, since not all netdev features are handled by the driver (e.g.
+GRO/TSO/etc), some features may not be updated correctly in case of an
+error updating another feature.
+
+For example, while requesting to disable TSO (feature which is not
+handled by the driver) and enable HW-GRO, if an error occurs during
+HW-GRO enable, 'oper_features' will be assigned with 'netdev->features'
+and HW-GRO turned off. TSO will remain enabled in such case, which is a
+bug.
+
+To solve that, instead of using 'netdev->features' as the baseline of
+'oper_features' and changing it on set feature success, use 'features'
+instead and update it in case of errors.
+
+Fixes: 75b81ce719b7 ("net/mlx5e: Don't override netdev features field unless in error flow")
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3f5a2bb9b3c0b..2f6c3a5813ed1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3935,12 +3935,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
+ static int mlx5e_handle_feature(struct net_device *netdev,
+                               netdev_features_t *features,
+-                              netdev_features_t wanted_features,
+                               netdev_features_t feature,
+                               mlx5e_feature_handler feature_handler)
+ {
+-      netdev_features_t changes = wanted_features ^ netdev->features;
+-      bool enable = !!(wanted_features & feature);
++      netdev_features_t changes = *features ^ netdev->features;
++      bool enable = !!(*features & feature);
+       int err;
+       if (!(changes & feature))
+@@ -3948,22 +3947,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
+       err = feature_handler(netdev, enable);
+       if (err) {
++              MLX5E_SET_FEATURE(features, feature, !enable);
+               netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+                          enable ? "Enable" : "Disable", &feature, err);
+               return err;
+       }
+-      MLX5E_SET_FEATURE(features, feature, enable);
+       return 0;
+ }
+ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
+ {
+-      netdev_features_t oper_features = netdev->features;
++      netdev_features_t oper_features = features;
+       int err = 0;
+ #define MLX5E_HANDLE_FEATURE(feature, handler) \
+-      mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
++      mlx5e_handle_feature(netdev, &oper_features, feature, handler)
+       err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+       err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-mlx5e-wrap-the-tx-reporter-dump-callback-to-extr.patch b/queue-5.10/net-mlx5e-wrap-the-tx-reporter-dump-callback-to-extr.patch
new file mode 100644 (file)
index 0000000..2464042
--- /dev/null
@@ -0,0 +1,92 @@
+From 674ad018c93f2cc962c914ef35b887abf175d299 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Nov 2021 16:05:44 +0200
+Subject: net/mlx5e: Wrap the tx reporter dump callback to extract the sq
+
+From: Amir Tzin <amirtz@nvidia.com>
+
+[ Upstream commit 918fc3855a6507a200e9cf22c20be852c0982687 ]
+
+Function mlx5e_tx_reporter_dump_sq() casts its void * argument to struct
+mlx5e_txqsq *, but in TX-timeout-recovery flow the argument is actually
+of type struct mlx5e_tx_timeout_ctx *.
+
+ mlx5_core 0000:08:00.1 enp8s0f1: TX timeout detected
+ mlx5_core 0000:08:00.1 enp8s0f1: TX timeout on queue: 1, SQ: 0x11ec, CQ: 0x146d, SQ Cons: 0x0 SQ Prod: 0x1, usecs since last trans: 21565000
+ BUG: stack guard page was hit at 0000000093f1a2de (stack is 00000000b66ea0dc..000000004d932dae)
+ kernel stack overflow (page fault): 0000 [#1] SMP NOPTI
+ CPU: 5 PID: 95 Comm: kworker/u20:1 Tainted: G W OE 5.13.0_mlnx #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Workqueue: mlx5e mlx5e_tx_timeout_work [mlx5_core]
+ RIP: 0010:mlx5e_tx_reporter_dump_sq+0xd3/0x180
+ [mlx5_core]
+ Call Trace:
+ mlx5e_tx_reporter_dump+0x43/0x1c0 [mlx5_core]
+ devlink_health_do_dump.part.91+0x71/0xd0
+ devlink_health_report+0x157/0x1b0
+ mlx5e_reporter_tx_timeout+0xb9/0xf0 [mlx5_core]
+ ? mlx5e_tx_reporter_err_cqe_recover+0x1d0/0x1d0
+ [mlx5_core]
+ ? mlx5e_health_queue_dump+0xd0/0xd0 [mlx5_core]
+ ? update_load_avg+0x19b/0x550
+ ? set_next_entity+0x72/0x80
+ ? pick_next_task_fair+0x227/0x340
+ ? finish_task_switch+0xa2/0x280
+   mlx5e_tx_timeout_work+0x83/0xb0 [mlx5_core]
+   process_one_work+0x1de/0x3a0
+   worker_thread+0x2d/0x3c0
+ ? process_one_work+0x3a0/0x3a0
+   kthread+0x115/0x130
+ ? kthread_park+0x90/0x90
+   ret_from_fork+0x1f/0x30
+ --[ end trace 51ccabea504edaff ]---
+ RIP: 0010:mlx5e_tx_reporter_dump_sq+0xd3/0x180
+ PKRU: 55555554
+ Kernel panic - not syncing: Fatal exception
+ Kernel Offset: disabled
+ end Kernel panic - not syncing: Fatal exception
+
+To fix this bug add a wrapper for mlx5e_tx_reporter_dump_sq() which
+extracts the sq from struct mlx5e_tx_timeout_ctx and set it as the
+TX-timeout-recovery flow dump callback.
+
+Fixes: 5f29458b77d5 ("net/mlx5e: Support dump callback in TX reporter")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Signed-off-by: Amir Tzin <amirtz@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en/reporter_tx.c   | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index 8be6eaa3eeb14..13dd34c571b9f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -335,6 +335,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ }
++static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
++                                        void *ctx)
++{
++      struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
++
++      return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
++}
++
+ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+                                         struct devlink_fmsg *fmsg)
+ {
+@@ -418,7 +426,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+       to_ctx.sq = sq;
+       err_ctx.ctx = &to_ctx;
+       err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+-      err_ctx.dump = mlx5e_tx_reporter_dump_sq;
++      err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
+       snprintf(err_str, sizeof(err_str),
+                "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+                sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-ncsi-check-for-error-return-from-call-to-nla_put.patch b/queue-5.10/net-ncsi-check-for-error-return-from-call-to-nla_put.patch
new file mode 100644 (file)
index 0000000..b35c4c1
--- /dev/null
@@ -0,0 +1,45 @@
+From ba77e118888c960d02bc5ef73ebb2eee52c8c1fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Dec 2021 11:21:18 +0800
+Subject: net/ncsi: check for error return from call to nla_put_u32
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 92a34ab169f9eefe29cd420ce96b0a0a2a1da853 ]
+
+As we can see from the comment of the nla_put() that it could return
+-EMSGSIZE if the tailroom of the skb is insufficient.
+Therefore, it should be better to check the return value of the
+nla_put_u32 and return the error code if error accurs.
+Also, there are many other functions have the same problem, and if this
+patch is correct, I will commit a new version to fix all.
+
+Fixes: 955dc68cb9b2 ("net/ncsi: Add generic netlink family")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Link: https://lore.kernel.org/r/20211229032118.1706294-1-jiasheng@iscas.ac.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ncsi/ncsi-netlink.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index bb5f1650f11cb..c189b4c8a1823 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
+               pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
+               if (!pnest)
+                       return -ENOMEM;
+-              nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
++              rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
++              if (rc) {
++                      nla_nest_cancel(skb, pnest);
++                      return rc;
++              }
+               if ((0x1 << np->id) == ndp->package_whitelist)
+                       nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
+               cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-phy-fixed_phy-fix-null-vs-is_err-checking-in-__f.patch b/queue-5.10/net-phy-fixed_phy-fix-null-vs-is_err-checking-in-__f.patch
new file mode 100644 (file)
index 0000000..a198c4e
--- /dev/null
@@ -0,0 +1,40 @@
+From 8b25b224878f37718790360ac6b9bd106bcd0f4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Dec 2021 02:14:59 +0000
+Subject: net: phy: fixed_phy: Fix NULL vs IS_ERR() checking in
+ __fixed_phy_register
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit b45396afa4177f2b1ddfeff7185da733fade1dc3 ]
+
+The fixed_phy_get_gpiod function() returns NULL, it doesn't return error
+pointers, using NULL checking to fix this.i
+
+Fixes: 5468e82f7034 ("net: phy: fixed-phy: Drop GPIO from fixed_phy_add()")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Link: https://lore.kernel.org/r/20211224021500.10362-1-linmq006@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/fixed_phy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+index 18d81f43f2a88..dd30a6883a027 100644
+--- a/drivers/net/phy/fixed_phy.c
++++ b/drivers/net/phy/fixed_phy.c
+@@ -239,8 +239,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
+       /* Check if we have a GPIO associated with this fixed phy */
+       if (!gpiod) {
+               gpiod = fixed_phy_get_gpiod(np);
+-              if (IS_ERR(gpiod))
+-                      return ERR_CAST(gpiod);
++              if (!gpiod)
++                      return ERR_PTR(-EINVAL);
+       }
+       /* Get the next available PHY address, up to PHY_MAX_ADDR */
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-smc-don-t-send-cdc-llc-message-if-link-not-ready.patch b/queue-5.10/net-smc-don-t-send-cdc-llc-message-if-link-not-ready.patch
new file mode 100644 (file)
index 0000000..bfeec34
--- /dev/null
@@ -0,0 +1,120 @@
+From a452664a80cb6bf5ac6027fccaccaaf9b1560a9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Dec 2021 17:03:24 +0800
+Subject: net/smc: don't send CDC/LLC message if link not ready
+
+From: Dust Li <dust.li@linux.alibaba.com>
+
+[ Upstream commit 90cee52f2e780345d3629e278291aea5ac74f40f ]
+
+We found smc_llc_send_link_delete_all() sometimes wait
+for 2s timeout when testing with RDMA link up/down.
+It is possible when a smc_link is in ACTIVATING state,
+the underlaying QP is still in RESET or RTR state, which
+cannot send any messages out.
+
+smc_llc_send_link_delete_all() use smc_link_usable() to
+checks whether the link is usable, if the QP is still in
+RESET or RTR state, but the smc_link is in ACTIVATING, this
+LLC message will always fail without any CQE entering the
+CQ, and we will always wait 2s before timeout.
+
+Since we cannot send any messages through the QP before
+the QP enter RTS. I add a wrapper smc_link_sendable()
+which checks the state of QP along with the link state.
+And replace smc_link_usable() with smc_link_sendable()
+in all LLC & CDC message sending routine.
+
+Fixes: 5f08318f617b ("smc: connection data control (CDC)")
+Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_core.c | 2 +-
+ net/smc/smc_core.h | 6 ++++++
+ net/smc/smc_llc.c  | 2 +-
+ net/smc/smc_wr.c   | 4 ++--
+ net/smc/smc_wr.h   | 2 +-
+ 5 files changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 135949ef85b3c..fb4327a81a0f0 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -226,7 +226,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
+       for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+               struct smc_link *lnk = &lgr->lnk[i];
+-              if (smc_link_usable(lnk))
++              if (smc_link_sendable(lnk))
+                       lnk->state = SMC_LNK_INACTIVE;
+       }
+       wake_up_all(&lgr->llc_msg_waiter);
+diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
+index 4745a9a5a28f5..9364d0f35ccec 100644
+--- a/net/smc/smc_core.h
++++ b/net/smc/smc_core.h
+@@ -359,6 +359,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
+       return true;
+ }
++static inline bool smc_link_sendable(struct smc_link *lnk)
++{
++      return smc_link_usable(lnk) &&
++              lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
++}
++
+ static inline bool smc_link_active(struct smc_link *lnk)
+ {
+       return lnk->state == SMC_LNK_ACTIVE;
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index f1d323439a2af..ee1f0fdba0855 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -1358,7 +1358,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
+       delllc.reason = htonl(rsn);
+       for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+-              if (!smc_link_usable(&lgr->lnk[i]))
++              if (!smc_link_sendable(&lgr->lnk[i]))
+                       continue;
+               if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
+                       break;
+diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
+index a71c9631f1ad3..cae22d240e0a6 100644
+--- a/net/smc/smc_wr.c
++++ b/net/smc/smc_wr.c
+@@ -169,7 +169,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
+ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
+ {
+       *idx = link->wr_tx_cnt;
+-      if (!smc_link_usable(link))
++      if (!smc_link_sendable(link))
+               return -ENOLINK;
+       for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
+               if (!test_and_set_bit(*idx, link->wr_tx_mask))
+@@ -212,7 +212,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
+       } else {
+               rc = wait_event_interruptible_timeout(
+                       link->wr_tx_wait,
+-                      !smc_link_usable(link) ||
++                      !smc_link_sendable(link) ||
+                       lgr->terminating ||
+                       (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
+                       SMC_WR_TX_WAIT_FREE_SLOT_TIME);
+diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
+index 2bc626f230a56..102d515757ee2 100644
+--- a/net/smc/smc_wr.h
++++ b/net/smc/smc_wr.h
+@@ -62,7 +62,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
+ static inline bool smc_wr_tx_link_hold(struct smc_link *link)
+ {
+-      if (!smc_link_usable(link))
++      if (!smc_link_sendable(link))
+               return false;
+       atomic_inc(&link->wr_tx_refcnt);
+       return true;
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-smc-fix-kernel-panic-caused-by-race-of-smc_sock.patch b/queue-5.10/net-smc-fix-kernel-panic-caused-by-race-of-smc_sock.patch
new file mode 100644 (file)
index 0000000..f6cede8
--- /dev/null
@@ -0,0 +1,437 @@
+From 41bb0b6d5aa161ffdece070413e03b236d9db6eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Dec 2021 17:03:25 +0800
+Subject: net/smc: fix kernel panic caused by race of smc_sock
+
+From: Dust Li <dust.li@linux.alibaba.com>
+
+[ Upstream commit 349d43127dac00c15231e8ffbcaabd70f7b0e544 ]
+
+A crash occurs when smc_cdc_tx_handler() tries to access smc_sock
+but smc_release() has already freed it.
+
+[ 4570.695099] BUG: unable to handle page fault for address: 000000002eae9e88
+[ 4570.696048] #PF: supervisor write access in kernel mode
+[ 4570.696728] #PF: error_code(0x0002) - not-present page
+[ 4570.697401] PGD 0 P4D 0
+[ 4570.697716] Oops: 0002 [#1] PREEMPT SMP NOPTI
+[ 4570.698228] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.16.0-rc4+ #111
+[ 4570.699013] Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 8c24b4c 04/0
+[ 4570.699933] RIP: 0010:_raw_spin_lock+0x1a/0x30
+<...>
+[ 4570.711446] Call Trace:
+[ 4570.711746]  <IRQ>
+[ 4570.711992]  smc_cdc_tx_handler+0x41/0xc0
+[ 4570.712470]  smc_wr_tx_tasklet_fn+0x213/0x560
+[ 4570.712981]  ? smc_cdc_tx_dismisser+0x10/0x10
+[ 4570.713489]  tasklet_action_common.isra.17+0x66/0x140
+[ 4570.714083]  __do_softirq+0x123/0x2f4
+[ 4570.714521]  irq_exit_rcu+0xc4/0xf0
+[ 4570.714934]  common_interrupt+0xba/0xe0
+
+Though smc_cdc_tx_handler() checked the existence of smc connection,
+smc_release() may have already dismissed and released the smc socket
+before smc_cdc_tx_handler() further visits it.
+
+smc_cdc_tx_handler()           |smc_release()
+if (!conn)                     |
+                               |
+                               |smc_cdc_tx_dismiss_slots()
+                               |      smc_cdc_tx_dismisser()
+                               |
+                               |sock_put(&smc->sk) <- last sock_put,
+                               |                      smc_sock freed
+bh_lock_sock(&smc->sk) (panic) |
+
+To make sure we won't receive any CDC messages after we free the
+smc_sock, add a refcount on the smc_connection for inflight CDC
+message(posted to the QP but haven't received related CQE), and
+don't release the smc_connection until all the inflight CDC messages
+haven been done, for both success or failed ones.
+
+Using refcount on CDC messages brings another problem: when the link
+is going to be destroyed, smcr_link_clear() will reset the QP, which
+then remove all the pending CQEs related to the QP in the CQ. To make
+sure all the CQEs will always come back so the refcount on the
+smc_connection can always reach 0, smc_ib_modify_qp_reset() was replaced
+by smc_ib_modify_qp_error().
+And remove the timeout in smc_wr_tx_wait_no_pending_sends() since we
+need to wait for all pending WQEs done, or we may encounter use-after-
+free when handling CQEs.
+
+For IB device removal routine, we need to wait for all the QPs on that
+device been destroyed before we can destroy CQs on the device, or
+the refcount on smc_connection won't reach 0 and smc_sock cannot be
+released.
+
+Fixes: 5f08318f617b ("smc: connection data control (CDC)")
+Reported-by: Wen Gu <guwen@linux.alibaba.com>
+Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc.h      |  5 +++++
+ net/smc/smc_cdc.c  | 52 +++++++++++++++++++++-------------------------
+ net/smc/smc_cdc.h  |  2 +-
+ net/smc/smc_core.c | 25 +++++++++++++++++-----
+ net/smc/smc_ib.c   |  4 ++--
+ net/smc/smc_ib.h   |  1 +
+ net/smc/smc_wr.c   | 41 +++---------------------------------
+ net/smc/smc_wr.h   |  3 +--
+ 8 files changed, 57 insertions(+), 76 deletions(-)
+
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index d65e15f0c944c..e6919fe31617b 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -170,6 +170,11 @@ struct smc_connection {
+       u16                     tx_cdc_seq;     /* sequence # for CDC send */
+       u16                     tx_cdc_seq_fin; /* sequence # - tx completed */
+       spinlock_t              send_lock;      /* protect wr_sends */
++      atomic_t                cdc_pend_tx_wr; /* number of pending tx CDC wqe
++                                               * - inc when post wqe,
++                                               * - dec on polled tx cqe
++                                               */
++      wait_queue_head_t       cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
+       struct delayed_work     tx_work;        /* retry of smc_cdc_msg_send */
+       u32                     tx_off;         /* base offset in peer rmb */
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 3602829006dda..0c490cdde6a49 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+       struct smc_sock *smc;
+       int diff;
+-      if (!conn)
+-              /* already dismissed */
+-              return;
+-
+       smc = container_of(conn, struct smc_sock, conn);
+       bh_lock_sock(&smc->sk);
+       if (!wc_status) {
+@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+                             conn);
+               conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
+       }
++
++      if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
++          unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
++              wake_up(&conn->cdc_pend_tx_wq);
++      WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
++
+       smc_tx_sndbuf_nonfull(smc);
+       bh_unlock_sock(&smc->sk);
+ }
+@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+       conn->tx_cdc_seq++;
+       conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+       smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
++
++      atomic_inc(&conn->cdc_pend_tx_wr);
++      smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
++
+       rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+       if (!rc) {
+               smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
+@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+       } else {
+               conn->tx_cdc_seq--;
+               conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
++              atomic_dec(&conn->cdc_pend_tx_wr);
+       }
+       return rc;
+@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
+       peer->token = htonl(local->token);
+       peer->prod_flags.failover_validation = 1;
++      /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
++       * can handle properly
++       */
++      smc_cdc_add_pending_send(conn, pend);
++
++      atomic_inc(&conn->cdc_pend_tx_wr);
++      smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
++
+       rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
++      if (unlikely(rc))
++              atomic_dec(&conn->cdc_pend_tx_wr);
++
+       return rc;
+ }
+@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+       return rc;
+ }
+-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
+-                            unsigned long data)
++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
+ {
+-      struct smc_connection *conn = (struct smc_connection *)data;
+-      struct smc_cdc_tx_pend *cdc_pend =
+-              (struct smc_cdc_tx_pend *)tx_pend;
+-
+-      return cdc_pend->conn == conn;
+-}
+-
+-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
+-{
+-      struct smc_cdc_tx_pend *cdc_pend =
+-              (struct smc_cdc_tx_pend *)tx_pend;
+-
+-      cdc_pend->conn = NULL;
+-}
+-
+-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
+-{
+-      struct smc_link *link = conn->lnk;
+-
+-      smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
+-                              smc_cdc_tx_filter, smc_cdc_tx_dismisser,
+-                              (unsigned long)conn);
++      wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
+ }
+ /* Send a SMC-D CDC header.
+diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
+index 0a0a89abd38b2..696cc11f2303b 100644
+--- a/net/smc/smc_cdc.h
++++ b/net/smc/smc_cdc.h
+@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
+                         struct smc_wr_buf **wr_buf,
+                         struct smc_rdma_wr **wr_rdma_buf,
+                         struct smc_cdc_tx_pend **pend);
+-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
+ int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
+                    struct smc_cdc_tx_pend *pend);
+ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index fb4327a81a0f0..2a22dc85951ee 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -657,7 +657,7 @@ void smc_conn_free(struct smc_connection *conn)
+                       smc_ism_unset_conn(conn);
+               tasklet_kill(&conn->rx_tsklet);
+       } else {
+-              smc_cdc_tx_dismiss_slots(conn);
++              smc_cdc_wait_pend_tx_wr(conn);
+               if (current_work() != &conn->abort_work)
+                       cancel_work_sync(&conn->abort_work);
+       }
+@@ -734,7 +734,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
+       smc_llc_link_clear(lnk, log);
+       smcr_buf_unmap_lgr(lnk);
+       smcr_rtoken_clear_link(lnk);
+-      smc_ib_modify_qp_reset(lnk);
++      smc_ib_modify_qp_error(lnk);
+       smc_wr_free_link(lnk);
+       smc_ib_destroy_queue_pair(lnk);
+       smc_ib_dealloc_protection_domain(lnk);
+@@ -878,7 +878,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
+               else
+                       tasklet_unlock_wait(&conn->rx_tsklet);
+       } else {
+-              smc_cdc_tx_dismiss_slots(conn);
++              smc_cdc_wait_pend_tx_wr(conn);
+       }
+       smc_lgr_unregister_conn(conn);
+       smc_close_active_abort(smc);
+@@ -1002,11 +1002,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
+ /* Called when an SMCR device is removed or the smc module is unloaded.
+  * If smcibdev is given, all SMCR link groups using this device are terminated.
+  * If smcibdev is NULL, all SMCR link groups are terminated.
++ *
++ * We must wait here for QPs been destroyed before we destroy the CQs,
++ * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus
++ * smc_sock cannot be released.
+  */
+ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+ {
+       struct smc_link_group *lgr, *lg;
+       LIST_HEAD(lgr_free_list);
++      LIST_HEAD(lgr_linkdown_list);
+       int i;
+       spin_lock_bh(&smc_lgr_list.lock);
+@@ -1018,7 +1023,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+               list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+                       for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+                               if (lgr->lnk[i].smcibdev == smcibdev)
+-                                      smcr_link_down_cond_sched(&lgr->lnk[i]);
++                                      list_move_tail(&lgr->list, &lgr_linkdown_list);
+                       }
+               }
+       }
+@@ -1030,6 +1035,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+               __smc_lgr_terminate(lgr, false);
+       }
++      list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
++              for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
++                      if (lgr->lnk[i].smcibdev == smcibdev) {
++                              mutex_lock(&lgr->llc_conf_mutex);
++                              smcr_link_down_cond(&lgr->lnk[i]);
++                              mutex_unlock(&lgr->llc_conf_mutex);
++                      }
++              }
++      }
++
+       if (smcibdev) {
+               if (atomic_read(&smcibdev->lnk_cnt))
+                       wait_event(smcibdev->lnks_deleted,
+@@ -1129,7 +1144,6 @@ static void smcr_link_down(struct smc_link *lnk)
+       if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
+               return;
+-      smc_ib_modify_qp_reset(lnk);
+       to_lnk = smc_switch_conns(lgr, lnk, true);
+       if (!to_lnk) { /* no backup link available */
+               smcr_link_clear(lnk, true);
+@@ -1357,6 +1371,7 @@ create:
+       conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
+       conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
+       conn->urg_state = SMC_URG_READ;
++      init_waitqueue_head(&conn->cdc_pend_tx_wq);
+       INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
+       if (ini->is_smcd) {
+               conn->rx_off = sizeof(struct smcd_cdc_msg);
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index fc766b537ac7a..f1ffbd414602e 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -100,12 +100,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
+                           IB_QP_MAX_QP_RD_ATOMIC);
+ }
+-int smc_ib_modify_qp_reset(struct smc_link *lnk)
++int smc_ib_modify_qp_error(struct smc_link *lnk)
+ {
+       struct ib_qp_attr qp_attr;
+       memset(&qp_attr, 0, sizeof(qp_attr));
+-      qp_attr.qp_state = IB_QPS_RESET;
++      qp_attr.qp_state = IB_QPS_ERR;
+       return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
+ }
+diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
+index 2ce481187dd0b..f90d15eae2aab 100644
+--- a/net/smc/smc_ib.h
++++ b/net/smc/smc_ib.h
+@@ -74,6 +74,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
+ int smc_ib_ready_link(struct smc_link *lnk);
+ int smc_ib_modify_qp_rts(struct smc_link *lnk);
+ int smc_ib_modify_qp_reset(struct smc_link *lnk);
++int smc_ib_modify_qp_error(struct smc_link *lnk);
+ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
+ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+                            struct smc_buf_desc *buf_slot, u8 link_idx);
+diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
+index cae22d240e0a6..5a81f8c9ebf90 100644
+--- a/net/smc/smc_wr.c
++++ b/net/smc/smc_wr.c
+@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
+ }
+ /* wait till all pending tx work requests on the given link are completed */
+-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+ {
+-      if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
+-                             SMC_WR_TX_WAIT_PENDING_TIME))
+-              return 0;
+-      else /* timeout */
+-              return -EPIPE;
++      wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
+ }
+ static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
+@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
+       struct smc_wr_tx_pend pnd_snd;
+       struct smc_link *link;
+       u32 pnd_snd_idx;
+-      int i;
+       link = wc->qp->qp_context;
+@@ -115,14 +110,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
+       if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
+               return;
+       if (wc->status) {
+-              for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
+-                      /* clear full struct smc_wr_tx_pend including .priv */
+-                      memset(&link->wr_tx_pends[i], 0,
+-                             sizeof(link->wr_tx_pends[i]));
+-                      memset(&link->wr_tx_bufs[i], 0,
+-                             sizeof(link->wr_tx_bufs[i]));
+-                      clear_bit(i, link->wr_tx_mask);
+-              }
+               /* terminate link */
+               smcr_link_down_cond_sched(link);
+       }
+@@ -351,25 +338,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
+       return rc;
+ }
+-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
+-                           smc_wr_tx_filter filter,
+-                           smc_wr_tx_dismisser dismisser,
+-                           unsigned long data)
+-{
+-      struct smc_wr_tx_pend_priv *tx_pend;
+-      struct smc_wr_rx_hdr *wr_tx;
+-      int i;
+-
+-      for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
+-              wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
+-              if (wr_tx->type != wr_tx_hdr_type)
+-                      continue;
+-              tx_pend = &link->wr_tx_pends[i].priv;
+-              if (filter(tx_pend, data))
+-                      dismisser(tx_pend);
+-      }
+-}
+-
+ /****************************** receive queue ********************************/
+ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
+@@ -574,10 +542,7 @@ void smc_wr_free_link(struct smc_link *lnk)
+       smc_wr_wakeup_reg_wait(lnk);
+       smc_wr_wakeup_tx_wait(lnk);
+-      if (smc_wr_tx_wait_no_pending_sends(lnk))
+-              memset(lnk->wr_tx_mask, 0,
+-                     BITS_TO_LONGS(SMC_WR_BUF_CNT) *
+-                                              sizeof(*lnk->wr_tx_mask));
++      smc_wr_tx_wait_no_pending_sends(lnk);
+       wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+       wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
+diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
+index 102d515757ee2..cb58e60078f57 100644
+--- a/net/smc/smc_wr.h
++++ b/net/smc/smc_wr.h
+@@ -22,7 +22,6 @@
+ #define SMC_WR_BUF_CNT 16     /* # of ctrl buffers per link */
+ #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
+-#define SMC_WR_TX_WAIT_PENDING_TIME   (5 * HZ)
+ #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
+@@ -122,7 +121,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
+                            smc_wr_tx_filter filter,
+                            smc_wr_tx_dismisser dismisser,
+                            unsigned long data);
+-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
+ int smc_wr_rx_post_init(struct smc_link *link);
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-smc-fix-using-of-uninitialized-completions.patch b/queue-5.10/net-smc-fix-using-of-uninitialized-completions.patch
new file mode 100644 (file)
index 0000000..35cd673
--- /dev/null
@@ -0,0 +1,54 @@
+From 9214471bc267e8bb9536ab3d04f34502b6f0d415 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Dec 2021 14:35:30 +0100
+Subject: net/smc: fix using of uninitialized completions
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit 6d7373dabfd3933ee30c40fc8c09d2a788f6ece1 ]
+
+In smc_wr_tx_send_wait() the completion on index specified by
+pend->idx is initialized and after smc_wr_tx_send() was called the wait
+for completion starts. pend->idx is used to get the correct index for
+the wait, but the pend structure could already be cleared in
+smc_wr_tx_process_cqe().
+Introduce pnd_idx to hold and use a local copy of the correct index.
+
+Fixes: 09c61d24f96d ("net/smc: wait for departure of an IB message")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_wr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
+index 9dbe4804853e0..a71c9631f1ad3 100644
+--- a/net/smc/smc_wr.c
++++ b/net/smc/smc_wr.c
+@@ -288,18 +288,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
+                       unsigned long timeout)
+ {
+       struct smc_wr_tx_pend *pend;
++      u32 pnd_idx;
+       int rc;
+       pend = container_of(priv, struct smc_wr_tx_pend, priv);
+       pend->compl_requested = 1;
+-      init_completion(&link->wr_tx_compl[pend->idx]);
++      pnd_idx = pend->idx;
++      init_completion(&link->wr_tx_compl[pnd_idx]);
+       rc = smc_wr_tx_send(link, priv);
+       if (rc)
+               return rc;
+       /* wait for completion by smc_wr_tx_process_cqe() */
+       rc = wait_for_completion_interruptible_timeout(
+-                                      &link->wr_tx_compl[pend->idx], timeout);
++                                      &link->wr_tx_compl[pnd_idx], timeout);
+       if (rc <= 0)
+               rc = -ENODATA;
+       if (rc > 0)
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-smc-improved-fix-wait-on-already-cleared-link.patch b/queue-5.10/net-smc-improved-fix-wait-on-already-cleared-link.patch
new file mode 100644 (file)
index 0000000..33b06d0
--- /dev/null
@@ -0,0 +1,386 @@
+From 92c68b2f794b62a97df9fb2c46de410534158a7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Oct 2021 16:14:40 +0200
+Subject: net/smc: improved fix wait on already cleared link
+
+From: Karsten Graul <kgraul@linux.ibm.com>
+
+[ Upstream commit 95f7f3e7dc6bd2e735cb5de11734ea2222b1e05a ]
+
+Commit 8f3d65c16679 ("net/smc: fix wait on already cleared link")
+introduced link refcounting to avoid waits on already cleared links.
+This patch extends and improves the refcounting to cover all
+remaining possible cases for this kind of error situation.
+
+Fixes: 15e1b99aadfb ("net/smc: no WR buffer wait for terminating link group")
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_cdc.c  |  7 +++++-
+ net/smc/smc_core.c | 20 ++++++++-------
+ net/smc/smc_llc.c  | 63 +++++++++++++++++++++++++++++++++++-----------
+ net/smc/smc_tx.c   | 22 ++++------------
+ net/smc/smc_wr.h   | 14 +++++++++++
+ 5 files changed, 85 insertions(+), 41 deletions(-)
+
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index b1ce6ccbfaec8..3602829006dda 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -150,9 +150,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+ again:
+       link = conn->lnk;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       spin_lock_bh(&conn->send_lock);
+       if (link != conn->lnk) {
+@@ -160,6 +162,7 @@ again:
+               spin_unlock_bh(&conn->send_lock);
+               smc_wr_tx_put_slot(link,
+                                  (struct smc_wr_tx_pend_priv *)pend);
++              smc_wr_tx_link_put(link);
+               if (again)
+                       return -ENOLINK;
+               again = true;
+@@ -167,6 +170,8 @@ again:
+       }
+       rc = smc_cdc_msg_send(conn, wr_buf, pend);
+       spin_unlock_bh(&conn->send_lock);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 3f1343dfa16ba..135949ef85b3c 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -550,7 +550,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
+               to_lnk = &lgr->lnk[i];
+               break;
+       }
+-      if (!to_lnk) {
++      if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
+               smc_lgr_terminate_sched(lgr);
+               return NULL;
+       }
+@@ -582,24 +582,26 @@ again:
+               read_unlock_bh(&lgr->conns_lock);
+               /* pre-fetch buffer outside of send_lock, might sleep */
+               rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
+-              if (rc) {
+-                      smcr_link_down_cond_sched(to_lnk);
+-                      return NULL;
+-              }
++              if (rc)
++                      goto err_out;
+               /* avoid race with smcr_tx_sndbuf_nonempty() */
+               spin_lock_bh(&conn->send_lock);
+               conn->lnk = to_lnk;
+               rc = smc_switch_cursor(smc, pend, wr_buf);
+               spin_unlock_bh(&conn->send_lock);
+               sock_put(&smc->sk);
+-              if (rc) {
+-                      smcr_link_down_cond_sched(to_lnk);
+-                      return NULL;
+-              }
++              if (rc)
++                      goto err_out;
+               goto again;
+       }
+       read_unlock_bh(&lgr->conns_lock);
++      smc_wr_tx_link_put(to_lnk);
+       return to_lnk;
++
++err_out:
++      smcr_link_down_cond_sched(to_lnk);
++      smc_wr_tx_link_put(to_lnk);
++      return NULL;
+ }
+ static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index d8fe4e1f24d1f..f1d323439a2af 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -383,9 +383,11 @@ int smc_llc_send_confirm_link(struct smc_link *link,
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
+       memset(confllc, 0, sizeof(*confllc));
+       confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
+@@ -402,6 +404,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
+       confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+@@ -415,9 +419,11 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
+       struct smc_link *link;
+       int i, rc, rtok_ix;
++      if (!smc_wr_tx_link_hold(send_link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
+       memset(rkeyllc, 0, sizeof(*rkeyllc));
+       rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
+@@ -444,6 +450,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
+               (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
+       /* send llc message */
+       rc = smc_wr_tx_send(send_link, pend);
++put_out:
++      smc_wr_tx_link_put(send_link);
+       return rc;
+ }
+@@ -456,9 +464,11 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
+       memset(rkeyllc, 0, sizeof(*rkeyllc));
+       rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
+@@ -467,6 +477,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
+       rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+@@ -480,9 +492,11 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       addllc = (struct smc_llc_msg_add_link *)wr_buf;
+       memset(addllc, 0, sizeof(*addllc));
+@@ -504,6 +518,8 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
+       }
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+@@ -517,9 +533,11 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       delllc = (struct smc_llc_msg_del_link *)wr_buf;
+       memset(delllc, 0, sizeof(*delllc));
+@@ -536,6 +554,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
+       delllc->reason = htonl(reason);
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+@@ -547,9 +567,11 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       testllc = (struct smc_llc_msg_test_link *)wr_buf;
+       memset(testllc, 0, sizeof(*testllc));
+       testllc->hd.common.type = SMC_LLC_TEST_LINK;
+@@ -557,6 +579,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
+       memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+@@ -567,13 +591,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
+       struct smc_wr_buf *wr_buf;
+       int rc;
+-      if (!smc_link_usable(link))
++      if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
+-      return smc_wr_tx_send(link, pend);
++      rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
++      return rc;
+ }
+ /* schedule an llc send on link, may wait for buffers,
+@@ -586,13 +613,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
+       struct smc_wr_buf *wr_buf;
+       int rc;
+-      if (!smc_link_usable(link))
++      if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
+-      return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
++      rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
++put_out:
++      smc_wr_tx_link_put(link);
++      return rc;
+ }
+ /********************************* receive ***********************************/
+@@ -672,9 +702,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+       struct smc_buf_desc *rmb;
+       u8 n;
++      if (!smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+-              return rc;
++              goto put_out;
+       addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
+       memset(addc_llc, 0, sizeof(*addc_llc));
+@@ -706,7 +738,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+       addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
+       if (lgr->role == SMC_CLNT)
+               addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
+-      return smc_wr_tx_send(link, pend);
++      rc = smc_wr_tx_send(link, pend);
++put_out:
++      smc_wr_tx_link_put(link);
++      return rc;
+ }
+ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
+index ff02952b3d03e..52ef1fca0b604 100644
+--- a/net/smc/smc_tx.c
++++ b/net/smc/smc_tx.c
+@@ -479,7 +479,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
+ /* Wakeup sndbuf consumers from any context (IRQ or process)
+  * since there is more data to transmit; usable snd_wnd as max transmit
+  */
+-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
++static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+ {
+       struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
+       struct smc_link *link = conn->lnk;
+@@ -488,8 +488,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+       struct smc_wr_buf *wr_buf;
+       int rc;
++      if (!link || !smc_wr_tx_link_hold(link))
++              return -ENOLINK;
+       rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
+       if (rc < 0) {
++              smc_wr_tx_link_put(link);
+               if (rc == -EBUSY) {
+                       struct smc_sock *smc =
+                               container_of(conn, struct smc_sock, conn);
+@@ -530,22 +533,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+ out_unlock:
+       spin_unlock_bh(&conn->send_lock);
+-      return rc;
+-}
+-
+-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+-{
+-      struct smc_link *link = conn->lnk;
+-      int rc = -ENOLINK;
+-
+-      if (!link)
+-              return rc;
+-
+-      atomic_inc(&link->wr_tx_refcnt);
+-      if (smc_link_usable(link))
+-              rc = _smcr_tx_sndbuf_nonempty(conn);
+-      if (atomic_dec_and_test(&link->wr_tx_refcnt))
+-              wake_up_all(&link->wr_tx_wait);
++      smc_wr_tx_link_put(link);
+       return rc;
+ }
+diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
+index 423b8709f1c9e..2bc626f230a56 100644
+--- a/net/smc/smc_wr.h
++++ b/net/smc/smc_wr.h
+@@ -60,6 +60,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
+       atomic_long_set(wr_tx_id, val);
+ }
++static inline bool smc_wr_tx_link_hold(struct smc_link *link)
++{
++      if (!smc_link_usable(link))
++              return false;
++      atomic_inc(&link->wr_tx_refcnt);
++      return true;
++}
++
++static inline void smc_wr_tx_link_put(struct smc_link *link)
++{
++      if (atomic_dec_and_test(&link->wr_tx_refcnt))
++              wake_up_all(&link->wr_tx_wait);
++}
++
+ static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
+ {
+       wake_up_all(&lnk->wr_tx_wait);
+-- 
+2.34.1
+
diff --git a/queue-5.10/net-usb-pegasus-do-not-drop-long-ethernet-frames.patch b/queue-5.10/net-usb-pegasus-do-not-drop-long-ethernet-frames.patch
new file mode 100644 (file)
index 0000000..eace7fe
--- /dev/null
@@ -0,0 +1,63 @@
+From 367d996d868e334751ddd2e2e6c3b930d08031ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Dec 2021 23:12:08 +0100
+Subject: net: usb: pegasus: Do not drop long Ethernet frames
+
+From: Matthias-Christian Ott <ott@mirix.org>
+
+[ Upstream commit ca506fca461b260ab32952b610c3d4aadc6c11fd ]
+
+The D-Link DSB-650TX (2001:4002) is unable to receive Ethernet frames
+that are longer than 1518 octets, for example, Ethernet frames that
+contain 802.1Q VLAN tags.
+
+The frames are sent to the pegasus driver via USB but the driver
+discards them because they have the Long_pkt field set to 1 in the
+received status report. The function read_bulk_callback of the pegasus
+driver treats such received "packets" (in the terminology of the
+hardware) as errors but the field simply does just indicate that the
+Ethernet frame (MAC destination to FCS) is longer than 1518 octets.
+
+It seems that in the 1990s there was a distinction between
+"giant" (> 1518) and "runt" (< 64) frames and the hardware includes
+flags to indicate this distinction. It seems that the purpose of the
+distinction "giant" frames was to not allow infinitely long frames due
+to transmission errors and to allow hardware to have an upper limit of
+the frame size. However, the hardware already has such limit with its
+2048 octet receive buffer and, therefore, Long_pkt is merely a
+convention and should not be treated as a receive error.
+
+Actually, the hardware is even able to receive Ethernet frames with 2048
+octets which exceeds the claimed frame size limit of the driver of
+1536 octets (PEGASUS_MTU).
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Matthias-Christian Ott <ott@mirix.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/pegasus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 2a748a924f838..138279bbb544b 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -518,11 +518,11 @@ static void read_bulk_callback(struct urb *urb)
+               goto goon;
+       rx_status = buf[count - 2];
+-      if (rx_status & 0x1e) {
++      if (rx_status & 0x1c) {
+               netif_dbg(pegasus, rx_err, net,
+                         "RX packet error %x\n", rx_status);
+               net->stats.rx_errors++;
+-              if (rx_status & 0x06)   /* long or runt */
++              if (rx_status & 0x04)   /* runt */
+                       net->stats.rx_length_errors++;
+               if (rx_status & 0x08)
+                       net->stats.rx_crc_errors++;
+-- 
+2.34.1
+
diff --git a/queue-5.10/nfc-st21nfca-fix-memory-leak-in-device-probe-and-rem.patch b/queue-5.10/nfc-st21nfca-fix-memory-leak-in-device-probe-and-rem.patch
new file mode 100644 (file)
index 0000000..1a24bb6
--- /dev/null
@@ -0,0 +1,101 @@
+From feba834abeec4196e46d26648962af05e697d87b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Dec 2021 12:48:11 +0000
+Subject: NFC: st21nfca: Fix memory leak in device probe and remove
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit 1b9dadba502234eea7244879b8d5d126bfaf9f0c ]
+
+'phy->pending_skb' is allocated during device probe, but is not freed
+in the error handling path and the remove path; this causes a memory leak
+as follows:
+
+unreferenced object 0xffff88800bc06800 (size 512):
+  comm "8", pid 11775, jiffies 4295159829 (age 9.032s)
+  hex dump (first 32 bytes):
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+  backtrace:
+    [<00000000d66c09ce>] __kmalloc_node_track_caller+0x1ed/0x450
+    [<00000000c93382b3>] kmalloc_reserve+0x37/0xd0
+    [<000000005fea522c>] __alloc_skb+0x124/0x380
+    [<0000000019f29f9a>] st21nfca_hci_i2c_probe+0x170/0x8f2
+
+Fix it by freeing 'pending_skb' in the error path and in remove.
+
+Fixes: 68957303f44a ("NFC: ST21NFCA: Add driver for STMicroelectronics ST21NFCA NFC Chip")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/st21nfca/i2c.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
+index 23ed11f91213d..6ea59426ab0bf 100644
+--- a/drivers/nfc/st21nfca/i2c.c
++++ b/drivers/nfc/st21nfca/i2c.c
+@@ -533,7 +533,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+       phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+       if (IS_ERR(phy->gpiod_ena)) {
+               nfc_err(dev, "Unable to get ENABLE GPIO\n");
+-              return PTR_ERR(phy->gpiod_ena);
++              r = PTR_ERR(phy->gpiod_ena);
++              goto out_free;
+       }
+       phy->se_status.is_ese_present =
+@@ -544,7 +545,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+       r = st21nfca_hci_platform_init(phy);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to reboot st21nfca\n");
+-              return r;
++              goto out_free;
+       }
+       r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+@@ -553,15 +554,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+                               ST21NFCA_HCI_DRIVER_NAME, phy);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to register IRQ handler\n");
+-              return r;
++              goto out_free;
+       }
+-      return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+-                                      ST21NFCA_FRAME_HEADROOM,
+-                                      ST21NFCA_FRAME_TAILROOM,
+-                                      ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+-                                      &phy->hdev,
+-                                      &phy->se_status);
++      r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
++                             ST21NFCA_FRAME_HEADROOM,
++                             ST21NFCA_FRAME_TAILROOM,
++                             ST21NFCA_HCI_LLC_MAX_PAYLOAD,
++                             &phy->hdev,
++                             &phy->se_status);
++      if (r)
++              goto out_free;
++
++      return 0;
++
++out_free:
++      kfree_skb(phy->pending_skb);
++      return r;
+ }
+ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+@@ -574,6 +583,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+       if (phy->powered)
+               st21nfca_hci_i2c_disable(phy);
++      if (phy->pending_skb)
++              kfree_skb(phy->pending_skb);
+       return 0;
+ }
+-- 
+2.34.1
+
diff --git a/queue-5.10/scsi-lpfc-terminate-string-in-lpfc_debugfs_nvmeio_tr.patch b/queue-5.10/scsi-lpfc-terminate-string-in-lpfc_debugfs_nvmeio_tr.patch
new file mode 100644 (file)
index 0000000..310a228
--- /dev/null
@@ -0,0 +1,40 @@
+From 0dedb7f261d7801fbf79aa467ca1146e61a132e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Dec 2021 10:05:27 +0300
+Subject: scsi: lpfc: Terminate string in lpfc_debugfs_nvmeio_trc_write()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 9020be114a47bf7ff33e179b3bb0016b91a098e6 ]
+
+The "mybuf" string comes from the user, so we need to ensure that it is NUL
+terminated.
+
+Link: https://lore.kernel.org/r/20211214070527.GA27934@kili
+Fixes: bd2cdd5e400f ("scsi: lpfc: NVME Initiator: Add debugfs support")
+Reviewed-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_debugfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index b89c5513243e8..beaf3a8d206f8 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2956,8 +2956,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
+       char mybuf[64];
+       char *pbuf;
+-      if (nbytes > 64)
+-              nbytes = 64;
++      if (nbytes > 63)
++              nbytes = 63;
+       memset(mybuf, 0, sizeof(mybuf));
+-- 
+2.34.1
+
diff --git a/queue-5.10/sctp-use-call_rcu-to-free-endpoint.patch b/queue-5.10/sctp-use-call_rcu-to-free-endpoint.patch
new file mode 100644 (file)
index 0000000..c2f3815
--- /dev/null
@@ -0,0 +1,277 @@
+From 3604a413ca463fcf12128de209e2cfdc6ab096ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Dec 2021 13:04:30 -0500
+Subject: sctp: use call_rcu to free endpoint
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 5ec7d18d1813a5bead0b495045606c93873aecbb ]
+
+This patch is to delay the endpoint free by calling call_rcu() to fix
+another use-after-free issue in sctp_sock_dump():
+
+  BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20
+  Call Trace:
+    __lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
+    lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
+    __raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
+    _raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168
+    spin_lock_bh include/linux/spinlock.h:334 [inline]
+    __lock_sock+0x203/0x350 net/core/sock.c:2253
+    lock_sock_nested+0xfe/0x120 net/core/sock.c:2774
+    lock_sock include/net/sock.h:1492 [inline]
+    sctp_sock_dump+0x122/0xb20 net/sctp/diag.c:324
+    sctp_for_each_transport+0x2b5/0x370 net/sctp/socket.c:5091
+    sctp_diag_dump+0x3ac/0x660 net/sctp/diag.c:527
+    __inet_diag_dump+0xa8/0x140 net/ipv4/inet_diag.c:1049
+    inet_diag_dump+0x9b/0x110 net/ipv4/inet_diag.c:1065
+    netlink_dump+0x606/0x1080 net/netlink/af_netlink.c:2244
+    __netlink_dump_start+0x59a/0x7c0 net/netlink/af_netlink.c:2352
+    netlink_dump_start include/linux/netlink.h:216 [inline]
+    inet_diag_handler_cmd+0x2ce/0x3f0 net/ipv4/inet_diag.c:1170
+    __sock_diag_cmd net/core/sock_diag.c:232 [inline]
+    sock_diag_rcv_msg+0x31d/0x410 net/core/sock_diag.c:263
+    netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2477
+    sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:274
+
+This issue occurs when asoc is peeled off and the old sk is freed after
+getting it by asoc->base.sk and before calling lock_sock(sk).
+
+To prevent the sk free, as a holder of the sk, ep should be alive when
+calling lock_sock(). This patch uses call_rcu() and moves sock_put and
+ep free into sctp_endpoint_destroy_rcu(), so that it's safe to try to
+hold the ep under rcu_read_lock in sctp_transport_traverse_process().
+
+If sctp_endpoint_hold() returns true, it means this ep is still alive
+and we have held it and can continue to dump it; If it returns false,
+it means this ep is dead and can be freed after rcu_read_unlock, and
+we should skip it.
+
+In sctp_sock_dump(), after locking the sk, if this ep is different from
+tsp->asoc->ep, it means during this dumping, this asoc was peeled off
+before calling lock_sock(), and the sk should be skipped; If this ep is
+the same with tsp->asoc->ep, it means no peeloff happens on this asoc,
+and due to lock_sock, no peeloff will happen either until release_sock.
+
+Note that delaying endpoint free won't delay the port release, as the
+port release happens in sctp_endpoint_destroy() before calling call_rcu().
+Also, freeing endpoint by call_rcu() makes it safe to access the sk by
+asoc->base.sk in sctp_assocs_seq_show() and sctp_rcv().
+
+Thanks Jones to bring this issue up.
+
+v1->v2:
+  - improve the changelog.
+  - add kfree(ep) into sctp_endpoint_destroy_rcu(), as Jakub noticed.
+
+Reported-by: syzbot+9276d76e83e3bcde6c99@syzkaller.appspotmail.com
+Reported-by: Lee Jones <lee.jones@linaro.org>
+Fixes: d25adbeb0cdb ("sctp: fix an use-after-free issue in sctp_sock_dump")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sctp/sctp.h    |  6 +++---
+ include/net/sctp/structs.h |  3 ++-
+ net/sctp/diag.c            | 12 ++++++------
+ net/sctp/endpointola.c     | 23 +++++++++++++++--------
+ net/sctp/socket.c          | 23 +++++++++++++++--------
+ 5 files changed, 41 insertions(+), 26 deletions(-)
+
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 4fc747b778eb6..33475d061823e 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -103,6 +103,7 @@ extern struct percpu_counter sctp_sockets_allocated;
+ int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+ struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
++typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
+ void sctp_transport_walk_start(struct rhashtable_iter *iter);
+ void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+ struct sctp_transport *sctp_transport_get_next(struct net *net,
+@@ -113,9 +114,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+                                 struct net *net,
+                                 const union sctp_addr *laddr,
+                                 const union sctp_addr *paddr, void *p);
+-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+-                          int (*cb_done)(struct sctp_transport *, void *),
+-                          struct net *net, int *pos, void *p);
++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
++                                  struct net *net, int *pos, void *p);
+ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+                      struct sctp_info *info);
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 51d698f2656fc..be9ff0422c162 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1339,6 +1339,7 @@ struct sctp_endpoint {
+       u32 secid;
+       u32 peer_secid;
++      struct rcu_head rcu;
+ };
+ /* Recover the outter endpoint structure. */
+@@ -1354,7 +1355,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
+ struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
+ void sctp_endpoint_free(struct sctp_endpoint *);
+ void sctp_endpoint_put(struct sctp_endpoint *);
+-void sctp_endpoint_hold(struct sctp_endpoint *);
++int sctp_endpoint_hold(struct sctp_endpoint *ep);
+ void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+ struct sctp_association *sctp_endpoint_lookup_assoc(
+       const struct sctp_endpoint *ep,
+diff --git a/net/sctp/diag.c b/net/sctp/diag.c
+index 493fc01e5d2b7..babadd6720a2b 100644
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -292,9 +292,8 @@ out:
+       return err;
+ }
+-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
++static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+ {
+-      struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       struct sk_buff *skb = commp->skb;
+@@ -304,6 +303,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+       int err = 0;
+       lock_sock(sk);
++      if (ep != tsp->asoc->ep)
++              goto release;
+       list_for_each_entry(assoc, &ep->asocs, asocs) {
+               if (cb->args[4] < cb->args[1])
+                       goto next;
+@@ -346,9 +347,8 @@ release:
+       return err;
+ }
+-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
++static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+ {
+-      struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       const struct inet_diag_req_v2 *r = commp->r;
+@@ -507,8 +507,8 @@ skip:
+       if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
+               goto done;
+-      sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
+-                              net, &pos, &commp);
++      sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
++                                      net, &pos, &commp);
+       cb->args[2] = pos;
+ done:
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index 48c9c2c7602f7..efffde7f2328e 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ }
+ /* Final destructor for endpoint.  */
++static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
++{
++      struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
++      struct sock *sk = ep->base.sk;
++
++      sctp_sk(sk)->ep = NULL;
++      sock_put(sk);
++
++      kfree(ep);
++      SCTP_DBG_OBJCNT_DEC(ep);
++}
++
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
+       struct sock *sk;
+@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+       if (sctp_sk(sk)->bind_hash)
+               sctp_put_port(sk);
+-      sctp_sk(sk)->ep = NULL;
+-      /* Give up our hold on the sock */
+-      sock_put(sk);
+-
+-      kfree(ep);
+-      SCTP_DBG_OBJCNT_DEC(ep);
++      call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
+ }
+ /* Hold a reference to an endpoint. */
+-void sctp_endpoint_hold(struct sctp_endpoint *ep)
++int sctp_endpoint_hold(struct sctp_endpoint *ep)
+ {
+-      refcount_inc(&ep->base.refcnt);
++      return refcount_inc_not_zero(&ep->base.refcnt);
+ }
+ /* Release a reference to an endpoint and clean up if there are
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e872bc50bbe61..0a9e2c7d8e5f5 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5223,11 +5223,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+ }
+ EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+-                          int (*cb_done)(struct sctp_transport *, void *),
+-                          struct net *net, int *pos, void *p) {
++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
++                                  struct net *net, int *pos, void *p)
++{
+       struct rhashtable_iter hti;
+       struct sctp_transport *tsp;
++      struct sctp_endpoint *ep;
+       int ret;
+ again:
+@@ -5236,26 +5237,32 @@ again:
+       tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+       for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+-              ret = cb(tsp, p);
+-              if (ret)
+-                      break;
++              ep = tsp->asoc->ep;
++              if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
++                      ret = cb(ep, tsp, p);
++                      if (ret)
++                              break;
++                      sctp_endpoint_put(ep);
++              }
+               (*pos)++;
+               sctp_transport_put(tsp);
+       }
+       sctp_transport_walk_stop(&hti);
+       if (ret) {
+-              if (cb_done && !cb_done(tsp, p)) {
++              if (cb_done && !cb_done(ep, tsp, p)) {
+                       (*pos)++;
++                      sctp_endpoint_put(ep);
+                       sctp_transport_put(tsp);
+                       goto again;
+               }
++              sctp_endpoint_put(ep);
+               sctp_transport_put(tsp);
+       }
+       return ret;
+ }
+-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
++EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
+ /* 7.2.1 Association Status (SCTP_STATUS)
+-- 
+2.34.1
+
diff --git a/queue-5.10/selftests-calculate-udpgso-segment-count-without-hea.patch b/queue-5.10/selftests-calculate-udpgso-segment-count-without-hea.patch
new file mode 100644 (file)
index 0000000..1f4acb6
--- /dev/null
@@ -0,0 +1,70 @@
+From e537c948c9b3907fe2ca0e2df2cde8cc8b12eb29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Dec 2021 22:24:41 +0000
+Subject: selftests: Calculate udpgso segment count without header adjustment
+
+From: Coco Li <lixiaoyan@google.com>
+
+[ Upstream commit 5471d5226c3b39b3d2f7011c082d5715795bd65c ]
+
+The below referenced commit correctly updated the computation of number
+of segments (gso_size) by using only the gso payload size and
+removing the header lengths.
+
+With this change the regression test started failing. Update
+the tests to match this new behavior.
+
+Both IPv4 and IPv6 tests are updated, as a separate patch in this series
+will update udp_v6_send_skb to match this change in udp_send_skb.
+
+Fixes: 158390e45612 ("udp: using datalen to cap max gso segments")
+Signed-off-by: Coco Li <lixiaoyan@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/20211223222441.2975883-2-lixiaoyan@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/udpgso.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index c66da6ffd6d8d..7badaf215de28 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
+       },
+       {
+               /* send max number of min sized segments */
+-              .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
++              .tlen = UDP_MAX_SEGMENTS,
+               .gso_len = 1,
+-              .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
++              .r_num_mss = UDP_MAX_SEGMENTS,
+       },
+       {
+               /* send max number + 1 of min sized segments: fail */
+-              .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
++              .tlen = UDP_MAX_SEGMENTS + 1,
+               .gso_len = 1,
+               .tfail = true,
+       },
+@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
+       },
+       {
+               /* send max number of min sized segments */
+-              .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
++              .tlen = UDP_MAX_SEGMENTS,
+               .gso_len = 1,
+-              .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
++              .r_num_mss = UDP_MAX_SEGMENTS,
+       },
+       {
+               /* send max number + 1 of min sized segments: fail */
+-              .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
++              .tlen = UDP_MAX_SEGMENTS + 1,
+               .gso_len = 1,
+               .tfail = true,
+       },
+-- 
+2.34.1
+
diff --git a/queue-5.10/selftests-net-udpgso_bench_tx-fix-dst-ip-argument.patch b/queue-5.10/selftests-net-udpgso_bench_tx-fix-dst-ip-argument.patch
new file mode 100644 (file)
index 0000000..287a53d
--- /dev/null
@@ -0,0 +1,63 @@
+From 3b530c72554b27662558029dfb089e5a86956a5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Dec 2021 18:58:10 +0800
+Subject: selftests/net: udpgso_bench_tx: fix dst ip argument
+
+From: wujianguo <wujianguo@chinatelecom.cn>
+
+[ Upstream commit 9c1952aeaa98b3cfc49e2a79cb2c7d6a674213e9 ]
+
+udpgso_bench_tx call setup_sockaddr() for dest address before
+parsing all arguments, if we specify "-p ${dst_port}" after "-D ${dst_ip}",
+then ${dst_port} will be ignored, and using default cfg_port 8000.
+
+This will cause test case "multiple GRO socks" failed in udpgro.sh.
+
+Setup sockaddr after parsing all arguments.
+
+Fixes: 3a687bef148d ("selftests: udp gso benchmark")
+Signed-off-by: Jianguo Wu <wujianguo@chinatelecom.cn>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/ff620d9f-5b52-06ab-5286-44b945453002@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/udpgso_bench_tx.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
+index 17512a43885e7..f1fdaa2702913 100644
+--- a/tools/testing/selftests/net/udpgso_bench_tx.c
++++ b/tools/testing/selftests/net/udpgso_bench_tx.c
+@@ -419,6 +419,7 @@ static void usage(const char *filepath)
+ static void parse_opts(int argc, char **argv)
+ {
++      const char *bind_addr = NULL;
+       int max_len, hdrlen;
+       int c;
+@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
+                       cfg_cpu = strtol(optarg, NULL, 0);
+                       break;
+               case 'D':
+-                      setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
++                      bind_addr = optarg;
+                       break;
+               case 'l':
+                       cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
+@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
+               }
+       }
++      if (!bind_addr)
++              bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
++
++      setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
++
+       if (optind != argc)
+               usage(argv[0]);
+-- 
+2.34.1
+
index 8046c7a8d962cd68f815c10e0c667490a8609066..1d538a2f5d74999384a9ebf1212720758daf3a8c 100644 (file)
@@ -7,3 +7,25 @@ platform-x86-apple-gmux-use-resource_size-with-res.patch
 memblock-fix-memblock_phys_alloc-section-mismatch-er.patch
 recordmcount.pl-fix-typo-in-s390-mcount-regex.patch
 selinux-initialize-proto-variable-in-selinux_ip_postroute_compat.patch
+scsi-lpfc-terminate-string-in-lpfc_debugfs_nvmeio_tr.patch
+net-mlx5-dr-fix-null-vs-is_err-checking-in-dr_domain.patch
+net-mlx5e-wrap-the-tx-reporter-dump-callback-to-extr.patch
+net-mlx5e-fix-icosq-recovery-flow-for-xsk.patch
+udp-using-datalen-to-cap-ipv6-udp-max-gso-segments.patch
+selftests-calculate-udpgso-segment-count-without-hea.patch
+net-phy-fixed_phy-fix-null-vs-is_err-checking-in-__f.patch
+sctp-use-call_rcu-to-free-endpoint.patch
+net-smc-fix-using-of-uninitialized-completions.patch
+net-usb-pegasus-do-not-drop-long-ethernet-frames.patch
+net-ag71xx-fix-a-potential-double-free-in-error-hand.patch
+net-lantiq_xrx200-fix-statistics-of-received-bytes.patch
+nfc-st21nfca-fix-memory-leak-in-device-probe-and-rem.patch
+net-smc-improved-fix-wait-on-already-cleared-link.patch
+net-smc-don-t-send-cdc-llc-message-if-link-not-ready.patch
+net-smc-fix-kernel-panic-caused-by-race-of-smc_sock.patch
+igc-fix-tx-timestamp-support-for-non-msi-x-platforms.patch
+ionic-initialize-the-lif-dbid_inuse-bitmap.patch
+net-mlx5e-fix-wrong-features-assignment-in-case-of-e.patch
+selftests-net-udpgso_bench_tx-fix-dst-ip-argument.patch
+net-ncsi-check-for-error-return-from-call-to-nla_put.patch
+fsl-fman-fix-missing-put_device-call-in-fman_port_pr.patch
diff --git a/queue-5.10/udp-using-datalen-to-cap-ipv6-udp-max-gso-segments.patch b/queue-5.10/udp-using-datalen-to-cap-ipv6-udp-max-gso-segments.patch
new file mode 100644 (file)
index 0000000..5537ff6
--- /dev/null
@@ -0,0 +1,44 @@
+From 584f5185736b5b82eaf5d9375269b72058485ead Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Dec 2021 22:24:40 +0000
+Subject: udp: using datalen to cap ipv6 udp max gso segments
+
+From: Coco Li <lixiaoyan@google.com>
+
+[ Upstream commit 736ef37fd9a44f5966e25319d08ff7ea99ac79e8 ]
+
+The max number of UDP gso segments is intended to cap to
+UDP_MAX_SEGMENTS, this is checked in udp_send_skb().
+
+skb->len contains network and transport header len here, we should use
+only data len instead.
+
+This is the ipv6 counterpart to the below referenced commit,
+which missed the ipv6 change
+
+Fixes: 158390e45612 ("udp: using datalen to cap max gso segments")
+Signed-off-by: Coco Li <lixiaoyan@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/20211223222441.2975883-1-lixiaoyan@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/udp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8a1863146f34c..069551a04369e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1189,7 +1189,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
+-              if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
++              if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
+-- 
+2.34.1
+