git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author    Sasha Levin <sashal@kernel.org>
Mon, 31 May 2021 02:37:04 +0000 (22:37 -0400)
committer Sasha Levin <sashal@kernel.org>
Mon, 31 May 2021 02:37:04 +0000 (22:37 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
55 files changed:
queue-5.10/alsa-usb-audio-scarlett2-snd_scarlett_gen2_controls_.patch [new file with mode: 0644]
queue-5.10/asoc-cs35l33-fix-an-error-code-in-probe.patch [new file with mode: 0644]
queue-5.10/asoc-cs42l42-regmap-must-use_single_read-write.patch [new file with mode: 0644]
queue-5.10/asoc-qcom-lpass-cpu-use-optional-clk-apis.patch [new file with mode: 0644]
queue-5.10/bnxt_en-fix-context-memory-setup-for-64k-page-size.patch [new file with mode: 0644]
queue-5.10/bnxt_en-include-new-p5-hv-definition-in-vf-check.patch [new file with mode: 0644]
queue-5.10/bpf-offload-reorder-offload-callback-prepare-in-veri.patch [new file with mode: 0644]
queue-5.10/bpf-set-mac_len-in-bpf_skb_change_head.patch [new file with mode: 0644]
queue-5.10/chelsio-chtls-unlock-on-error-in-chtls_pt_recvmsg.patch [new file with mode: 0644]
queue-5.10/cxgb4-avoid-accessing-registers-when-clearing-filter.patch [new file with mode: 0644]
queue-5.10/cxgb4-ch_ktls-clear-resources-when-pf4-device-is-rem.patch [new file with mode: 0644]
queue-5.10/gve-add-null-pointer-checks-when-freeing-irqs.patch [new file with mode: 0644]
queue-5.10/gve-check-tx-qpl-was-actually-assigned.patch [new file with mode: 0644]
queue-5.10/gve-correct-skb-queue-index-validation.patch [new file with mode: 0644]
queue-5.10/gve-update-mgmt_msix_idx-if-num_ntfy-changes.patch [new file with mode: 0644]
queue-5.10/gve-upgrade-memory-barrier-in-poll-routine.patch [new file with mode: 0644]
queue-5.10/interconnect-qcom-add-missing-module_device_table.patch [new file with mode: 0644]
queue-5.10/interconnect-qcom-bcm-voter-add-a-missing-of_node_pu.patch [new file with mode: 0644]
queue-5.10/iommu-virtio-add-missing-module_device_table.patch [new file with mode: 0644]
queue-5.10/iommu-vt-d-use-user-privilege-for-rid2pasid-translat.patch [new file with mode: 0644]
queue-5.10/ipv6-record-frag_max_size-in-atomic-fragments-in-inp.patch [new file with mode: 0644]
queue-5.10/ixgbe-fix-large-mtu-request-from-vf.patch [new file with mode: 0644]
queue-5.10/mips-alchemy-xxs1500-add-gpio-au1000.h-header-file.patch [new file with mode: 0644]
queue-5.10/mips-ralink-export-rt_sysc_membase-for-rt2880_wdt.c.patch [new file with mode: 0644]
queue-5.10/mld-fix-panic-in-mld_newpack.patch [new file with mode: 0644]
queue-5.10/net-bnx2-fix-error-return-code-in-bnx2_init_board.patch [new file with mode: 0644]
queue-5.10/net-dsa-fix-error-code-getting-shifted-with-4-in-dsa.patch [new file with mode: 0644]
queue-5.10/net-ethernet-mtk_eth_soc-fix-packet-statistics-suppo.patch [new file with mode: 0644]
queue-5.10/net-fec-fix-the-potential-memory-leak-in-fec_enet_in.patch [new file with mode: 0644]
queue-5.10/net-hns3-fix-incorrect-resp_msg-issue.patch [new file with mode: 0644]
queue-5.10/net-hns3-put-off-calling-register_netdev-until-clien.patch [new file with mode: 0644]
queue-5.10/net-hso-check-for-allocation-failure-in-hso_create_b.patch [new file with mode: 0644]
queue-5.10/net-hsr-fix-mac_len-checks.patch [new file with mode: 0644]
queue-5.10/net-ipa-memory-region-array-is-variable-size.patch [new file with mode: 0644]
queue-5.10/net-lantiq-fix-memory-corruption-in-rx-ring.patch [new file with mode: 0644]
queue-5.10/net-mdio-octeon-fix-some-double-free-issues.patch [new file with mode: 0644]
queue-5.10/net-mdio-thunder-fix-a-double-free-issue-in-the-.rem.patch [new file with mode: 0644]
queue-5.10/net-mvpp2-add-buffer-header-handling-in-rx.patch [new file with mode: 0644]
queue-5.10/net-netcp-fix-an-error-message.patch [new file with mode: 0644]
queue-5.10/net-packetmmap-fix-only-tx-timestamp-on-request.patch [new file with mode: 0644]
queue-5.10/net-really-orphan-skbs-tied-to-closing-sk.patch [new file with mode: 0644]
queue-5.10/net-sched-fix-packet-stuck-problem-for-lockless-qdis.patch [new file with mode: 0644]
queue-5.10/net-sched-fix-tx-action-reschedule-issue-with-stoppe.patch [new file with mode: 0644]
queue-5.10/net-sched-fix-tx-action-rescheduling-issue-during-de.patch [new file with mode: 0644]
queue-5.10/net-smc-remove-device-from-smcd_dev_list-after-faile.patch [new file with mode: 0644]
queue-5.10/net-stmmac-fix-mac-wol-not-working-if-phy-does-not-s.patch [new file with mode: 0644]
queue-5.10/net-zero-initialize-tc-skb-extension-on-allocation.patch [new file with mode: 0644]
queue-5.10/openvswitch-meter-fix-race-when-getting-now_ms.patch [new file with mode: 0644]
queue-5.10/sch_dsmark-fix-a-null-deref-in-qdisc_reset.patch [new file with mode: 0644]
queue-5.10/scsi-libsas-use-_safe-loop-in-sas_resume_port.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/spi-assume-gpio-cs-active-high-in-acpi-case.patch [new file with mode: 0644]
queue-5.10/staging-emxx_udc-fix-loop-in-_nbu2ss_nuke.patch [new file with mode: 0644]
queue-5.10/tls-splice-check-splice_f_nonblock-instead-of-msg_do.patch [new file with mode: 0644]
queue-5.10/vfio-ccw-check-initialized-flag-in-cp_init.patch [new file with mode: 0644]

diff --git a/queue-5.10/alsa-usb-audio-scarlett2-snd_scarlett_gen2_controls_.patch b/queue-5.10/alsa-usb-audio-scarlett2-snd_scarlett_gen2_controls_.patch
new file mode 100644 (file)
index 0000000..e2813f2
--- /dev/null
@@ -0,0 +1,40 @@
+From 6ff64b087f53868e0e21b77a8d76c0da401f290e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 May 2021 02:09:00 +0800
+Subject: ALSA: usb-audio: scarlett2: snd_scarlett_gen2_controls_create() can
+ be static
+
+From: kernel test robot <lkp@intel.com>
+
+[ Upstream commit 2b899f31f1a6db2db4608bac2ac04fe2c4ad89eb ]
+
+sound/usb/mixer_scarlett_gen2.c:2000:5: warning: symbol 'snd_scarlett_gen2_controls_create' was not declared. Should it be static?
+
+Fixes: 265d1a90e4fb ("ALSA: usb-audio: scarlett2: Improve driver startup messages")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: kernel test robot <lkp@intel.com>
+Link: https://lore.kernel.org/r/20210522180900.GA83915@f59a3af2f1d9
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer_scarlett_gen2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index 01ffef6a5146..9a98b0c048e3 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -1997,8 +1997,8 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
+       return usb_submit_urb(mixer->urb, GFP_KERNEL);
+ }
+-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
+-                                    const struct scarlett2_device_info *info)
++static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
++                                           const struct scarlett2_device_info *info)
+ {
+       int err;
+-- 
+2.30.2
+
diff --git a/queue-5.10/asoc-cs35l33-fix-an-error-code-in-probe.patch b/queue-5.10/asoc-cs35l33-fix-an-error-code-in-probe.patch
new file mode 100644 (file)
index 0000000..969cc11
--- /dev/null
@@ -0,0 +1,36 @@
+From 85472c5884cd103517d5d11c45c0e12df00b248e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 May 2021 08:08:24 +0300
+Subject: ASoC: cs35l33: fix an error code in probe()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 833bc4cf9754643acc69b3c6b65988ca78df4460 ]
+
+This error path returns zero (success) but it should return -EINVAL.
+
+Fixes: 3333cb7187b9 ("ASoC: cs35l33: Initial commit of the cs35l33 CODEC driver.")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/YKXuyGEzhPT35R3G@mwanda
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs35l33.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
+index 6042194d95d3..8894369e329a 100644
+--- a/sound/soc/codecs/cs35l33.c
++++ b/sound/soc/codecs/cs35l33.c
+@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
+               dev_err(&i2c_client->dev,
+                       "CS35L33 Device ID (%X). Expected ID %X\n",
+                       devid, CS35L33_CHIP_ID);
++              ret = -EINVAL;
+               goto err_enable;
+       }
+-- 
+2.30.2
+
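
The fix above is an instance of a common probe() error-path bug: the error label returns a status variable, but one branch jumps to it without assigning that variable first. A minimal, hypothetical reduction of the pattern (not the cs35l33 code):

#include <linux/errno.h>

/* Hypothetical reduction of the bug class: the error label returns
 * "ret", so every failing branch must assign ret before the goto,
 * otherwise the function reports success for a device it rejected.
 */
static int ex_probe_check(unsigned int devid, unsigned int expected)
{
        int ret = 0;

        if (devid != expected) {
                ret = -EINVAL;          /* the one-line fix in the patch */
                goto err;
        }

        return 0;

err:
        /* undo any partial setup here */
        return ret;
}
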
diff --git a/queue-5.10/asoc-cs42l42-regmap-must-use_single_read-write.patch b/queue-5.10/asoc-cs42l42-regmap-must-use_single_read-write.patch
new file mode 100644 (file)
index 0000000..84622ea
--- /dev/null
@@ -0,0 +1,49 @@
+From d8b60f0a8ff110cde086a0f1ea3fa95491a6ed99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 14:28:55 +0100
+Subject: ASoC: cs42l42: Regmap must use_single_read/write
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 0fad605fb0bdc00d8ad78696300ff2fbdee6e048 ]
+
+cs42l42 does not support standard burst transfers so the use_single_read
+and use_single_write flags must be set in the regmap config.
+
+Because of this bug, the patch:
+
+commit 0a0eb567e1d4 ("ASoC: cs42l42: Minor error paths fixups")
+
+broke cs42l42 probe() because without the use_single_* flags it causes
+regmap to issue a burst read.
+
+However, the missing use_single_* could cause problems anyway because the
+regmap cache can attempt burst transfers if these flags are not set.
+
+Fixes: 2c394ca79604 ("ASoC: Add support for CS42L42 codec")
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20210511132855.27159-1-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs42l42.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index 4d82d24c7828..7c6b10bc0b8c 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -398,6 +398,9 @@ static const struct regmap_config cs42l42_regmap = {
+       .reg_defaults = cs42l42_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
+       .cache_type = REGCACHE_RBTREE,
++
++      .use_single_read = true,
++      .use_single_write = true,
+ };
+ static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+-- 
+2.30.2
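
For context, a minimal sketch (illustrative register sizes, not the cs42l42 table) of a regmap_config carrying the two flags this patch adds; with them set, regmap and its cache-sync paths issue one register per bus transaction instead of attempting burst transfers:

#include <linux/regmap.h>

static const struct regmap_config ex_regmap_cfg = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = 0xff,
        .cache_type = REGCACHE_RBTREE,

        /* device cannot handle multi-register (burst) transfers */
        .use_single_read = true,
        .use_single_write = true,
};
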
+
diff --git a/queue-5.10/asoc-qcom-lpass-cpu-use-optional-clk-apis.patch b/queue-5.10/asoc-qcom-lpass-cpu-use-optional-clk-apis.patch
new file mode 100644 (file)
index 0000000..5ca083f
--- /dev/null
@@ -0,0 +1,55 @@
+From d4d163072db5d9c72a0e82f6c77f60aa37db6b63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 18:48:07 -0700
+Subject: ASoC: qcom: lpass-cpu: Use optional clk APIs
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit af2702549d68519ac78228e915d9b2c199056787 ]
+
+This driver spits out a warning for me at boot:
+
+ sc7180-lpass-cpu 62f00000.lpass: asoc_qcom_lpass_cpu_platform_probe() error getting optional null: -2
+
+but it looks like it is all an optional clk. Use the optional clk APIs
+here so that we don't see this message and everything else is the same.
+
+Cc: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Cc: Banajit Goswami <bgoswami@codeaurora.org>
+Fixes: 3e53ac8230c1 ("ASoC: qcom: make osr clock optional")
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20210520014807.3749797-1-swboyd@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/lpass-cpu.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 4fb2ec7c8867..7a30a12519a7 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -839,18 +839,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+               if (dai_id == LPASS_DP_RX)
+                       continue;
+-              drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
++              drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
+                                            variant->dai_osr_clk_names[i]);
+-              if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
+-                      dev_warn(dev,
+-                              "%s() error getting optional %s: %ld\n",
+-                              __func__,
+-                              variant->dai_osr_clk_names[i],
+-                              PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
+-
+-                      drvdata->mi2s_osr_clk[dai_id] = NULL;
+-              }
+-
+               drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
+                                               variant->dai_bit_clk_names[i]);
+               if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
+-- 
+2.30.2
+
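
A minimal sketch of the optional-clock pattern the patch switches to (hypothetical clock name "osr"): devm_clk_get_optional() returns NULL rather than an error when the clock simply is not described, and the clk API accepts a NULL clk, so probe needs no warning and no manual "treat error as NULL" fixup:

#include <linux/clk.h>
#include <linux/device.h>

static int ex_get_osr_clk(struct device *dev, struct clk **out)
{
        struct clk *clk;

        clk = devm_clk_get_optional(dev, "osr");
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* real error, e.g. -EPROBE_DEFER */

        *out = clk;                     /* may legitimately be NULL */
        return 0;
}
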
diff --git a/queue-5.10/bnxt_en-fix-context-memory-setup-for-64k-page-size.patch b/queue-5.10/bnxt_en-fix-context-memory-setup-for-64k-page-size.patch
new file mode 100644 (file)
index 0000000..628afbb
--- /dev/null
@@ -0,0 +1,67 @@
+From 4c85a2765f1ef4351a72f304cf7655b7d6f576e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 May 2021 03:25:19 -0400
+Subject: bnxt_en: Fix context memory setup for 64K page size.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 702279d2ce4650000bb6302013630304e359dc13 ]
+
+There was a typo in the code that checks for 64K BNXT_PAGE_SHIFT in
+bnxt_hwrm_set_pg_attr().  Fix it and make the code more understandable
+with a new macro BNXT_SET_CTX_PAGE_ATTR().
+
+Fixes: 1b9394e5a2ad ("bnxt_en: Configure context memory on new devices.")
+Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c |  9 +--------
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index ff86324c7fb8..adfaa9a850dd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6834,14 +6834,7 @@ ctx_err:
+ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+                                 __le64 *pg_dir)
+ {
+-      u8 pg_size = 0;
+-
+-      if (BNXT_PAGE_SHIFT == 13)
+-              pg_size = 1 << 4;
+-      else if (BNXT_PAGE_SIZE == 16)
+-              pg_size = 2 << 4;
+-
+-      *pg_attr = pg_size;
++      BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
+       if (rmem->depth >= 1) {
+               if (rmem->depth == 2)
+                       *pg_attr |= 2;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index e4e926c65118..a95c5afa2f01 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1440,6 +1440,16 @@ struct bnxt_ctx_pg_info {
+ #define BNXT_MAX_TQM_RINGS            \
+       (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
++#define BNXT_SET_CTX_PAGE_ATTR(attr)                                  \
++do {                                                                  \
++      if (BNXT_PAGE_SIZE == 0x2000)                                   \
++              attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K;    \
++      else if (BNXT_PAGE_SIZE == 0x10000)                             \
++              attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K;   \
++      else                                                            \
++              attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;    \
++} while (0)
++
+ struct bnxt_ctx_mem_info {
+       u32     qp_max_entries;
+       u16     qp_min_qp1_entries;
+-- 
+2.30.2
+
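
The typo the patch fixes is easy to miss: one branch compared a page *shift* (log2 of the size) and the other a page *size* in bytes, so the 64K case could never match. A hypothetical reduction (names are illustrative, not the bnxt macros):

#define EX_PAGE_SHIFT   16                      /* 64K pages */
#define EX_PAGE_SIZE    (1UL << EX_PAGE_SHIFT)

static unsigned char ex_page_attr(void)
{
        if (EX_PAGE_SHIFT == 13)                /* 8K */
                return 1 << 4;
        else if (EX_PAGE_SIZE == 16)            /* BUG: should be EX_PAGE_SHIFT == 16 */
                return 2 << 4;
        return 0;                               /* falls back to the 4K encoding */
}
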
diff --git a/queue-5.10/bnxt_en-include-new-p5-hv-definition-in-vf-check.patch b/queue-5.10/bnxt_en-include-new-p5-hv-definition-in-vf-check.patch
new file mode 100644 (file)
index 0000000..9032ff7
--- /dev/null
@@ -0,0 +1,39 @@
+From 65ad972ca1c74ae15f07db76b22a95e5b063b5d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 May 2021 03:25:18 -0400
+Subject: bnxt_en: Include new P5 HV definition in VF check.
+
+From: Andy Gospodarek <gospo@broadcom.com>
+
+[ Upstream commit ab21494be9dc7d62736c5fcd06be65d49df713ee ]
+
+Otherwise, some of the recently added HyperV VF IDs would not be
+recognized as VF devices and they would not initialize properly.
+
+Fixes: 7fbf359bb2c1 ("bnxt_en: Add PCI IDs for Hyper-V VF devices.")
+Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
+Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4385b42a2b63..ff86324c7fb8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -280,7 +280,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
+ {
+       return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+               idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
+-              idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
++              idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
++              idx == NETXTREME_E_P5_VF_HV);
+ }
+ #define DB_CP_REARM_FLAGS     (DB_KEY_CP | DB_IDX_VALID)
+-- 
+2.30.2
+
diff --git a/queue-5.10/bpf-offload-reorder-offload-callback-prepare-in-veri.patch b/queue-5.10/bpf-offload-reorder-offload-callback-prepare-in-veri.patch
new file mode 100644 (file)
index 0000000..58379c1
--- /dev/null
@@ -0,0 +1,60 @@
+From 329193dd77e538b75d16e0a64eb4645c7d969fea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 May 2021 10:58:34 +0200
+Subject: bpf, offload: Reorder offload callback 'prepare' in verifier
+
+From: Yinjun Zhang <yinjun.zhang@corigine.com>
+
+[ Upstream commit ceb11679d9fcf3fdb358a310a38760fcbe9b63ed ]
+
+Commit 4976b718c355 ("bpf: Introduce pseudo_btf_id") switched the
+order of resolve_pseudo_ldimm(), in which some pseudo instructions
+are rewritten. Thus those rewritten instructions cannot be passed
+to driver via 'prepare' offload callback.
+
+Reorder the 'prepare' offload callback to fix it.
+
+Fixes: 4976b718c355 ("bpf: Introduce pseudo_btf_id")
+Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
+Signed-off-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20210520085834.15023-1-simon.horman@netronome.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 364b9760d1a7..4f50d6f128be 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -12364,12 +12364,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+       if (is_priv)
+               env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
+-      if (bpf_prog_is_dev_bound(env->prog->aux)) {
+-              ret = bpf_prog_offload_verifier_prep(env->prog);
+-              if (ret)
+-                      goto skip_full_check;
+-      }
+-
+       env->explored_states = kvcalloc(state_htab_size(env),
+                                      sizeof(struct bpf_verifier_state_list *),
+                                      GFP_USER);
+@@ -12393,6 +12387,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+       if (ret < 0)
+               goto skip_full_check;
++      if (bpf_prog_is_dev_bound(env->prog->aux)) {
++              ret = bpf_prog_offload_verifier_prep(env->prog);
++              if (ret)
++                      goto skip_full_check;
++      }
++
+       ret = check_cfg(env);
+       if (ret < 0)
+               goto skip_full_check;
+-- 
+2.30.2
+
diff --git a/queue-5.10/bpf-set-mac_len-in-bpf_skb_change_head.patch b/queue-5.10/bpf-set-mac_len-in-bpf_skb_change_head.patch
new file mode 100644 (file)
index 0000000..8501219
--- /dev/null
@@ -0,0 +1,40 @@
+From 2d93bb1f2e36baa29b5f716d55eb50f1786eb55b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 15:47:42 +0000
+Subject: bpf: Set mac_len in bpf_skb_change_head
+
+From: Jussi Maki <joamaki@gmail.com>
+
+[ Upstream commit 84316ca4e100d8cbfccd9f774e23817cb2059868 ]
+
+The skb_change_head() helper did not set "skb->mac_len", which is
+problematic when it's used in combination with skb_redirect_peer().
+Without it, redirecting a packet from a L3 device such as wireguard to
+the veth peer device will cause skb->data to point to the middle of the
+IP header on entry to tcp_v4_rcv() since the L2 header is not pulled
+correctly due to mac_len=0.
+
+Fixes: 3a0af8fd61f9 ("bpf: BPF for lightweight tunnel infrastructure")
+Signed-off-by: Jussi Maki <joamaki@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20210519154743.2554771-2-joamaki@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9358bc4a3711..ef6bdbb63ecb 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3782,6 +3782,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+               __skb_push(skb, head_room);
+               memset(skb->data, 0, head_room);
+               skb_reset_mac_header(skb);
++              skb_reset_mac_len(skb);
+       }
+       return ret;
+-- 
+2.30.2
+
diff --git a/queue-5.10/chelsio-chtls-unlock-on-error-in-chtls_pt_recvmsg.patch b/queue-5.10/chelsio-chtls-unlock-on-error-in-chtls_pt_recvmsg.patch
new file mode 100644 (file)
index 0000000..1762597
--- /dev/null
@@ -0,0 +1,40 @@
+From bb6cdc8bd3ac6d4fea703b9cb11ba628f829fbba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 May 2021 13:02:48 +0300
+Subject: chelsio/chtls: unlock on error in chtls_pt_recvmsg()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 832ce924b1a14e139e184a6da9f5a69a5e47b256 ]
+
+This error path needs to release some memory and call release_sock(sk);
+before returning.
+
+Fixes: 6919a8264a32 ("Crypto/chtls: add/delete TLS header in driver")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+index 188d871f6b8c..c320cc8ca68d 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+@@ -1564,8 +1564,10 @@ found_ok_skb:
+                       cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+                                       sizeof(thdr->type), &thdr->type);
+-                      if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
+-                              return -EIO;
++                      if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
++                              copied = -EIO;
++                              break;
++                      }
+                       /*  don't send tls header, skip copy */
+                       goto skip_copy;
+               }
+-- 
+2.30.2
+
diff --git a/queue-5.10/cxgb4-avoid-accessing-registers-when-clearing-filter.patch b/queue-5.10/cxgb4-avoid-accessing-registers-when-clearing-filter.patch
new file mode 100644 (file)
index 0000000..d22c2ac
--- /dev/null
@@ -0,0 +1,39 @@
+From 31e717a73459c591a7563e11c11efd3ce8f9b52a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 16:48:31 +0530
+Subject: cxgb4: avoid accessing registers when clearing filters
+
+From: Raju Rangoju <rajur@chelsio.com>
+
+[ Upstream commit 88c380df84fbd03f9b137c2b9d0a44b9f2f553b0 ]
+
+Hardware register having the server TID base can contain
+invalid values when adapter is in bad state (for example,
+due to AER fatal error). Reading these invalid values in the
+register can lead to out-of-bound memory access. So, fix
+by using the saved server TID base when clearing filters.
+
+Fixes: b1a79360ee86 ("cxgb4: Delete all hash and TCAM filters before resource cleanup")
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index bde8494215c4..e664e05b9f02 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
+                               cxgb4_del_filter(dev, f->tid, &f->fs);
+               }
+-              sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
++              sb = adapter->tids.stid_base;
+               for (i = 0; i < sb; i++) {
+                       f = (struct filter_entry *)adapter->tids.tid_tab[i];
+-- 
+2.30.2
+
diff --git a/queue-5.10/cxgb4-ch_ktls-clear-resources-when-pf4-device-is-rem.patch b/queue-5.10/cxgb4-ch_ktls-clear-resources-when-pf4-device-is-rem.patch
new file mode 100644 (file)
index 0000000..47d3e03
--- /dev/null
@@ -0,0 +1,242 @@
+From e08cfa6b1417209f7ffc6e991a204294ea14d7cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 May 2021 15:11:51 +0530
+Subject: cxgb4/ch_ktls: Clear resources when pf4 device is removed
+
+From: Ayush Sawal <ayush.sawal@chelsio.com>
+
+[ Upstream commit 65e302a9bd57b62872040d57eea1201562a7cbb2 ]
+
+This patch maintain the list of active tids and clear all the active
+connection resources when DETACH notification comes.
+
+Fixes: a8c16e8ed624f ("crypto/chcr: move nic TLS functionality to drivers/net")
+Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c   |  2 +-
+ .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 80 ++++++++++++++++++-
+ .../chelsio/inline_crypto/ch_ktls/chcr_ktls.h |  2 +
+ 3 files changed, 82 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 7fd264a6d085..23c13f34a572 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6484,9 +6484,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
+       adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+                                                         direction);
+-      cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+ out_unlock:
++      cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+       mutex_unlock(&uld_mutex);
+ }
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+index 3a50d5a62ace..f9353826b245 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+ }
+ static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
++static void clear_conn_resources(struct chcr_ktls_info *tx_info);
+ /*
+  * chcr_ktls_save_keys: calculate and save crypto keys.
+  * @tx_info - driver specific tls info.
+@@ -370,10 +371,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+                               chcr_get_ktls_tx_context(tls_ctx);
+       struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+       struct ch_ktls_port_stats_debug *port_stats;
++      struct chcr_ktls_uld_ctx *u_ctx;
+       if (!tx_info)
+               return;
++      u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
++      if (u_ctx && u_ctx->detach)
++              return;
+       /* clear l2t entry */
+       if (tx_info->l2te)
+               cxgb4_l2t_release(tx_info->l2te);
+@@ -390,6 +395,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+       if (tx_info->tid != -1) {
+               cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+                                tx_info->tid, tx_info->ip_family);
++
++              xa_erase(&u_ctx->tid_list, tx_info->tid);
+       }
+       port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+@@ -417,6 +424,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct ch_ktls_port_stats_debug *port_stats;
+       struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++      struct chcr_ktls_uld_ctx *u_ctx;
+       struct chcr_ktls_info *tx_info;
+       struct dst_entry *dst;
+       struct adapter *adap;
+@@ -431,6 +439,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+       adap = pi->adapter;
+       port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+       atomic64_inc(&port_stats->ktls_tx_connection_open);
++      u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+       if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
+               pr_err("not expecting for RX direction\n");
+@@ -440,6 +449,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+       if (tx_ctx->chcr_info)
+               goto out;
++      if (u_ctx && u_ctx->detach)
++              goto out;
++
+       tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
+       if (!tx_info)
+               goto out;
+@@ -575,6 +587,8 @@ free_tid:
+       cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+                        tx_info->tid, tx_info->ip_family);
++      xa_erase(&u_ctx->tid_list, tx_info->tid);
++
+ put_module:
+       /* release module refcount */
+       module_put(THIS_MODULE);
+@@ -639,8 +653,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ {
+       const struct cpl_act_open_rpl *p = (void *)input;
+       struct chcr_ktls_info *tx_info = NULL;
++      struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++      struct chcr_ktls_uld_ctx *u_ctx;
+       unsigned int atid, tid, status;
++      struct tls_context *tls_ctx;
+       struct tid_info *t;
++      int ret = 0;
+       tid = GET_TID(p);
+       status = AOPEN_STATUS_G(ntohl(p->atid_status));
+@@ -672,14 +690,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+       if (!status) {
+               tx_info->tid = tid;
+               cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
++              /* Adding tid */
++              tls_ctx = tls_get_ctx(tx_info->sk);
++              tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
++              u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
++              if (u_ctx) {
++                      ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
++                                         GFP_NOWAIT);
++                      if (ret < 0) {
++                              pr_err("%s: Failed to allocate tid XA entry = %d\n",
++                                     __func__, tx_info->tid);
++                              tx_info->open_state = CH_KTLS_OPEN_FAILURE;
++                              goto out;
++                      }
++              }
+               tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+       } else {
+               tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+       }
++out:
+       spin_unlock(&tx_info->lock);
+       complete(&tx_info->completion);
+-      return 0;
++      return ret;
+ }
+ /*
+@@ -2097,6 +2130,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
+               goto out;
+       }
+       u_ctx->lldi = *lldi;
++      u_ctx->detach = false;
++      xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
+ out:
+       return u_ctx;
+ }
+@@ -2130,6 +2165,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
+       return 0;
+ }
++static void clear_conn_resources(struct chcr_ktls_info *tx_info)
++{
++      /* clear l2t entry */
++      if (tx_info->l2te)
++              cxgb4_l2t_release(tx_info->l2te);
++
++#if IS_ENABLED(CONFIG_IPV6)
++      /* clear clip entry */
++      if (tx_info->ip_family == AF_INET6)
++              cxgb4_clip_release(tx_info->netdev, (const u32 *)
++                                 &tx_info->sk->sk_v6_rcv_saddr,
++                                 1);
++#endif
++
++      /* clear tid */
++      if (tx_info->tid != -1)
++              cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
++                               tx_info->tid, tx_info->ip_family);
++}
++
++static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
++{
++      struct ch_ktls_port_stats_debug *port_stats;
++      struct chcr_ktls_ofld_ctx_tx *tx_ctx;
++      struct chcr_ktls_info *tx_info;
++      unsigned long index;
++
++      xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
++              tx_info = tx_ctx->chcr_info;
++              clear_conn_resources(tx_info);
++              port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
++              atomic64_inc(&port_stats->ktls_tx_connection_close);
++              kvfree(tx_info);
++              tx_ctx->chcr_info = NULL;
++              /* release module refcount */
++              module_put(THIS_MODULE);
++      }
++}
++
+ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+ {
+       struct chcr_ktls_uld_ctx *u_ctx = handle;
+@@ -2146,7 +2220,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+       case CXGB4_STATE_DETACH:
+               pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+               mutex_lock(&dev_mutex);
++              u_ctx->detach = true;
+               list_del(&u_ctx->entry);
++              ch_ktls_reset_all_conn(u_ctx);
++              xa_destroy(&u_ctx->tid_list);
+               mutex_unlock(&dev_mutex);
+               break;
+       default:
+@@ -2185,6 +2262,7 @@ static void __exit chcr_ktls_exit(void)
+               adap = pci_get_drvdata(u_ctx->lldi.pdev);
+               memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
+               list_del(&u_ctx->entry);
++              xa_destroy(&u_ctx->tid_list);
+               kfree(u_ctx);
+       }
+       mutex_unlock(&dev_mutex);
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+index 18b3b1f02415..10572dc55365 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
+ struct chcr_ktls_uld_ctx {
+       struct list_head entry;
+       struct cxgb4_lld_info lldi;
++      struct xarray tid_list;
++      bool detach;
+ };
+ static inline struct chcr_ktls_ofld_ctx_tx *
+-- 
+2.30.2
+
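
A minimal sketch of the bookkeeping pattern the patch introduces (illustrative names, not the driver's structures): each active connection is indexed by tid in an xarray so that a DETACH notification can walk the array and release whatever is still live:

#include <linux/xarray.h>
#include <linux/slab.h>

struct ex_conn {
        unsigned long tid;
};

static DEFINE_XARRAY_FLAGS(ex_tid_list, XA_FLAGS_LOCK_BH);

static int ex_conn_open(struct ex_conn *c)
{
        /* GFP_NOWAIT mirrors insertion from the CPL completion path */
        return xa_insert_bh(&ex_tid_list, c->tid, c, GFP_NOWAIT);
}

static void ex_conn_close(struct ex_conn *c)
{
        xa_erase(&ex_tid_list, c->tid);
}

static void ex_detach_all(void)
{
        struct ex_conn *c;
        unsigned long index;

        xa_for_each(&ex_tid_list, index, c) {
                /* release per-connection hardware resources here;
                 * the entries themselves are dropped wholesale by
                 * xa_destroy() below.
                 */
                kfree(c);
        }
        xa_destroy(&ex_tid_list);
}
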
diff --git a/queue-5.10/gve-add-null-pointer-checks-when-freeing-irqs.patch b/queue-5.10/gve-add-null-pointer-checks-when-freeing-irqs.patch
new file mode 100644 (file)
index 0000000..5304d02
--- /dev/null
@@ -0,0 +1,61 @@
+From 4d53f0244ebc82531a257c596dc34af8986ba503 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 14:08:13 -0700
+Subject: gve: Add NULL pointer checks when freeing irqs.
+
+From: David Awogbemila <awogbemila@google.com>
+
+[ Upstream commit 5218e919c8d06279884aa0baf76778a6817d5b93 ]
+
+When freeing notification blocks, we index priv->msix_vectors.
+If we failed to allocate priv->msix_vectors (see abort_with_msix_vectors)
+this could lead to a NULL pointer dereference if the driver is unloaded.
+
+Fixes: 893ce44df565 ("gve: Add basic driver framework for Compute Engine Virtual NIC")
+Signed-off-by: David Awogbemila <awogbemila@google.com>
+Acked-by: Willem de Brujin <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index a8fcf1227391..839102ea6aa1 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -301,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
+ {
+       int i;
+-      /* Free the irqs */
+-      for (i = 0; i < priv->num_ntfy_blks; i++) {
+-              struct gve_notify_block *block = &priv->ntfy_blocks[i];
+-              int msix_idx = i;
+-
+-              irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+-                                    NULL);
+-              free_irq(priv->msix_vectors[msix_idx].vector, block);
++      if (priv->msix_vectors) {
++              /* Free the irqs */
++              for (i = 0; i < priv->num_ntfy_blks; i++) {
++                      struct gve_notify_block *block = &priv->ntfy_blocks[i];
++                      int msix_idx = i;
++
++                      irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
++                                            NULL);
++                      free_irq(priv->msix_vectors[msix_idx].vector, block);
++              }
++              free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+       }
+       dma_free_coherent(&priv->pdev->dev,
+                         priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
+                         priv->ntfy_blocks, priv->ntfy_block_bus);
+       priv->ntfy_blocks = NULL;
+-      free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+       pci_disable_msix(priv->pdev);
+       kvfree(priv->msix_vectors);
+       priv->msix_vectors = NULL;
+-- 
+2.30.2
+
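
A minimal sketch of the teardown guard the patch adds (hypothetical fields): if setup failed before the MSI-X vector array was ever allocated, the free path must not index it:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct ex_priv {
        struct msix_entry *msix_vectors;
        int num_vectors;
};

static void ex_free_irqs(struct ex_priv *priv)
{
        int i;

        if (!priv->msix_vectors)        /* allocation never happened */
                return;

        for (i = 0; i < priv->num_vectors; i++)
                free_irq(priv->msix_vectors[i].vector, priv);
}
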
diff --git a/queue-5.10/gve-check-tx-qpl-was-actually-assigned.patch b/queue-5.10/gve-check-tx-qpl-was-actually-assigned.patch
new file mode 100644 (file)
index 0000000..edef682
--- /dev/null
@@ -0,0 +1,52 @@
+From 97aca6eae1a6d7f6562348f87e1a679e85cf98be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 14:08:11 -0700
+Subject: gve: Check TX QPL was actually assigned
+
+From: Catherine Sullivan <csully@google.com>
+
+[ Upstream commit 5aec55b46c6238506cdf0c60cd0e42ab77a1e5e0 ]
+
+Correctly check the TX QPL was assigned and unassigned if
+other steps in the allocation fail.
+
+Fixes: f5cedc84a30d (gve: Add transmit and receive support)
+Signed-off-by: Catherine Sullivan <csully@google.com>
+Signed-off-by: David Awogbemila <awogbemila@google.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_tx.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index d0244feb0301..30532ee28dd3 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -207,10 +207,12 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+               goto abort_with_info;
+       tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
++      if (!tx->tx_fifo.qpl)
++              goto abort_with_desc;
+       /* map Tx FIFO */
+       if (gve_tx_fifo_init(priv, &tx->tx_fifo))
+-              goto abort_with_desc;
++              goto abort_with_qpl;
+       tx->q_resources =
+               dma_alloc_coherent(hdev,
+@@ -229,6 +231,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+ abort_with_fifo:
+       gve_tx_fifo_release(priv, &tx->tx_fifo);
++abort_with_qpl:
++      gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ abort_with_desc:
+       dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
+       tx->desc = NULL;
+-- 
+2.30.2
+
diff --git a/queue-5.10/gve-correct-skb-queue-index-validation.patch b/queue-5.10/gve-correct-skb-queue-index-validation.patch
new file mode 100644 (file)
index 0000000..1dd10c1
--- /dev/null
@@ -0,0 +1,37 @@
+From 445c1266c38daa41249b14436986dfc454659b9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 14:08:15 -0700
+Subject: gve: Correct SKB queue index validation.
+
+From: David Awogbemila <awogbemila@google.com>
+
+[ Upstream commit fbd4a28b4fa66faaa7f510c0adc531d37e0a7848 ]
+
+SKBs with skb_get_queue_mapping(skb) == tx_cfg.num_queues should also be
+considered invalid.
+
+Fixes: f5cedc84a30d ("gve: Add transmit and receive support")
+Signed-off-by: David Awogbemila <awogbemila@google.com>
+Acked-by: Willem de Brujin <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_tx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 30532ee28dd3..b653197b34d1 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -482,7 +482,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
+       struct gve_tx_ring *tx;
+       int nsegs;
+-      WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
++      WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
+            "skb queue index out of range");
+       tx = &priv->tx[skb_get_queue_mapping(skb)];
+       if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+-- 
+2.30.2
+
diff --git a/queue-5.10/gve-update-mgmt_msix_idx-if-num_ntfy-changes.patch b/queue-5.10/gve-update-mgmt_msix_idx-if-num_ntfy-changes.patch
new file mode 100644 (file)
index 0000000..1b154e9
--- /dev/null
@@ -0,0 +1,38 @@
+From c275edad50412d485dc65ae9f00c4ae15334518b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 14:08:12 -0700
+Subject: gve: Update mgmt_msix_idx if num_ntfy changes
+
+From: David Awogbemila <awogbemila@google.com>
+
+[ Upstream commit e96b491a0ffa35a8a9607c193fa4d894ca9fb32f ]
+
+If we do not get the expected number of vectors from
+pci_enable_msix_range, we update priv->num_ntfy_blks but not
+priv->mgmt_msix_idx. This patch fixes this so that priv->mgmt_msix_idx
+is updated accordingly.
+
+Fixes: f5cedc84a30d ("gve: Add transmit and receive support")
+Signed-off-by: David Awogbemila <awogbemila@google.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 02e7d74779f4..a8fcf1227391 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
+               int vecs_left = new_num_ntfy_blks % 2;
+               priv->num_ntfy_blks = new_num_ntfy_blks;
++              priv->mgmt_msix_idx = priv->num_ntfy_blks;
+               priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
+                                               vecs_per_type);
+               priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
+-- 
+2.30.2
+
diff --git a/queue-5.10/gve-upgrade-memory-barrier-in-poll-routine.patch b/queue-5.10/gve-upgrade-memory-barrier-in-poll-routine.patch
new file mode 100644 (file)
index 0000000..6021ce7
--- /dev/null
@@ -0,0 +1,48 @@
+From 185841e1625a30d4e291e1f7878f96bf04d3c1e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 14:08:14 -0700
+Subject: gve: Upgrade memory barrier in poll routine
+
+From: Catherine Sullivan <csully@google.com>
+
+[ Upstream commit f81781835f0adfae8d701545386030d223efcd6f ]
+
+As currently written, if the driver checks for more work (via
+gve_tx_poll or gve_rx_poll) before the device posts work and the
+irq doorbell is not unmasked
+(via iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, ...)) before the device
+attempts to raise an interrupt, an interrupt is lost and this could
+potentially lead to the traffic being completely halted. For
+example, if a tx queue has already been stopped, the driver won't get
+the chance to complete work and egress will be halted.
+
+We need a full memory barrier in the poll
+routine to ensure that the irq doorbell is unmasked before the driver
+checks for more work.
+
+Fixes: f5cedc84a30d ("gve: Add transmit and receive support")
+Signed-off-by: Catherine Sullivan <csully@google.com>
+Signed-off-by: David Awogbemila <awogbemila@google.com>
+Acked-by: Willem de Brujin <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 839102ea6aa1..d6e35421d8f7 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+       /* Double check we have no extra work.
+        * Ensure unmask synchronizes with checking for work.
+        */
+-      dma_rmb();
++      mb();
+       if (block->tx)
+               reschedule |= gve_tx_poll(block, -1);
+       if (block->rx)
+-- 
+2.30.2
+
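
A minimal sketch of the ordering the patch enforces (hypothetical device hooks, declared only to keep the sketch complete): the MMIO write that re-arms the interrupt must be ordered before the final re-check of the rings; dma_rmb() only orders reads and cannot provide that guarantee, hence the upgrade to a full mb():

#include <asm/barrier.h>
#include <linux/types.h>

struct ex_block;

void ex_unmask_irq(struct ex_block *block);     /* MMIO doorbell write */
bool ex_ring_has_work(struct ex_block *block);

static bool ex_napi_poll_tail(struct ex_block *block)
{
        ex_unmask_irq(block);
        /* Full barrier: make the unmask visible before re-checking,
         * so a completion that lands in between either raises an
         * interrupt or is caught by the check below.
         */
        mb();
        return ex_ring_has_work(block);
}
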
diff --git a/queue-5.10/interconnect-qcom-add-missing-module_device_table.patch b/queue-5.10/interconnect-qcom-add-missing-module_device_table.patch
new file mode 100644 (file)
index 0000000..4d5a909
--- /dev/null
@@ -0,0 +1,38 @@
+From 242f3c62f37b1070e0f637df360ee20386594312 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 11:44:33 +0800
+Subject: interconnect: qcom: Add missing MODULE_DEVICE_TABLE
+
+From: Zou Wei <zou_wei@huawei.com>
+
+[ Upstream commit 1fd86e280d8b21762901e43d42d66dbfe8b8e0d3 ]
+
+This patch adds missing MODULE_DEVICE_TABLE definition which generates
+correct modalias for automatic loading of this driver when it is built
+as an external module.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Zou Wei <zou_wei@huawei.com>
+Link: https://lore.kernel.org/r/1620704673-104205-1-git-send-email-zou_wei@huawei.com
+Fixes: 976daac4a1c5 ("interconnect: qcom: Consolidate interconnect RPMh support")
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/interconnect/qcom/bcm-voter.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index 7c3ef817e99c..dd0e3bd50b94 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -370,6 +370,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
+       { .compatible = "qcom,bcm-voter" },
+       { }
+ };
++MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
+ static struct platform_driver qcom_icc_bcm_voter_driver = {
+       .probe = qcom_icc_bcm_voter_probe,
+-- 
+2.30.2
+
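
For reference, a minimal platform-driver sketch (illustrative compatible string, not the bcm-voter one) showing what MODULE_DEVICE_TABLE(of, ...) buys: it exports the match table as module alias metadata so udev/modprobe can autoload the module when a matching DT node is probed; without it the driver only binds when built in or loaded by hand:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id ex_of_match[] = {
        { .compatible = "vendor,example-device" },
        { }
};
MODULE_DEVICE_TABLE(of, ex_of_match);

static int ex_probe(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver ex_driver = {
        .probe = ex_probe,
        .driver = {
                .name = "example-device",
                .of_match_table = ex_of_match,
        },
};
module_platform_driver(ex_driver);

MODULE_LICENSE("GPL");
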
diff --git a/queue-5.10/interconnect-qcom-bcm-voter-add-a-missing-of_node_pu.patch b/queue-5.10/interconnect-qcom-bcm-voter-add-a-missing-of_node_pu.patch
new file mode 100644 (file)
index 0000000..1736487
--- /dev/null
@@ -0,0 +1,45 @@
+From 652d1ff2692db6cbe771d78b3e97724580b91453 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Apr 2021 11:36:10 -0700
+Subject: interconnect: qcom: bcm-voter: add a missing of_node_put()
+
+From: Subbaraman Narayanamurthy <subbaram@codeaurora.org>
+
+[ Upstream commit a00593737f8bac2c9e97b696e7ff84a4446653e8 ]
+
+Add a missing of_node_put() in of_bcm_voter_get() to avoid the
+reference leak.
+
+Signed-off-by: Subbaraman Narayanamurthy <subbaram@codeaurora.org>
+Reviewed-by: Matthias Kaehlcke <mka@chromium.org>
+Link: https://lore.kernel.org/r/1619116570-13308-1-git-send-email-subbaram@codeaurora.org
+Fixes: 976daac4a1c5 ("interconnect: qcom: Consolidate interconnect RPMh support")
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/interconnect/qcom/bcm-voter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index 887d13721e52..7c3ef817e99c 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+  */
+ #include <asm/div64.h>
+@@ -212,6 +212,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
+       }
+       mutex_unlock(&bcm_voter_lock);
++      of_node_put(node);
+       return voter;
+ }
+ EXPORT_SYMBOL_GPL(of_bcm_voter_get);
+-- 
+2.30.2
+
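
A minimal sketch of the refcount rule behind the fix (hypothetical lookup, not the bcm-voter code): of_parse_phandle() returns a device node with its refcount raised, so every path that is done with the node, including the success path, must drop it with of_node_put():

#include <linux/of.h>
#include <linux/errno.h>

static int ex_lookup(struct device_node *np)
{
        struct device_node *node;

        node = of_parse_phandle(np, "example-phandle", 0);
        if (!node)
                return -ENODEV;

        /* ... look up whatever the phandle points at ... */

        of_node_put(node);      /* balance the get from of_parse_phandle() */
        return 0;
}
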
diff --git a/queue-5.10/iommu-virtio-add-missing-module_device_table.patch b/queue-5.10/iommu-virtio-add-missing-module_device_table.patch
new file mode 100644 (file)
index 0000000..7c37209
--- /dev/null
@@ -0,0 +1,39 @@
+From e3ffbd6b149c28edd26cd556e73017a44244e198 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 May 2021 11:14:51 +0800
+Subject: iommu/virtio: Add missing MODULE_DEVICE_TABLE
+
+From: Bixuan Cui <cuibixuan@huawei.com>
+
+[ Upstream commit 382d91fc0f4f1b13f8a0dcbf7145f4f175b71a18 ]
+
+This patch adds missing MODULE_DEVICE_TABLE definition which generates
+correct modalias for automatic loading of this driver when it is built
+as an external module.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Bixuan Cui <cuibixuan@huawei.com>
+Fixes: fa4afd78ea12 ("iommu/virtio: Build virtio-iommu as module")
+Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Link: https://lore.kernel.org/r/20210508031451.53493-1-cuibixuan@huawei.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/virtio-iommu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
+index 2bfdd5734844..81dea4caf561 100644
+--- a/drivers/iommu/virtio-iommu.c
++++ b/drivers/iommu/virtio-iommu.c
+@@ -1138,6 +1138,7 @@ static struct virtio_device_id id_table[] = {
+       { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
+       { 0 },
+ };
++MODULE_DEVICE_TABLE(virtio, id_table);
+ static struct virtio_driver virtio_iommu_drv = {
+       .driver.name            = KBUILD_MODNAME,
+-- 
+2.30.2
+
diff --git a/queue-5.10/iommu-vt-d-use-user-privilege-for-rid2pasid-translat.patch b/queue-5.10/iommu-vt-d-use-user-privilege-for-rid2pasid-translat.patch
new file mode 100644 (file)
index 0000000..e531756
--- /dev/null
@@ -0,0 +1,72 @@
+From 311c081bd0033b2edb538c0448334aa5cc97e7d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 09:50:27 +0800
+Subject: iommu/vt-d: Use user privilege for RID2PASID translation
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit 54c80d907400189b09548039be8f3b6e297e8ae3 ]
+
+When first-level page tables are used for IOVA translation, we use user
+privilege by setting U/S bit in the page table entry. This is to make it
+consistent with the second level translation, where the U/S enforcement
+is not available. Clear the SRE (Supervisor Request Enable) field in the
+pasid table entry of RID2PASID so that requests requesting the supervisor
+privilege are blocked and treated as DMA remapping faults.
+
+Fixes: b802d070a52a1 ("iommu/vt-d: Use iova over first level")
+Suggested-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20210512064426.3440915-1-baolu.lu@linux.intel.com
+Link: https://lore.kernel.org/r/20210519015027.108468-3-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 7 +++++--
+ drivers/iommu/intel/pasid.c | 3 ++-
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index eececdeaa40f..b21c8224b1c8 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2606,9 +2606,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+                                   struct device *dev,
+                                   u32 pasid)
+ {
+-      int flags = PASID_FLAG_SUPERVISOR_MODE;
+       struct dma_pte *pgd = domain->pgd;
+       int agaw, level;
++      int flags = 0;
+       /*
+        * Skip top levels of page tables for iommu which has
+@@ -2624,7 +2624,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+       if (level != 4 && level != 5)
+               return -EINVAL;
+-      flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
++      if (pasid != PASID_RID2PASID)
++              flags |= PASID_FLAG_SUPERVISOR_MODE;
++      if (level == 5)
++              flags |= PASID_FLAG_FL5LP;
+       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+               flags |= PASID_FLAG_PAGE_SNOOP;
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index ce4ef2d245e3..1e7c17989084 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -677,7 +677,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
+        * Since it is a second level only translation setup, we should
+        * set SRE bit as well (addresses are expected to be GPAs).
+        */
+-      pasid_set_sre(pte);
++      if (pasid != PASID_RID2PASID)
++              pasid_set_sre(pte);
+       pasid_set_present(pte);
+       pasid_flush_caches(iommu, pte, pasid, did);
+-- 
+2.30.2
+
diff --git a/queue-5.10/ipv6-record-frag_max_size-in-atomic-fragments-in-inp.patch b/queue-5.10/ipv6-record-frag_max_size-in-atomic-fragments-in-inp.patch
new file mode 100644 (file)
index 0000000..cd3bbb6
--- /dev/null
@@ -0,0 +1,46 @@
+From 4cdd6757cfe18f9275b796a952dc38fe91a98f28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 May 2021 13:21:14 -0700
+Subject: ipv6: record frag_max_size in atomic fragments in input path
+
+From: Francesco Ruggeri <fruggeri@arista.com>
+
+[ Upstream commit e29f011e8fc04b2cdc742a2b9bbfa1b62518381a ]
+
+Commit dbd1759e6a9c ("ipv6: on reassembly, record frag_max_size")
+filled the frag_max_size field in IP6CB in the input path.
+The field should also be filled in case of atomic fragments.
+
+Fixes: dbd1759e6a9c ('ipv6: on reassembly, record frag_max_size')
+Signed-off-by: Francesco Ruggeri <fruggeri@arista.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/reassembly.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 47a0dc46cbdb..28e44782c94d 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+       hdr = ipv6_hdr(skb);
+       fhdr = (struct frag_hdr *)skb_transport_header(skb);
+-      if (!(fhdr->frag_off & htons(0xFFF9))) {
++      if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
+               /* It is not a fragmented frame */
+               skb->transport_header += sizeof(struct frag_hdr);
+               __IP6_INC_STATS(net,
+@@ -351,6 +351,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+               IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+               IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
++              IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
++                                          sizeof(struct ipv6hdr);
+               return 1;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.10/ixgbe-fix-large-mtu-request-from-vf.patch b/queue-5.10/ixgbe-fix-large-mtu-request-from-vf.patch
new file mode 100644 (file)
index 0000000..f358907
--- /dev/null
@@ -0,0 +1,76 @@
+From 088d4a8b312355f3aae817a8e21763b82ba049da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 May 2021 11:18:35 -0700
+Subject: ixgbe: fix large MTU request from VF
+
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+[ Upstream commit 63e39d29b3da02e901349f6cd71159818a4737a6 ]
+
+Check that the MTU value requested by the VF is within the supported
+range of MTUs before attempting to set the VF large packet enable,
+and reject the request otherwise. This also avoids unnecessary
+register updates in the case of the 82599 controller.
+
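+As a rough sketch of the intended validation (standalone userspace
+code; the ETH_MIN_MTU and IXGBE_MAX_JUMBO_FRAME_SIZE values below are
+assumptions taken from the kernel headers, not part of this patch):
+
+#include <stdio.h>
+#include <errno.h>
+
+#define ETH_MIN_MTU                68    /* assumed minimum */
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728  /* assumed driver limit */
+
+/* Reject out-of-range requests before touching any hardware register. */
+static int validate_vf_max_frame(unsigned int max_frame)
+{
+        if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)
+                return -EINVAL;
+        return 0;
+}
+
+int main(void)
+{
+        printf("%d\n", validate_vf_max_frame(16384)); /* -EINVAL (-22) */
+        printf("%d\n", validate_vf_max_frame(1518));  /* 0 */
+        return 0;
+}
+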
+Fixes: 872844ddb9e4 ("ixgbe: Enable jumbo frames support w/ SR-IOV")
+Co-developed-by: Piotr Skajewski <piotrx.skajewski@intel.com>
+Signed-off-by: Piotr Skajewski <piotrx.skajewski@intel.com>
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Co-developed-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 988db46bff0e..214a38de3f41 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
+       return err;
+ }
+-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
++static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
+ {
+       struct ixgbe_hw *hw = &adapter->hw;
+-      int max_frame = msgbuf[1];
+       u32 max_frs;
++      if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
++              e_err(drv, "VF max_frame %d out of range\n", max_frame);
++              return -EINVAL;
++      }
++
+       /*
+        * For 82599EB we have to keep all PFs and VFs operating with
+        * the same max_frame value in order to avoid sending an oversize
+@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+               }
+       }
+-      /* MTU < 68 is an error and causes problems on some kernels */
+-      if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+-              e_err(drv, "VF max_frame %d out of range\n", max_frame);
+-              return -EINVAL;
+-      }
+-
+       /* pull current max frame size from hardware */
+       max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       max_frs &= IXGBE_MHADD_MFS_MASK;
+@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+               retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+               break;
+       case IXGBE_VF_SET_LPE:
+-              retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
++              retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
+               break;
+       case IXGBE_VF_SET_MACVLAN:
+               retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+-- 
+2.30.2
+
diff --git a/queue-5.10/mips-alchemy-xxs1500-add-gpio-au1000.h-header-file.patch b/queue-5.10/mips-alchemy-xxs1500-add-gpio-au1000.h-header-file.patch
new file mode 100644 (file)
index 0000000..5ccd548
--- /dev/null
@@ -0,0 +1,46 @@
+From 52cef50e649e992777dccebe89fd9713de843248 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 May 2021 17:01:08 -0700
+Subject: MIPS: alchemy: xxs1500: add gpio-au1000.h header file
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit ff4cff962a7eedc73e54b5096693da7f86c61346 ]
+
+board-xxs1500.c references 2 functions without declaring them, so add
+the header file to placate the build.
+
+../arch/mips/alchemy/board-xxs1500.c: In function 'board_setup':
+../arch/mips/alchemy/board-xxs1500.c:56:2: error: implicit declaration of function 'alchemy_gpio1_input_enable' [-Werror=implicit-function-declaration]
+   56 |  alchemy_gpio1_input_enable();
+../arch/mips/alchemy/board-xxs1500.c:57:2: error: implicit declaration of function 'alchemy_gpio2_enable'; did you mean 'alchemy_uart_enable'? [-Werror=implicit-function-declaration]
+   57 |  alchemy_gpio2_enable();
+
+Fixes: 8e026910fcd4 ("MIPS: Alchemy: merge GPR/MTX-1/XXS1500 board code into single files")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: linux-mips@vger.kernel.org
+Cc: Manuel Lauss <manuel.lauss@googlemail.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Acked-by: Manuel Lauss <manuel.lauss@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/alchemy/board-xxs1500.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
+index b184baa4e56a..f175bce2987f 100644
+--- a/arch/mips/alchemy/board-xxs1500.c
++++ b/arch/mips/alchemy/board-xxs1500.c
+@@ -18,6 +18,7 @@
+ #include <asm/reboot.h>
+ #include <asm/setup.h>
+ #include <asm/mach-au1x00/au1000.h>
++#include <asm/mach-au1x00/gpio-au1000.h>
+ #include <prom.h>
+ const char *get_system_type(void)
+-- 
+2.30.2
+
diff --git a/queue-5.10/mips-ralink-export-rt_sysc_membase-for-rt2880_wdt.c.patch b/queue-5.10/mips-ralink-export-rt_sysc_membase-for-rt2880_wdt.c.patch
new file mode 100644 (file)
index 0000000..ab19304
--- /dev/null
@@ -0,0 +1,53 @@
+From 2d3cddbd3c14189034eb10df9329b78e13221994 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 May 2021 17:54:17 -0700
+Subject: MIPS: ralink: export rt_sysc_membase for rt2880_wdt.c
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit fef532ea0cd871afab7d9a7b6e9da99ac2c24371 ]
+
+rt2880_wdt.c uses (well, attempts to use) rt_sysc_membase. However,
+when this watchdog driver is built as a loadable module, there is a
+build error since the rt_sysc_membase symbol is not exported.
+Export it to quell the build error.
+
+ERROR: modpost: "rt_sysc_membase" [drivers/watchdog/rt2880_wdt.ko] undefined!
+
+Fixes: 473cf939ff34 ("watchdog: add ralink watchdog driver")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Wim Van Sebroeck <wim@iguana.be>
+Cc: John Crispin <john@phrozen.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-watchdog@vger.kernel.org
+Acked-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/ralink/of.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
+index cbae9d23ab7f..a971f1aca096 100644
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -8,6 +8,7 @@
+ #include <linux/io.h>
+ #include <linux/clk.h>
++#include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/sizes.h>
+ #include <linux/of_fdt.h>
+@@ -25,6 +26,7 @@
+ __iomem void *rt_sysc_membase;
+ __iomem void *rt_memc_membase;
++EXPORT_SYMBOL_GPL(rt_sysc_membase);
+ __iomem void *plat_of_remap_node(const char *node)
+ {
+-- 
+2.30.2
+
diff --git a/queue-5.10/mld-fix-panic-in-mld_newpack.patch b/queue-5.10/mld-fix-panic-in-mld_newpack.patch
new file mode 100644 (file)
index 0000000..a39a1c5
--- /dev/null
@@ -0,0 +1,112 @@
+From 055569a20c2a0659eb66cf758fa07e14c21bfa64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 May 2021 14:44:42 +0000
+Subject: mld: fix panic in mld_newpack()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 020ef930b826d21c5446fdc9db80fd72a791bc21 ]
+
+mld_newpack() doesn't allow allocating high-order pages;
+only order-0 allocations are allowed.
+If the headroom size is too large, a kernel panic can occur in skb_put().
+
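+A back-of-the-envelope sketch of the failure mode (all numbers are
+assumptions chosen for illustration: 4 KiB pages, a rough
+skb_shared_info overhead, and headroom from a stack of ip6gre devices):
+
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned int page_size  = 4096;               /* assumed PAGE_SIZE */
+        unsigned int shinfo     = 320;                /* assumed skb overhead */
+        unsigned int order0_max = page_size - shinfo; /* ~SKB_MAX_ORDER(0, 0) */
+
+        unsigned int mtu      = 1280;                 /* IPv6 minimum MTU */
+        unsigned int headroom = 99 * 48;              /* ~99 stacked tunnels */
+        unsigned int needed   = headroom + mtu;
+
+        if (needed > order0_max)
+                printf("order-0 clamp (%u) < needed (%u): later skb_put() overruns\n",
+                       order0_max, needed);
+        return 0;
+}
+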
+Test commands:
+    ip netns del A
+    ip netns del B
+    ip netns add A
+    ip netns add B
+    ip link add veth0 type veth peer name veth1
+    ip link set veth0 netns A
+    ip link set veth1 netns B
+
+    ip netns exec A ip link set lo up
+    ip netns exec A ip link set veth0 up
+    ip netns exec A ip -6 a a 2001:db8:0::1/64 dev veth0
+    ip netns exec B ip link set lo up
+    ip netns exec B ip link set veth1 up
+    ip netns exec B ip -6 a a 2001:db8:0::2/64 dev veth1
+    for i in {1..99}
+    do
+        let A=$i-1
+        ip netns exec A ip link add ip6gre$i type ip6gre \
+       local 2001:db8:$A::1 remote 2001:db8:$A::2 encaplimit 100
+        ip netns exec A ip -6 a a 2001:db8:$i::1/64 dev ip6gre$i
+        ip netns exec A ip link set ip6gre$i up
+
+        ip netns exec B ip link add ip6gre$i type ip6gre \
+       local 2001:db8:$A::2 remote 2001:db8:$A::1 encaplimit 100
+        ip netns exec B ip -6 a a 2001:db8:$i::2/64 dev ip6gre$i
+        ip netns exec B ip link set ip6gre$i up
+    done
+
+Splat looks like:
+kernel BUG at net/core/skbuff.c:110!
+invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
+CPU: 0 PID: 7 Comm: kworker/0:1 Not tainted 5.12.0+ #891
+Workqueue: ipv6_addrconf addrconf_dad_work
+RIP: 0010:skb_panic+0x15d/0x15f
+Code: 92 fe 4c 8b 4c 24 10 53 8b 4d 70 45 89 e0 48 c7 c7 00 ae 79 83
+41 57 41 56 41 55 48 8b 54 24 a6 26 f9 ff <0f> 0b 48 8b 6c 24 20 89
+34 24 e8 4a 4e 92 fe 8b 34 24 48 c7 c1 20
+RSP: 0018:ffff88810091f820 EFLAGS: 00010282
+RAX: 0000000000000089 RBX: ffff8881086e9000 RCX: 0000000000000000
+RDX: 0000000000000089 RSI: 0000000000000008 RDI: ffffed1020123efb
+RBP: ffff888005f6eac0 R08: ffffed1022fc0031 R09: ffffed1022fc0031
+R10: ffff888117e00187 R11: ffffed1022fc0030 R12: 0000000000000028
+R13: ffff888008284eb0 R14: 0000000000000ed8 R15: 0000000000000ec0
+FS:  0000000000000000(0000) GS:ffff888117c00000(0000)
+knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8b801c5640 CR3: 0000000033c2c006 CR4: 00000000003706f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ ? ip6_mc_hdr.isra.26.constprop.46+0x12a/0x600
+ ? ip6_mc_hdr.isra.26.constprop.46+0x12a/0x600
+ skb_put.cold.104+0x22/0x22
+ ip6_mc_hdr.isra.26.constprop.46+0x12a/0x600
+ ? rcu_read_lock_sched_held+0x91/0xc0
+ mld_newpack+0x398/0x8f0
+ ? ip6_mc_hdr.isra.26.constprop.46+0x600/0x600
+ ? lock_contended+0xc40/0xc40
+ add_grhead.isra.33+0x280/0x380
+ add_grec+0x5ca/0xff0
+ ? mld_sendpack+0xf40/0xf40
+ ? lock_downgrade+0x690/0x690
+ mld_send_initial_cr.part.34+0xb9/0x180
+ ipv6_mc_dad_complete+0x15d/0x1b0
+ addrconf_dad_completed+0x8d2/0xbb0
+ ? lock_downgrade+0x690/0x690
+ ? addrconf_rs_timer+0x660/0x660
+ ? addrconf_dad_work+0x73c/0x10e0
+ addrconf_dad_work+0x73c/0x10e0
+
+Allowing high-order page allocations fixes this problem.
+
+Fixes: 72e09ad107e7 ("ipv6: avoid high order allocations")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/mcast.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 8cd2782a31e4..9fb5077f8e9a 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1601,10 +1601,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+                    IPV6_TLV_PADN, 0 };
+       /* we assume size > sizeof(ra) here */
+-      /* limit our allocations to order-0 page */
+-      size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+       skb = sock_alloc_send_skb(sk, size, 1, &err);
+-
+       if (!skb)
+               return NULL;
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-bnx2-fix-error-return-code-in-bnx2_init_board.patch b/queue-5.10/net-bnx2-fix-error-return-code-in-bnx2_init_board.patch
new file mode 100644 (file)
index 0000000..506854a
--- /dev/null
@@ -0,0 +1,40 @@
+From 10615b50423aa80a94601dda08e996ba77fb11a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 May 2021 15:16:05 +0800
+Subject: net: bnx2: Fix error return code in bnx2_init_board()
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+[ Upstream commit 28c66b6da4087b8cfe81c2ec0a46eb6116dafda9 ]
+
+Return -EPERM from this error handling path instead of 0, as is done
+elsewhere in this function.
+
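+The pattern being fixed, reduced to a standalone sketch (the function
+and label names below are illustrative, not the driver's):
+
+#include <stdio.h>
+#include <errno.h>
+
+/* rc must be set before jumping to the error label, otherwise the
+ * caller sees the stale value 0 and treats the probe as successful. */
+static int init_board(int unsupported_bus)
+{
+        int rc = 0;
+
+        if (unsupported_bus) {
+                fprintf(stderr, "5706 A1 can only be used in a PCIX bus, aborting\n");
+                rc = -EPERM;            /* the previously missing assignment */
+                goto err_out_unmap;
+        }
+        return 0;
+
+err_out_unmap:
+        /* unmap/cleanup would go here */
+        return rc;
+}
+
+int main(void)
+{
+        printf("init_board() = %d\n", init_board(1)); /* -EPERM (-1) */
+        return 0;
+}
+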
+Fixes: b6016b767397 ("[BNX2]: New Broadcom gigabit network driver.")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnx2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 3e8a179f39db..633b10389653 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
+               BNX2_WR(bp, PCI_COMMAND, reg);
+       } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
+               !(bp->flags & BNX2_FLAG_PCIX)) {
+-
+               dev_err(&pdev->dev,
+                       "5706 A1 can only be used in a PCIX bus, aborting\n");
++              rc = -EPERM;
+               goto err_out_unmap;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-dsa-fix-error-code-getting-shifted-with-4-in-dsa.patch b/queue-5.10/net-dsa-fix-error-code-getting-shifted-with-4-in-dsa.patch
new file mode 100644 (file)
index 0000000..d090e2d
--- /dev/null
@@ -0,0 +1,68 @@
+From 5b6d123c59ef7d32c502b12c9aab2a5b9d7c3bd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 May 2021 22:33:38 +0300
+Subject: net: dsa: fix error code getting shifted with 4 in
+ dsa_slave_get_sset_count
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit b94cbc909f1d80378a1f541968309e5c1178c98b ]
+
+DSA implements a bunch of 'standardized' ethtool statistics counters,
+namely tx_packets, tx_bytes, rx_packets, rx_bytes. So whatever the
+hardware driver returns in .get_sset_count(), we need to add 4 to that.
+
+That is ok, except that .get_sset_count() can return a negative error
+code, for example:
+
+b53_get_sset_count
+-> phy_ethtool_get_sset_count
+   -> return -EIO
+
+-EIO is -5, and with 4 added to it, it becomes -1, aka -EPERM. One can
+imagine that certain error codes may even become positive, although
+based on code inspection I did not see instances of that.
+
+Check the error code first; if it is negative, return it as-is.
+
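+A tiny standalone demonstration of the arithmetic (plain userspace C,
+nothing driver specific):
+
+#include <stdio.h>
+#include <errno.h>
+
+int main(void)
+{
+        int count = -EIO;       /* error returned by .get_sset_count() */
+
+        /* Old behaviour: blindly add the 4 standard DSA counters. */
+        printf("-EIO + 4 = %d (-EPERM? %s)\n",
+               count + 4, (count + 4 == -EPERM) ? "yes" : "no");
+
+        /* Fixed behaviour: propagate negative error codes as-is. */
+        if (count < 0)
+                printf("return %d unchanged\n", count);
+        return 0;
+}
+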
+Based on a similar patch for dsa_master_get_strings from Dan Carpenter:
+https://patchwork.kernel.org/project/netdevbpf/patch/YJaSe3RPgn7gKxZv@mwanda/
+
+Fixes: 91da11f870f0 ("net: Distributed Switch Architecture protocol support")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dsa/slave.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index c6806eef906f..9281c9c6a253 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -746,13 +746,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
+       struct dsa_switch *ds = dp->ds;
+       if (sset == ETH_SS_STATS) {
+-              int count;
++              int count = 0;
+-              count = 4;
+-              if (ds->ops->get_sset_count)
+-                      count += ds->ops->get_sset_count(ds, dp->index, sset);
++              if (ds->ops->get_sset_count) {
++                      count = ds->ops->get_sset_count(ds, dp->index, sset);
++                      if (count < 0)
++                              return count;
++              }
+-              return count;
++              return count + 4;
+       }
+       return -EOPNOTSUPP;
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-ethernet-mtk_eth_soc-fix-packet-statistics-suppo.patch b/queue-5.10/net-ethernet-mtk_eth_soc-fix-packet-statistics-suppo.patch
new file mode 100644 (file)
index 0000000..41b7451
--- /dev/null
@@ -0,0 +1,160 @@
+From d017af08276d7a08040fa8b7dddcb9be9d3b7feb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 May 2021 09:56:30 +0200
+Subject: net: ethernet: mtk_eth_soc: Fix packet statistics support for
+ MT7628/88
+
+From: Stefan Roese <sr@denx.de>
+
+[ Upstream commit ad79fd2c42f7626bdf6935cd72134c2a5a59ff2d ]
+
+The MT7628/88 SoCs have a different (and more limited) set of packet
+counter registers than the one currently supported in the mtk_eth_soc
+driver. This patch adds support for reading these registers, so that
+the packet statistics are updated correctly.
+
+Additionally, the defines for the non-MT7628 variant packet counter
+registers are added and used in this patch instead of hard-coded
+values.
+
+Signed-off-by: Stefan Roese <sr@denx.de>
+Fixes: 296c9120752b ("net: ethernet: mediatek: Add MT7628/88 SoC support")
+Cc: Felix Fietkau <nbd@nbd.name>
+Cc: John Crispin <john@phrozen.org>
+Cc: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
+Cc: Reto Schneider <code@reto-schneider.ch>
+Cc: Reto Schneider <reto.schneider@husqvarnagroup.com>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 67 ++++++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 24 +++++++-
+ 2 files changed, 66 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index d930fcda9c3b..a2d3f04a9ff2 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -679,32 +679,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
+ void mtk_stats_update_mac(struct mtk_mac *mac)
+ {
+       struct mtk_hw_stats *hw_stats = mac->hw_stats;
+-      unsigned int base = MTK_GDM1_TX_GBCNT;
+-      u64 stats;
+-
+-      base += hw_stats->reg_offset;
++      struct mtk_eth *eth = mac->hw;
+       u64_stats_update_begin(&hw_stats->syncp);
+-      hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+-      stats =  mtk_r32(mac->hw, base + 0x04);
+-      if (stats)
+-              hw_stats->rx_bytes += (stats << 32);
+-      hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+-      hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+-      hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+-      hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+-      hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+-      hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+-      hw_stats->rx_flow_control_packets +=
+-                                      mtk_r32(mac->hw, base + 0x24);
+-      hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+-      hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+-      hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+-      stats =  mtk_r32(mac->hw, base + 0x34);
+-      if (stats)
+-              hw_stats->tx_bytes += (stats << 32);
+-      hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
++      if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
++              hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
++              hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
++              hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
++              hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
++              hw_stats->rx_checksum_errors +=
++                      mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
++      } else {
++              unsigned int offs = hw_stats->reg_offset;
++              u64 stats;
++
++              hw_stats->rx_bytes += mtk_r32(mac->hw,
++                                            MTK_GDM1_RX_GBCNT_L + offs);
++              stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
++              if (stats)
++                      hw_stats->rx_bytes += (stats << 32);
++              hw_stats->rx_packets +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
++              hw_stats->rx_overflow +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
++              hw_stats->rx_fcs_errors +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
++              hw_stats->rx_short_errors +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
++              hw_stats->rx_long_errors +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
++              hw_stats->rx_checksum_errors +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
++              hw_stats->rx_flow_control_packets +=
++                      mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
++              hw_stats->tx_skip +=
++                      mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
++              hw_stats->tx_collisions +=
++                      mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
++              hw_stats->tx_bytes +=
++                      mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
++              stats =  mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
++              if (stats)
++                      hw_stats->tx_bytes += (stats << 32);
++              hw_stats->tx_packets +=
++                      mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
++      }
++
+       u64_stats_update_end(&hw_stats->syncp);
+ }
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 73ce1f0f307a..54a7cd93cc0f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -266,8 +266,21 @@
+ /* QDMA FQ Free Page Buffer Length Register */
+ #define MTK_QDMA_FQ_BLEN      0x1B2C
+-/* GMA1 Received Good Byte Count Register */
+-#define MTK_GDM1_TX_GBCNT     0x2400
++/* GMA1 counter / statics register */
++#define MTK_GDM1_RX_GBCNT_L   0x2400
++#define MTK_GDM1_RX_GBCNT_H   0x2404
++#define MTK_GDM1_RX_GPCNT     0x2408
++#define MTK_GDM1_RX_OERCNT    0x2410
++#define MTK_GDM1_RX_FERCNT    0x2414
++#define MTK_GDM1_RX_SERCNT    0x2418
++#define MTK_GDM1_RX_LENCNT    0x241c
++#define MTK_GDM1_RX_CERCNT    0x2420
++#define MTK_GDM1_RX_FCCNT     0x2424
++#define MTK_GDM1_TX_SKIPCNT   0x2428
++#define MTK_GDM1_TX_COLCNT    0x242c
++#define MTK_GDM1_TX_GBCNT_L   0x2430
++#define MTK_GDM1_TX_GBCNT_H   0x2434
++#define MTK_GDM1_TX_GPCNT     0x2438
+ #define MTK_STAT_OFFSET               0x40
+ /* QDMA descriptor txd4 */
+@@ -478,6 +491,13 @@
+ #define MT7628_SDM_MAC_ADRL   (MT7628_SDM_OFFSET + 0x0c)
+ #define MT7628_SDM_MAC_ADRH   (MT7628_SDM_OFFSET + 0x10)
++/* Counter / stat register */
++#define MT7628_SDM_TPCNT      (MT7628_SDM_OFFSET + 0x100)
++#define MT7628_SDM_TBCNT      (MT7628_SDM_OFFSET + 0x104)
++#define MT7628_SDM_RPCNT      (MT7628_SDM_OFFSET + 0x108)
++#define MT7628_SDM_RBCNT      (MT7628_SDM_OFFSET + 0x10c)
++#define MT7628_SDM_CS_ERR     (MT7628_SDM_OFFSET + 0x110)
++
+ struct mtk_rx_dma {
+       unsigned int rxd1;
+       unsigned int rxd2;
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-fec-fix-the-potential-memory-leak-in-fec_enet_in.patch b/queue-5.10/net-fec-fix-the-potential-memory-leak-in-fec_enet_in.patch
new file mode 100644 (file)
index 0000000..fc05a43
--- /dev/null
@@ -0,0 +1,64 @@
+From ee7bb95a7c126de4eb26da924fb14862c031a256 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 May 2021 10:43:59 +0800
+Subject: net: fec: fix the potential memory leak in fec_enet_init()
+
+From: Fugang Duan <fugang.duan@nxp.com>
+
+[ Upstream commit 619fee9eb13b5d29e4267cb394645608088c28a8 ]
+
+If the memory allocation for cbd_base fails, the memory already
+allocated for the queues should be freed, otherwise it causes a
+memory leak.
+
+And if the memory allocation for the queues fails, an error can be
+returned directly.
+
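+The resulting error-path structure, as a minimal standalone sketch
+(the helper names stand in for fec_enet_alloc_queue() and
+dmam_alloc_coherent(); they are illustrative only):
+
+#include <stdio.h>
+#include <errno.h>
+
+static int alloc_queues(void)     { return 0; }
+static void free_queues(void)     { puts("queues freed"); }
+static void *alloc_cbd_base(void) { return NULL; /* simulate failure */ }
+
+static int enet_init(void)
+{
+        int ret;
+
+        ret = alloc_queues();
+        if (ret)
+                return ret;             /* nothing to unwind yet */
+
+        if (!alloc_cbd_base()) {
+                ret = -ENOMEM;
+                goto free_queue_mem;    /* don't leak the queues */
+        }
+        return 0;
+
+free_queue_mem:
+        free_queues();
+        return ret;
+}
+
+int main(void)
+{
+        printf("enet_init() = %d\n", enet_init()); /* -ENOMEM (-12) */
+        return 0;
+}
+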
+Fixes: 59d0f7465644 ("net: fec: init multi queue date structure")
+Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
+Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 55c28fbc5f9e..960def41cc55 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3277,7 +3277,9 @@ static int fec_enet_init(struct net_device *ndev)
+               return ret;
+       }
+-      fec_enet_alloc_queue(ndev);
++      ret = fec_enet_alloc_queue(ndev);
++      if (ret)
++              return ret;
+       bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
+@@ -3285,7 +3287,8 @@ static int fec_enet_init(struct net_device *ndev)
+       cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+                                      GFP_KERNEL);
+       if (!cbd_base) {
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto free_queue_mem;
+       }
+       /* Get the Ethernet address */
+@@ -3363,6 +3366,10 @@ static int fec_enet_init(struct net_device *ndev)
+               fec_enet_update_ethtool_stats(ndev);
+       return 0;
++
++free_queue_mem:
++      fec_enet_free_queue(ndev);
++      return ret;
+ }
+ #ifdef CONFIG_OF
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-hns3-fix-incorrect-resp_msg-issue.patch b/queue-5.10/net-hns3-fix-incorrect-resp_msg-issue.patch
new file mode 100644 (file)
index 0000000..c4258b1
--- /dev/null
@@ -0,0 +1,51 @@
+From 5ea1d5a87b5c5b15c4855381e340454f8bbeb6a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 May 2021 19:36:00 +0800
+Subject: net: hns3: fix incorrect resp_msg issue
+
+From: Jiaran Zhang <zhangjiaran@huawei.com>
+
+[ Upstream commit a710b9ffbebaf713f7dbd4dbd9524907e5d66f33 ]
+
+In hclge_mbx_handler(), if there are two consecutive mailbox
+messages that require resp_msg, the resp_msg is not cleared
+after processing the first message, which makes the resp_msg
+data of the second message incorrect.
+
+Fix it by clearing the resp_msg before processing every mailbox
+message.
+
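+In outline (a standalone sketch, not the driver code; the struct and
+the loop below are illustrative):
+
+#include <stdio.h>
+#include <string.h>
+
+struct resp { int len; unsigned char data[8]; };
+
+int main(void)
+{
+        struct resp resp_msg;
+        int needs_resp[2] = { 1, 0 };   /* second message sets no data */
+
+        for (int i = 0; i < 2; i++) {
+                /* clear resp_msg before every message so data from
+                 * message i cannot leak into message i + 1 */
+                memset(&resp_msg, 0, sizeof(resp_msg));
+
+                if (needs_resp[i]) {
+                        resp_msg.len = 4;
+                        memcpy(resp_msg.data, "\xde\xad\xbe\xef", 4);
+                }
+                printf("msg %d: resp_msg.len = %d\n", i, resp_msg.len);
+        }
+        return 0;
+}
+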
+Fixes: bb5790b71bad ("net: hns3: refactor mailbox response scheme between PF and VF")
+Signed-off-by: Jiaran Zhang <zhangjiaran@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index e0254672831f..2c2d53f5c56e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -678,7 +678,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+       unsigned int flag;
+       int ret = 0;
+-      memset(&resp_msg, 0, sizeof(resp_msg));
+       /* handle all the mailbox requests in the queue */
+       while (!hclge_cmd_crq_empty(&hdev->hw)) {
+               if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+@@ -706,6 +705,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+               trace_hclge_pf_mbx_get(hdev, req);
++              /* clear the resp_msg before processing every mailbox message */
++              memset(&resp_msg, 0, sizeof(resp_msg));
++
+               switch (req->msg.code) {
+               case HCLGE_MBX_MAP_RING_TO_VECTOR:
+                       ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-hns3-put-off-calling-register_netdev-until-clien.patch b/queue-5.10/net-hns3-put-off-calling-register_netdev-until-clien.patch
new file mode 100644 (file)
index 0000000..4a47620
--- /dev/null
@@ -0,0 +1,139 @@
+From ae2be09522c3917111dcd47afb0ef64b0d5096f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 May 2021 19:36:01 +0800
+Subject: net: hns3: put off calling register_netdev() until client initialize
+ complete
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit a289a7e5c1d49b7d47df9913c1cc81fb48fab613 ]
+
+Currently, the netdevice is registered before client initialization
+completes, so there is a time window between the netdevice becoming
+available and becoming usable. In this window, if the user tries to
+change the channel number or ring parameters, hns3_set_rx_cpu_rmap()
+may be called twice and trigger a kernel BUG.
+
+[47199.416502] hns3 0000:35:00.0 eth1: set channels: tqp_num=1, rxfh=0
+[47199.430340] hns3 0000:35:00.0 eth1: already uninitialized
+[47199.438554] hns3 0000:35:00.0: rss changes from 4 to 1
+[47199.511854] hns3 0000:35:00.0: Channels changed, rss_size from 4 to 1, tqps from 4 to 1
+[47200.163524] ------------[ cut here ]------------
+[47200.171674] kernel BUG at lib/cpu_rmap.c:142!
+[47200.177847] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+[47200.185259] Modules linked in: hclge(+) hns3(-) hns3_cae(O) hns_roce_hw_v2 hnae3 vfio_iommu_type1 vfio_pci vfio_virqfd vfio pv680_mii(O) [last unloaded: hclge]
+[47200.205912] CPU: 1 PID: 8260 Comm: ethtool Tainted: G           O      5.11.0-rc3+ #1
+[47200.215601] Hardware name:  , xxxxxx 02/04/2021
+[47200.223052] pstate: 60400009 (nZCv daif +PAN -UAO -TCO BTYPE=--)
+[47200.230188] pc : cpu_rmap_add+0x38/0x40
+[47200.237472] lr : irq_cpu_rmap_add+0x84/0x140
+[47200.243291] sp : ffff800010e93a30
+[47200.247295] x29: ffff800010e93a30 x28: ffff082100584880
+[47200.254155] x27: 0000000000000000 x26: 0000000000000000
+[47200.260712] x25: 0000000000000000 x24: 0000000000000004
+[47200.267241] x23: ffff08209ba03000 x22: ffff08209ba038c0
+[47200.273789] x21: 000000000000003f x20: ffff0820e2bc1680
+[47200.280400] x19: ffff0820c970ec80 x18: 00000000000000c0
+[47200.286944] x17: 0000000000000000 x16: ffffb43debe4a0d0
+[47200.293456] x15: fffffc2082990600 x14: dead000000000122
+[47200.300059] x13: ffffffffffffffff x12: 000000000000003e
+[47200.306606] x11: ffff0820815b8080 x10: ffff53e411988000
+[47200.313171] x9 : 0000000000000000 x8 : ffff0820e2bc1700
+[47200.319682] x7 : 0000000000000000 x6 : 000000000000003f
+[47200.326170] x5 : 0000000000000040 x4 : ffff800010e93a20
+[47200.332656] x3 : 0000000000000004 x2 : ffff0820c970ec80
+[47200.339168] x1 : ffff0820e2bc1680 x0 : 0000000000000004
+[47200.346058] Call trace:
+[47200.349324]  cpu_rmap_add+0x38/0x40
+[47200.354300]  hns3_set_rx_cpu_rmap+0x6c/0xe0 [hns3]
+[47200.362294]  hns3_reset_notify_init_enet+0x1cc/0x340 [hns3]
+[47200.370049]  hns3_change_channels+0x40/0xb0 [hns3]
+[47200.376770]  hns3_set_channels+0x12c/0x2a0 [hns3]
+[47200.383353]  ethtool_set_channels+0x140/0x250
+[47200.389772]  dev_ethtool+0x714/0x23d0
+[47200.394440]  dev_ioctl+0x4cc/0x640
+[47200.399277]  sock_do_ioctl+0x100/0x2a0
+[47200.404574]  sock_ioctl+0x28c/0x470
+[47200.409079]  __arm64_sys_ioctl+0xb4/0x100
+[47200.415217]  el0_svc_common.constprop.0+0x84/0x210
+[47200.422088]  do_el0_svc+0x28/0x34
+[47200.426387]  el0_svc+0x28/0x70
+[47200.431308]  el0_sync_handler+0x1a4/0x1b0
+[47200.436477]  el0_sync+0x174/0x180
+[47200.441562] Code: 11000405 79000c45 f8247861 d65f03c0 (d4210000)
+[47200.448869] ---[ end trace a01efe4ce42e5f34 ]---
+
+The process is as follows:
+executing hns3_client_init
+|
+register_netdev()
+|                           hns3_set_channels()
+|                           |
+hns3_set_rx_cpu_rmap()      hns3_reset_notify_uninit_enet()
+|                               |
+|                            quit without calling function
+|                            hns3_free_rx_cpu_rmap for flag
+|                            HNS3_NIC_STATE_INITED is unset.
+|                           |
+|                           hns3_reset_notify_init_enet()
+|                               |
+set HNS3_NIC_STATE_INITED    call hns3_set_rx_cpu_rmap()-- crash
+
+Fix it by calling register_netdev() at the end of function
+hns3_client_init().
+
+Fixes: 08a100689d4b ("net: hns3: re-organize vector handle")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index ef3148919970..25fcb624ac20 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4113,12 +4113,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
+       if (ret)
+               goto out_init_phy;
+-      ret = register_netdev(netdev);
+-      if (ret) {
+-              dev_err(priv->dev, "probe register netdev fail!\n");
+-              goto out_reg_netdev_fail;
+-      }
+-
+       /* the device can work without cpu rmap, only aRFS needs it */
+       ret = hns3_set_rx_cpu_rmap(netdev);
+       if (ret)
+@@ -4146,17 +4140,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
+       set_bit(HNS3_NIC_STATE_INITED, &priv->state);
++      ret = register_netdev(netdev);
++      if (ret) {
++              dev_err(priv->dev, "probe register netdev fail!\n");
++              goto out_reg_netdev_fail;
++      }
++
+       if (netif_msg_drv(handle))
+               hns3_info_show(priv);
+       return ret;
++out_reg_netdev_fail:
++      hns3_dbg_uninit(handle);
+ out_client_start:
+       hns3_free_rx_cpu_rmap(netdev);
+       hns3_nic_uninit_irq(priv);
+ out_init_irq_fail:
+-      unregister_netdev(netdev);
+-out_reg_netdev_fail:
+       hns3_uninit_phy(netdev);
+ out_init_phy:
+       hns3_uninit_all_ring(priv);
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-hso-check-for-allocation-failure-in-hso_create_b.patch b/queue-5.10/net-hso-check-for-allocation-failure-in-hso_create_b.patch
new file mode 100644 (file)
index 0000000..d8f08bb
--- /dev/null
@@ -0,0 +1,90 @@
+From 5835605cbd92312dce9129c9e5a4a17b3d7522df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 17:24:48 +0300
+Subject: net: hso: check for allocation failure in
+ hso_create_bulk_serial_device()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 31db0dbd72444abe645d90c20ecb84d668f5af5e ]
+
+In current kernels, small allocations never actually fail so this
+patch shouldn't affect runtime.
+
+Originally this error handling code was written with the idea that if
+the "serial->tiocmget" allocation failed, then we would continue
+operating instead of bailing out early.  But in later years we added
+an unchecked dereference on the next line.
+
+       serial->tiocmget->serial_state_notification = kzalloc();
+        ^^^^^^^^^^^^^^^^^^
+
+Since these allocations are never going to fail in real life, this is
+mostly a philosophical debate, but I think bailing out early is the
+correct behavior that the user would want.  And generally it's safer to
+bail out as soon as an error happens.
+
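+The shape of the fix, as a minimal standalone sketch (the structure
+and helper names are illustrative, not the driver's):
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct tiocmget_s { void *serial_state_notification; };
+struct serial_s   { struct tiocmget_s *tiocmget; };
+
+static int create_device(struct serial_s *serial)
+{
+        serial->tiocmget = calloc(1, sizeof(*serial->tiocmget));
+        if (!serial->tiocmget)
+                goto exit;              /* bail before dereferencing it */
+
+        serial->tiocmget->serial_state_notification = calloc(1, 64);
+        if (!serial->tiocmget->serial_state_notification)
+                goto exit;
+
+        return 0;
+exit:
+        free(serial->tiocmget);         /* free(NULL) is a no-op */
+        return -1;
+}
+
+int main(void)
+{
+        struct serial_s s = { 0 };
+        printf("create_device() = %d\n", create_device(&s));
+        return 0;
+}
+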
+Fixes: af0de1303c4e ("usb: hso: obey DMA rules in tiocmget")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/hso.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 01566e4d2003..88f87787833c 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2618,29 +2618,28 @@ static struct hso_device *hso_create_bulk_serial_device(
+               num_urbs = 2;
+               serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
+                                          GFP_KERNEL);
++              if (!serial->tiocmget)
++                      goto exit;
+               serial->tiocmget->serial_state_notification
+                       = kzalloc(sizeof(struct hso_serial_state_notification),
+                                          GFP_KERNEL);
+-              /* it isn't going to break our heart if serial->tiocmget
+-               *  allocation fails don't bother checking this.
+-               */
+-              if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
+-                      tiocmget = serial->tiocmget;
+-                      tiocmget->endp = hso_get_ep(interface,
+-                                                  USB_ENDPOINT_XFER_INT,
+-                                                  USB_DIR_IN);
+-                      if (!tiocmget->endp) {
+-                              dev_err(&interface->dev, "Failed to find INT IN ep\n");
+-                              goto exit;
+-                      }
+-
+-                      tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+-                      if (tiocmget->urb) {
+-                              mutex_init(&tiocmget->mutex);
+-                              init_waitqueue_head(&tiocmget->waitq);
+-                      } else
+-                              hso_free_tiomget(serial);
++              if (!serial->tiocmget->serial_state_notification)
++                      goto exit;
++              tiocmget = serial->tiocmget;
++              tiocmget->endp = hso_get_ep(interface,
++                                          USB_ENDPOINT_XFER_INT,
++                                          USB_DIR_IN);
++              if (!tiocmget->endp) {
++                      dev_err(&interface->dev, "Failed to find INT IN ep\n");
++                      goto exit;
+               }
++
++              tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
++              if (tiocmget->urb) {
++                      mutex_init(&tiocmget->mutex);
++                      init_waitqueue_head(&tiocmget->waitq);
++              } else
++                      hso_free_tiomget(serial);
+       }
+       else
+               num_urbs = 1;
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-hsr-fix-mac_len-checks.patch b/queue-5.10/net-hsr-fix-mac_len-checks.patch
new file mode 100644 (file)
index 0000000..1f178ae
--- /dev/null
@@ -0,0 +1,189 @@
+From 74544ec3a0cd48acd351a519d4158eb3dc241e03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 May 2021 13:50:54 -0500
+Subject: net: hsr: fix mac_len checks
+
+From: George McCollister <george.mccollister@gmail.com>
+
+[ Upstream commit 48b491a5cc74333c4a6a82fe21cea42c055a3b0b ]
+
+Commit 2e9f60932a2c ("net: hsr: check skb can contain struct hsr_ethhdr
+in fill_frame_info") added the following check, which resulted in -EINVAL
+always being returned:
+       if (skb->mac_len < sizeof(struct hsr_ethhdr))
+               return -EINVAL;
+
+mac_len was not being set correctly so this check completely broke
+HSR/PRP since it was always 14, not 20.
+
+Set mac_len correctly and modify the mac_len checks to test in the
+correct places since sometimes it is legitimately 14.
+
+Fixes: 2e9f60932a2c ("net: hsr: check skb can contain struct hsr_ethhdr in fill_frame_info")
+Signed-off-by: George McCollister <george.mccollister@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_device.c  |  2 ++
+ net/hsr/hsr_forward.c | 30 +++++++++++++++++++++---------
+ net/hsr/hsr_forward.h |  8 ++++----
+ net/hsr/hsr_main.h    |  4 ++--
+ net/hsr/hsr_slave.c   | 11 +++++------
+ 5 files changed, 34 insertions(+), 21 deletions(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 6f4c34b6a5d6..fec1b014c0a2 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+       if (master) {
+               skb->dev = master->dev;
+               skb_reset_mac_header(skb);
++              skb_reset_mac_len(skb);
+               hsr_forward_skb(skb, master);
+       } else {
+               atomic_long_inc(&dev->tx_dropped);
+@@ -261,6 +262,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto)
+               goto out;
+       skb_reset_mac_header(skb);
++      skb_reset_mac_len(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 90c72e4c0a8f..baf4765be6d7 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -451,25 +451,31 @@ static void handle_std_frame(struct sk_buff *skb,
+       }
+ }
+-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-                       struct hsr_frame_info *frame)
++int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
++                      struct hsr_frame_info *frame)
+ {
+       if (proto == htons(ETH_P_PRP) ||
+           proto == htons(ETH_P_HSR)) {
++              /* Check if skb contains hsr_ethhdr */
++              if (skb->mac_len < sizeof(struct hsr_ethhdr))
++                      return -EINVAL;
++
+               /* HSR tagged frame :- Data or Supervision */
+               frame->skb_std = NULL;
+               frame->skb_prp = NULL;
+               frame->skb_hsr = skb;
+               frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+-              return;
++              return 0;
+       }
+       /* Standard frame or PRP from master port */
+       handle_std_frame(skb, frame);
++
++      return 0;
+ }
+-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-                       struct hsr_frame_info *frame)
++int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
++                      struct hsr_frame_info *frame)
+ {
+       /* Supervision frame */
+       struct prp_rct *rct = skb_get_PRP_rct(skb);
+@@ -480,9 +486,11 @@ void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+               frame->skb_std = NULL;
+               frame->skb_prp = skb;
+               frame->sequence_nr = prp_get_skb_sequence_nr(rct);
+-              return;
++              return 0;
+       }
+       handle_std_frame(skb, frame);
++
++      return 0;
+ }
+ static int fill_frame_info(struct hsr_frame_info *frame,
+@@ -492,9 +500,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+       struct hsr_vlan_ethhdr *vlan_hdr;
+       struct ethhdr *ethhdr;
+       __be16 proto;
++      int ret;
+-      /* Check if skb contains hsr_ethhdr */
+-      if (skb->mac_len < sizeof(struct hsr_ethhdr))
++      /* Check if skb contains ethhdr */
++      if (skb->mac_len < sizeof(struct ethhdr))
+               return -EINVAL;
+       memset(frame, 0, sizeof(*frame));
+@@ -521,7 +530,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+       frame->is_from_san = false;
+       frame->port_rcv = port;
+-      hsr->proto_ops->fill_frame_info(proto, skb, frame);
++      ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
++      if (ret)
++              return ret;
++
+       check_local_dest(port->hsr, skb, frame);
+       return 0;
+diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
+index 618140d484ad..008f45786f06 100644
+--- a/net/hsr/hsr_forward.h
++++ b/net/hsr/hsr_forward.h
+@@ -23,8 +23,8 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+                                      struct hsr_port *port);
+ bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
+-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-                       struct hsr_frame_info *frame);
+-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+-                       struct hsr_frame_info *frame);
++int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
++                      struct hsr_frame_info *frame);
++int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
++                      struct hsr_frame_info *frame);
+ #endif /* __HSR_FORWARD_H */
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index f79ca55d6986..9a25a5d349ae 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -192,8 +192,8 @@ struct hsr_proto_ops {
+                                              struct hsr_port *port);
+       struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
+                                               struct hsr_port *port);
+-      void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+-                              struct hsr_frame_info *frame);
++      int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
++                             struct hsr_frame_info *frame);
+       bool (*invalid_dan_ingress_frame)(__be16 protocol);
+       void (*update_san_info)(struct hsr_node *node, bool is_sup);
+ };
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index 36d5fcf09c61..aecc05a28fa1 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -58,12 +58,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+               goto finish_pass;
+       skb_push(skb, ETH_HLEN);
+-
+-      if (skb_mac_header(skb) != skb->data) {
+-              WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
+-                        __func__, __LINE__, port->dev->name);
+-              goto finish_consume;
+-      }
++      skb_reset_mac_header(skb);
++      if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
++          protocol == htons(ETH_P_HSR))
++              skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
++      skb_reset_mac_len(skb);
+       hsr_forward_skb(skb, port);
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-ipa-memory-region-array-is-variable-size.patch b/queue-5.10/net-ipa-memory-region-array-is-variable-size.patch
new file mode 100644 (file)
index 0000000..3983f76
--- /dev/null
@@ -0,0 +1,82 @@
+From a8bf2d6ad51e3bb67693b1b0cdc0a55440110cb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 14:42:04 -0500
+Subject: net: ipa: memory region array is variable size
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit 440c3247cba3d9433ac435d371dd7927d68772a7 ]
+
+IPA configuration data includes an array of memory region
+descriptors.  That was a fixed-size array at one time, but
+at some point we started defining it such that it was only
+as big as required for a given platform.  The actual number
+of entries in the array is recorded in the configuration data
+along with the array.
+
+A loop in ipa_mem_config() still assumes the array has entries
+for all defined memory region IDs.  As a result, this loop can
+go past the end of the actual array and attempt to write
+"canary" values based on nonsensical data.
+
+Fix this, by stashing the number of entries in the array, and
+using that rather than IPA_MEM_COUNT in the initialization loop
+found in ipa_mem_config().
+
+The only remaining use of IPA_MEM_COUNT is in a validation check
+to ensure configuration data doesn't have too many entries.
+That's fine for now.
+
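+The loop-bound change, reduced to a standalone sketch (the region
+table below is made up for illustration):
+
+#include <stdio.h>
+
+#define IPA_MEM_COUNT 8  /* number of defined memory region IDs */
+
+struct ipa_mem { unsigned int offset, size; };
+
+int main(void)
+{
+        /* Platform data defines fewer regions than IPA_MEM_COUNT. */
+        const struct ipa_mem regions[] = {
+                { 0x0000, 0x100 }, { 0x0140, 0x200 }, { 0x0390, 0x400 },
+        };
+        unsigned int mem_count = sizeof(regions) / sizeof(regions[0]);
+
+        /* Iterating up to IPA_MEM_COUNT would read past the array;
+         * iterate over the recorded entry count instead. */
+        for (unsigned int id = 0; id < mem_count; id++)
+                printf("region %u: offset 0x%04x size 0x%x\n",
+                       id, regions[id].offset, regions[id].size);
+        return 0;
+}
+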
+Fixes: 3128aae8c439a ("net: ipa: redefine struct ipa_mem_data")
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa.h     | 2 ++
+ drivers/net/ipa/ipa_mem.c | 3 ++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
+index 6c2371084c55..da862db09d7b 100644
+--- a/drivers/net/ipa/ipa.h
++++ b/drivers/net/ipa/ipa.h
+@@ -56,6 +56,7 @@ enum ipa_flag {
+  * @mem_virt:         Virtual address of IPA-local memory space
+  * @mem_offset:               Offset from @mem_virt used for access to IPA memory
+  * @mem_size:         Total size (bytes) of memory at @mem_virt
++ * @mem_count:                Number of entries in the mem array
+  * @mem:              Array of IPA-local memory region descriptors
+  * @imem_iova:                I/O virtual address of IPA region in IMEM
+  * @imem_size;                Size of IMEM region
+@@ -102,6 +103,7 @@ struct ipa {
+       void *mem_virt;
+       u32 mem_offset;
+       u32 mem_size;
++      u32 mem_count;
+       const struct ipa_mem *mem;
+       unsigned long imem_iova;
+diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
+index 2d45c444a67f..a78d66051a17 100644
+--- a/drivers/net/ipa/ipa_mem.c
++++ b/drivers/net/ipa/ipa_mem.c
+@@ -181,7 +181,7 @@ int ipa_mem_config(struct ipa *ipa)
+        * for the region, write "canary" values in the space prior to
+        * the region's base address.
+        */
+-      for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
++      for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
+               const struct ipa_mem *mem = &ipa->mem[mem_id];
+               u16 canary_count;
+               __le32 *canary;
+@@ -488,6 +488,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+       ipa->mem_size = resource_size(res);
+       /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
++      ipa->mem_count = mem_data->local_count;
+       ipa->mem = mem_data->local;
+       ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-lantiq-fix-memory-corruption-in-rx-ring.patch b/queue-5.10/net-lantiq-fix-memory-corruption-in-rx-ring.patch
new file mode 100644 (file)
index 0000000..a7bae5d
--- /dev/null
@@ -0,0 +1,70 @@
+From 96be3dc6d4e2741e080ad1b3e1829d58d66ff548 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 May 2021 16:45:58 +0200
+Subject: net: lantiq: fix memory corruption in RX ring
+
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+
+[ Upstream commit c7718ee96dbc2f9c5fc3b578abdf296dd44b9c20 ]
+
+In a situation where memory allocation or DMA mapping fails, an
+invalid address is programmed into the descriptor. This can lead
+to memory corruption. If the memory allocation fails, the driver should
+reuse the previous skb and mapping and drop the packet. This patch
+also increments the rx drop counter.
+
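+The recovery logic, in outline (a standalone sketch; the ring slot and
+helper below are illustrative, not the driver's):
+
+#include <stdio.h>
+#include <errno.h>
+
+struct rx_slot { void *skb; unsigned long dma_addr; };
+
+/* Simulate an allocation/mapping attempt that fails. */
+static int alloc_new_buffer(struct rx_slot *slot)
+{
+        (void)slot;
+        return -ENOMEM;
+}
+
+int main(void)
+{
+        struct rx_slot slot  = { .skb = (void *)0x1000, .dma_addr = 0xd000 };
+        struct rx_slot saved = slot;    /* keep the old skb and mapping */
+        unsigned long rx_dropped = 0;
+
+        if (alloc_new_buffer(&slot)) {
+                /* refill failed: give the old buffer back to the ring
+                 * and drop this packet instead of programming a bogus
+                 * address into the descriptor */
+                slot = saved;
+                rx_dropped++;
+        }
+        printf("skb=%p rx_dropped=%lu\n", slot.skb, rx_dropped);
+        return 0;
+}
+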
+Fixes: fe1a56420cf2 ("net: lantiq: Add Lantiq / Intel VRX200 Ethernet driver ")
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 51ed8a54d380..135ba5b6ae98 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
++      dma_addr_t mapping;
+       int ret = 0;
+       ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
+@@ -163,16 +164,17 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+               goto skip;
+       }
+-      ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
+-                      ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
+-                      DMA_FROM_DEVICE);
+-      if (unlikely(dma_mapping_error(ch->priv->dev,
+-                                     ch->dma.desc_base[ch->dma.desc].addr))) {
++      mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
++                               XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
++      if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+               dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+               ret = -ENOMEM;
+               goto skip;
+       }
++      ch->dma.desc_base[ch->dma.desc].addr = mapping;
++      /* Make sure the address is written before we give it to HW */
++      wmb();
+ skip:
+       ch->dma.desc_base[ch->dma.desc].ctl =
+               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+@@ -196,6 +198,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+       ch->dma.desc %= LTQ_DESC_NUM;
+       if (ret) {
++              ch->skb[ch->dma.desc] = skb;
++              net_dev->stats.rx_dropped++;
+               netdev_err(net_dev, "failed to allocate new rx buffer\n");
+               return ret;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-mdio-octeon-fix-some-double-free-issues.patch b/queue-5.10/net-mdio-octeon-fix-some-double-free-issues.patch
new file mode 100644 (file)
index 0000000..26c75de
--- /dev/null
@@ -0,0 +1,50 @@
+From df91ec6869e509c5c8b8f2ad0b9dd168badc7f87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 May 2021 09:24:55 +0200
+Subject: net: mdio: octeon: Fix some double free issues
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit e1d027dd97e1e750669cdc0d3b016a4f54e473eb ]
+
+'bus->mii_bus' has been allocated with 'devm_mdiobus_alloc_size()' in the
+probe function. So it must not be freed explicitly or there will be a
+double free.
+
+Remove the incorrect 'mdiobus_free' in the error handling path of the
+probe function and in the remove function.
+
+Suggested-By: Andrew Lunn <andrew@lunn.ch>
+Fixes: 35d2aeac9810 ("phy: mdio-octeon: Use devm_mdiobus_alloc_size()")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/mdio-octeon.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
+index d1e1009d51af..6faf39314ac9 100644
+--- a/drivers/net/mdio/mdio-octeon.c
++++ b/drivers/net/mdio/mdio-octeon.c
+@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
+       return 0;
+ fail_register:
+-      mdiobus_free(bus->mii_bus);
+       smi_en.u64 = 0;
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+       return err;
+@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
+       bus = platform_get_drvdata(pdev);
+       mdiobus_unregister(bus->mii_bus);
+-      mdiobus_free(bus->mii_bus);
+       smi_en.u64 = 0;
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+       return 0;
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-mdio-thunder-fix-a-double-free-issue-in-the-.rem.patch b/queue-5.10/net-mdio-thunder-fix-a-double-free-issue-in-the-.rem.patch
new file mode 100644 (file)
index 0000000..262ebec
--- /dev/null
@@ -0,0 +1,40 @@
+From 81c7a418cf2fc08f181929574a96a582cb5f4b48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 May 2021 09:44:49 +0200
+Subject: net: mdio: thunder: Fix a double free issue in the .remove function
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit a93a0a15876d2a077a3bc260b387d2457a051f24 ]
+
+'bus->mii_bus' has been allocated with 'devm_mdiobus_alloc_size()' in the
+probe function. So it must not be freed explicitly or there will be a
+double free.
+
+Remove the incorrect 'mdiobus_free' in the remove function.
+
+Fixes: 379d7ac7ca31 ("phy: mdio-thunder: Add driver for Cavium Thunder SoC MDIO buses.")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/mdio-thunder.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
+index 3d7eda99d34e..dd7430c998a2 100644
+--- a/drivers/net/mdio/mdio-thunder.c
++++ b/drivers/net/mdio/mdio-thunder.c
+@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
+                       continue;
+               mdiobus_unregister(bus->mii_bus);
+-              mdiobus_free(bus->mii_bus);
+               oct_mdio_writeq(0, bus->register_base + SMI_EN);
+       }
+       pci_release_regions(pdev);
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-mvpp2-add-buffer-header-handling-in-rx.patch b/queue-5.10/net-mvpp2-add-buffer-header-handling-in-rx.patch
new file mode 100644
index 0000000..05e3d63
--- /dev/null
@@ -0,0 +1,157 @@
+From 09638144473101014f2a87e0387526b74d849a5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 May 2021 19:04:41 +0300
+Subject: net: mvpp2: add buffer header handling in RX
+
+From: Stefan Chulski <stefanc@marvell.com>
+
+[ Upstream commit 17f9c1b63cdd4439523cfcdf5683e5070b911f24 ]
+
+If the link partner sends frames larger than the RX buffer size, the MAC
+marks them as oversize but still passes them to the Packet Processor.
+In this scenario, the Packet Processor scatters the frame across multiple
+buffers, but only a single buffer is returned to the Buffer Manager pool,
+so the pool is not refilled.
+
+This patch adds handling of the oversize error based on the buffer header,
+so that all buffers are returned to the Buffer Manager pool.
+
+Fixes: 3f518509dedc ("ethernet: Add new driver for Marvell Armada 375 network unit")
+Reported-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Stefan Chulski <stefanc@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2.h    | 22 ++++++++
+ .../net/ethernet/marvell/mvpp2/mvpp2_main.c   | 54 +++++++++++++++----
+ 2 files changed, 67 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+index 834775843067..a1aefce55e65 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+@@ -909,6 +909,14 @@ enum mvpp22_ptp_packet_format {
+ #define MVPP2_DESC_DMA_MASK   DMA_BIT_MASK(40)
++/* Buffer header info bits */
++#define MVPP2_B_HDR_INFO_MC_ID_MASK   0xfff
++#define MVPP2_B_HDR_INFO_MC_ID(info)  ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
++#define MVPP2_B_HDR_INFO_LAST_OFFS    12
++#define MVPP2_B_HDR_INFO_LAST_MASK    BIT(12)
++#define MVPP2_B_HDR_INFO_IS_LAST(info) \
++         (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
++
+ struct mvpp2_tai;
+ /* Definitions */
+@@ -918,6 +926,20 @@ struct mvpp2_rss_table {
+       u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+ };
++struct mvpp2_buff_hdr {
++      __le32 next_phys_addr;
++      __le32 next_dma_addr;
++      __le16 byte_count;
++      __le16 info;
++      __le16 reserved1;       /* bm_qset (for future use, BM) */
++      u8 next_phys_addr_high;
++      u8 next_dma_addr_high;
++      __le16 reserved2;
++      __le16 reserved3;
++      __le16 reserved4;
++      __le16 reserved5;
++};
++
+ /* Shared Packet Processor resources */
+ struct mvpp2 {
+       /* Shared registers' base addresses */
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index f5333fc27e14..6aa13c9f9fc9 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -3481,6 +3481,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
+       return ret;
+ }
++static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
++                                  int pool, u32 rx_status)
++{
++      phys_addr_t phys_addr, phys_addr_next;
++      dma_addr_t dma_addr, dma_addr_next;
++      struct mvpp2_buff_hdr *buff_hdr;
++
++      phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
++      dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
++
++      do {
++              buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
++
++              phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
++              dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
++
++              if (port->priv->hw_version >= MVPP22) {
++                      phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
++                      dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
++              }
++
++              mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
++
++              phys_addr = phys_addr_next;
++              dma_addr = dma_addr_next;
++
++      } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
++}
++
+ /* Main rx processing */
+ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+                   int rx_todo, struct mvpp2_rx_queue *rxq)
+@@ -3527,14 +3556,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+                       MVPP2_RXD_BM_POOL_ID_OFFS;
+               bm_pool = &port->priv->bm_pools[pool];
+-              /* In case of an error, release the requested buffer pointer
+-               * to the Buffer Manager. This request process is controlled
+-               * by the hardware, and the information about the buffer is
+-               * comprised by the RX descriptor.
+-               */
+-              if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+-                      goto err_drop_frame;
+-
+               if (port->priv->percpu_pools) {
+                       pp = port->priv->page_pool[pool];
+                       dma_dir = page_pool_get_dma_dir(pp);
+@@ -3546,6 +3567,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+                                       rx_bytes + MVPP2_MH_SIZE,
+                                       dma_dir);
++              /* Buffer header not supported */
++              if (rx_status & MVPP2_RXD_BUF_HDR)
++                      goto err_drop_frame;
++
++              /* In case of an error, release the requested buffer pointer
++               * to the Buffer Manager. This request process is controlled
++               * by the hardware, and the information about the buffer is
++               * comprised by the RX descriptor.
++               */
++              if (rx_status & MVPP2_RXD_ERR_SUMMARY)
++                      goto err_drop_frame;
++
+               /* Prefetch header */
+               prefetch(data);
+@@ -3627,7 +3660,10 @@ err_drop_frame:
+               dev->stats.rx_errors++;
+               mvpp2_rx_error(port, rx_desc);
+               /* Return the buffer to the pool */
+-              mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
++              if (rx_status & MVPP2_RXD_BUF_HDR)
++                      mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
++              else
++                      mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+       }
+       rcu_read_unlock();
+-- 
+2.30.2
+
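The core of the mvpp2 fix above is a loop that follows the chain of buffer headers left behind by a scattered oversize frame and hands every buffer back to the pool, stopping at the header whose "last" bit is set. A minimal, self-contained C sketch of that walk is shown below; the buff_hdr structure and the pool counter are simplifications for illustration, not the driver's real layout.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for struct mvpp2_buff_hdr: each header points at the
 * next buffer in the chain and says whether it is the last one. */
struct buff_hdr {
	struct buff_hdr *next;
	bool is_last;
};

static int pool_refills;

static void pool_put(struct buff_hdr *hdr)
{
	(void)hdr;
	pool_refills++;		/* stand-in for mvpp2_bm_pool_put() */
}

/* Return every buffer of a scattered frame to the pool, not just the first. */
static void buff_hdr_pool_put(struct buff_hdr *hdr)
{
	struct buff_hdr *next;
	bool last;

	do {
		next = hdr->next;	/* remember the link before recycling */
		last = hdr->is_last;
		pool_put(hdr);
		hdr = next;
	} while (!last);
}

int main(void)
{
	/* A frame scattered over three buffers: two chained headers plus the
	 * final buffer marked as last. */
	struct buff_hdr b2 = { .next = NULL, .is_last = true };
	struct buff_hdr b1 = { .next = &b2, .is_last = false };
	struct buff_hdr b0 = { .next = &b1, .is_last = false };

	buff_hdr_pool_put(&b0);
	printf("buffers returned to pool: %d (expected 3)\n", pool_refills);
	return 0;
}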
diff --git a/queue-5.10/net-netcp-fix-an-error-message.patch b/queue-5.10/net-netcp-fix-an-error-message.patch
new file mode 100644
index 0000000..ebac9fe
--- /dev/null
@@ -0,0 +1,41 @@
+From bde583b074fea06508a8c98051dfde3e66aa2ee9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 May 2021 07:38:22 +0200
+Subject: net: netcp: Fix an error message
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit ddb6e00f8413e885ff826e32521cff7924661de0 ]
+
+'ret' is known to be 0 here.
+The expected error code is stored in 'tx_pipe->dma_queue', so use it
+instead.
+
+While at it, switch from %d to %pe which is more user friendly.
+
+Fixes: 84640e27f230 ("net: netcp: Add Keystone NetCP core ethernet driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/netcp_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index d7a144b4a09f..dc50e948195d 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+       tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+                                            KNAV_QUEUE_SHARED);
+       if (IS_ERR(tx_pipe->dma_queue)) {
+-              dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
+-                      name, ret);
++              dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
++                      name, tx_pipe->dma_queue);
+               ret = PTR_ERR(tx_pipe->dma_queue);
+               goto err;
+       }
+-- 
+2.30.2
+
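The netcp change above is a reminder that, with the error-pointer convention, the failure reason lives in the returned pointer, not in whatever 'ret' happened to hold earlier. The short user-space sketch below re-creates the ERR_PTR/IS_ERR/PTR_ERR helpers purely for illustration (the real ones live in include/linux/err.h); open_dma_queue() is a hypothetical stand-in for knav_queue_open().

#include <stdio.h>

#define MAX_ERRNO 4095

/* Minimal user-space re-creation of the kernel's error-pointer helpers,
 * only for this demonstration. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for knav_queue_open(): fails with -EBUSY (-16). */
static void *open_dma_queue(int make_it_fail)
{
	static int dummy_queue;

	return make_it_fail ? ERR_PTR(-16) : (void *)&dummy_queue;
}

int main(void)
{
	int ret = 0;				/* still 0 when the open fails */
	void *q = open_dma_queue(1);

	if (IS_ERR(q)) {
		/* Wrong: 'ret' is stale, so this prints 0 ("no error"). */
		printf("could not open DMA queue: %d\n", ret);
		/* Right: the pointer itself encodes the error code. */
		printf("could not open DMA queue: %ld\n", PTR_ERR(q));
		ret = (int)PTR_ERR(q);
	}
	return -ret;
}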
diff --git a/queue-5.10/net-packetmmap-fix-only-tx-timestamp-on-request.patch b/queue-5.10/net-packetmmap-fix-only-tx-timestamp-on-request.patch
new file mode 100644
index 0000000..157a0c9
--- /dev/null
@@ -0,0 +1,56 @@
+From c4c23183bed00d1d340ad74338e3d7411eee81a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 May 2021 13:31:22 +1200
+Subject: net: packetmmap: fix only tx timestamp on request
+
+From: Richard Sanger <rsanger@wand.net.nz>
+
+[ Upstream commit 171c3b151118a2fe0fc1e2a9d1b5a1570cfe82d2 ]
+
+The packetmmap tx ring should only return timestamps if requested via
+setsockopt PACKET_TIMESTAMP, as documented. This allows compatibility
+with non-timestamp-aware user-space code which checks
+tp_status == TP_STATUS_AVAILABLE and does not expect additional timestamp
+flags to be set in tp_status.
+
+Fixes: b9c32fb27170 ("packet: if hw/sw ts enabled in rx/tx ring, report which ts we got")
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Richard Sanger <rsanger@wand.net.nz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/packet/af_packet.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 449625c2ccc7..ddb68aa836f7 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -421,7 +421,8 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
+           ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
+               return TP_STATUS_TS_RAW_HARDWARE;
+-      if (ktime_to_timespec64_cond(skb->tstamp, ts))
++      if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
++          ktime_to_timespec64_cond(skb->tstamp, ts))
+               return TP_STATUS_TS_SOFTWARE;
+       return 0;
+@@ -2339,7 +2340,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+       skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
+-      if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
++      /* Always timestamp; prefer an existing software timestamp taken
++       * closer to the time of capture.
++       */
++      ts_status = tpacket_get_timestamp(skb, &ts,
++                                        po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
++      if (!ts_status)
+               ktime_get_real_ts64(&ts);
+       status |= ts_status;
+-- 
+2.30.2
+
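The behavioural point of the af_packet fix above is that a software timestamp status may only be reported when the user asked for it via PACKET_TIMESTAMP, while the RX path keeps requesting one internally. The tiny sketch below models just that flag check; the constants mirror the SOF_TIMESTAMPING_* / TP_STATUS_* names but carry illustrative values, not the real uapi definitions.

#include <stdio.h>

/* Illustrative stand-ins for the real uapi flag values. */
#define SOF_TIMESTAMPING_SOFTWARE 0x1
#define TP_STATUS_TS_SOFTWARE     0x2

/* Return a timestamp status only if the caller's flags allow it. */
static unsigned int get_timestamp_status(unsigned int flags, int have_sw_ts)
{
	if ((flags & SOF_TIMESTAMPING_SOFTWARE) && have_sw_ts)
		return TP_STATUS_TS_SOFTWARE;
	return 0;
}

int main(void)
{
	/* TX completion: the user never set PACKET_TIMESTAMP, so no TS bit
	 * leaks into tp_status and legacy code still sees a clean status. */
	printf("tx, no opt-in : %#x\n", get_timestamp_status(0, 1));

	/* RX: the kernel ORs the SOFTWARE flag in itself, so the fallback
	 * timestamp keeps being reported as before the fix. */
	printf("rx, forced sw : %#x\n",
	       get_timestamp_status(SOF_TIMESTAMPING_SOFTWARE, 1));
	return 0;
}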
diff --git a/queue-5.10/net-really-orphan-skbs-tied-to-closing-sk.patch b/queue-5.10/net-really-orphan-skbs-tied-to-closing-sk.patch
new file mode 100644
index 0000000..b8fe95c
--- /dev/null
@@ -0,0 +1,71 @@
+From 9746ba5ad99e54071eb688c767b6cd3c1e5f0322 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 10:35:21 +0200
+Subject: net: really orphan skbs tied to closing sk
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 098116e7e640ba677d9e345cbee83d253c13d556 ]
+
+If the owning socket is shutting down - e.g. the sock reference
+count already dropped to 0 and only sk_wmem_alloc is keeping
+the sock alive, skb_orphan_partial() becomes a no-op.
+
+When forwarding packets over veth with GRO enabled, the above
+causes refcount errors.
+
+This change addresses the issue with a plain skb_orphan() call
+in the critical scenario.
+
+Fixes: 9adc89af724f ("net: let skb_orphan_partial wake-up waiters.")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 4 +++-
+ net/core/sock.c    | 8 ++++----
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 261195598df3..f68184b8c0aa 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2197,13 +2197,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+       sk_mem_charge(sk, skb->truesize);
+ }
+-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
++static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+ {
+       if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+               skb_orphan(skb);
+               skb->destructor = sock_efree;
+               skb->sk = sk;
++              return true;
+       }
++      return false;
+ }
+ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c75c1e723a84..dee29f41beaf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2099,10 +2099,10 @@ void skb_orphan_partial(struct sk_buff *skb)
+       if (skb_is_tcp_pure_ack(skb))
+               return;
+-      if (can_skb_orphan_partial(skb))
+-              skb_set_owner_sk_safe(skb, skb->sk);
+-      else
+-              skb_orphan(skb);
++      if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
++              return;
++
++      skb_orphan(skb);
+ }
+ EXPORT_SYMBOL(skb_orphan_partial);
+-- 
+2.30.2
+
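Shape-wise, the fix above turns a void helper into one that reports whether it actually took ownership, so the caller can fall back to the heavier path instead of silently doing nothing. A generic sketch of that "try the fast path, report success, otherwise fall back" pattern follows; the function names are made up for the example.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical fast path: succeeds only while the owner is still alive. */
static bool try_fast_detach(int owner_refs)
{
	if (owner_refs > 0) {
		/* ...re-attach to the owner cheaply... */
		return true;
	}
	return false;	/* owner is going away: caller must do the full job */
}

static void full_detach(void)
{
	puts("fallback: fully detached");
}

static void detach(int owner_refs)
{
	/* Mirrors the fixed skb_orphan_partial(): only skip the full path
	 * when the fast path really took effect. */
	if (try_fast_detach(owner_refs)) {
		puts("fast path taken");
		return;
	}
	full_detach();
}

int main(void)
{
	detach(1);	/* owner alive: fast path */
	detach(0);	/* owner shutting down: must fall back */
	return 0;
}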
diff --git a/queue-5.10/net-sched-fix-packet-stuck-problem-for-lockless-qdis.patch b/queue-5.10/net-sched-fix-packet-stuck-problem-for-lockless-qdis.patch
new file mode 100644
index 0000000..1df5469
--- /dev/null
@@ -0,0 +1,199 @@
+From 8e258cd0031fdf922007f6d7c02e818122db82b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 11:16:59 +0800
+Subject: net: sched: fix packet stuck problem for lockless qdisc
+
+From: Yunsheng Lin <linyunsheng@huawei.com>
+
+[ Upstream commit a90c57f2cedd52a511f739fb55e6244e22e1a2fb ]
+
+Lockless qdisc has below concurrent problem:
+    cpu0                 cpu1
+     .                     .
+q->enqueue                 .
+     .                     .
+qdisc_run_begin()          .
+     .                     .
+dequeue_skb()              .
+     .                     .
+sch_direct_xmit()          .
+     .                     .
+     .                q->enqueue
+     .             qdisc_run_begin()
+     .            return and do nothing
+     .                     .
+qdisc_run_end()            .
+
+cpu1 enqueues a skb without calling __qdisc_run() because cpu0
+has not released the lock yet and spin_trylock() returns false
+for cpu1 in qdisc_run_begin(), and cpu0 does not see the skb
+enqueued by cpu1 when calling dequeue_skb() because cpu1 may
+enqueue the skb after cpu0 calls dequeue_skb() and before
+cpu0 calls qdisc_run_end().
+
+Lockless qdisc has below another concurrent problem when
+tx_action is involved:
+
+cpu0(serving tx_action)     cpu1             cpu2
+          .                   .                .
+          .              q->enqueue            .
+          .            qdisc_run_begin()       .
+          .              dequeue_skb()         .
+          .                   .            q->enqueue
+          .                   .                .
+          .             sch_direct_xmit()      .
+          .                   .         qdisc_run_begin()
+          .                   .       return and do nothing
+          .                   .                .
+ clear __QDISC_STATE_SCHED    .                .
+ qdisc_run_begin()            .                .
+ return and do nothing        .                .
+          .                   .                .
+          .            qdisc_run_end()         .
+
+This patch fixes the above data race by:
+1. If the first spin_trylock() return false and STATE_MISSED is
+   not set, set STATE_MISSED and retry another spin_trylock() in
+   case other CPU may not see STATE_MISSED after it releases the
+   lock.
+2. reschedule if STATE_MISSED is set after the lock is released
+   at the end of qdisc_run_end().
+
+For the tx_action case, STATE_MISSED is also set when cpu1 is at the
+end of qdisc_run_end(), so tx_action will be rescheduled again
+to dequeue the skb enqueued by cpu2.
+
+Clear STATE_MISSED before retrying a dequeue when dequeuing
+returns NULL, in order to reduce the overhead of the second
+spin_trylock() and of the __netif_schedule() call.
+
+Also clear the STATE_MISSED before calling __netif_schedule()
+at the end of qdisc_run_end() to avoid doing another round of
+dequeuing in the pfifo_fast_dequeue().
+
+The performance impact of this patch, tested using pktgen and
+dummy netdev with pfifo_fast qdisc attached:
+
+ threads  without+this_patch   with+this_patch      delta
+    1        2.61Mpps            2.60Mpps           -0.3%
+    2        3.97Mpps            3.82Mpps           -3.7%
+    4        5.62Mpps            5.59Mpps           -0.5%
+    8        2.78Mpps            2.77Mpps           -0.3%
+   16        2.22Mpps            2.22Mpps           -0.0%
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Tested-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 35 ++++++++++++++++++++++++++++++++++-
+ net/sched/sch_generic.c   | 19 +++++++++++++++++++
+ 2 files changed, 53 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 3648164faa06..4dd2c9e34976 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -36,6 +36,7 @@ struct qdisc_rate_table {
+ enum qdisc_state_t {
+       __QDISC_STATE_SCHED,
+       __QDISC_STATE_DEACTIVATED,
++      __QDISC_STATE_MISSED,
+ };
+ struct qdisc_size_table {
+@@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ {
+       if (qdisc->flags & TCQ_F_NOLOCK) {
++              if (spin_trylock(&qdisc->seqlock))
++                      goto nolock_empty;
++
++              /* If the MISSED flag is set, it means other thread has
++               * set the MISSED flag before second spin_trylock(), so
++               * we can return false here to avoid multi cpus doing
++               * the set_bit() and second spin_trylock() concurrently.
++               */
++              if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
++                      return false;
++
++              /* Set the MISSED flag before the second spin_trylock(),
++               * if the second spin_trylock() return false, it means
++               * other cpu holding the lock will do dequeuing for us
++               * or it will see the MISSED flag set after releasing
++               * lock and reschedule the net_tx_action() to do the
++               * dequeuing.
++               */
++              set_bit(__QDISC_STATE_MISSED, &qdisc->state);
++
++              /* Retry again in case other CPU may not see the new flag
++               * after it releases the lock at the end of qdisc_run_end().
++               */
+               if (!spin_trylock(&qdisc->seqlock))
+                       return false;
++
++nolock_empty:
+               WRITE_ONCE(qdisc->empty, false);
+       } else if (qdisc_is_running(qdisc)) {
+               return false;
+@@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
+       write_seqcount_end(&qdisc->running);
+-      if (qdisc->flags & TCQ_F_NOLOCK)
++      if (qdisc->flags & TCQ_F_NOLOCK) {
+               spin_unlock(&qdisc->seqlock);
++
++              if (unlikely(test_bit(__QDISC_STATE_MISSED,
++                                    &qdisc->state))) {
++                      clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++                      __netif_schedule(qdisc);
++              }
++      }
+ }
+ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 49eae93d1489..8c6b97cc5e41 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -640,8 +640,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+ {
+       struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+       struct sk_buff *skb = NULL;
++      bool need_retry = true;
+       int band;
++retry:
+       for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+               struct skb_array *q = band2list(priv, band);
+@@ -652,6 +654,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+       }
+       if (likely(skb)) {
+               qdisc_update_stats_at_dequeue(qdisc, skb);
++      } else if (need_retry &&
++                 test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
++              /* Delay clearing the STATE_MISSED here to reduce
++               * the overhead of the second spin_trylock() in
++               * qdisc_run_begin() and __netif_schedule() calling
++               * in qdisc_run_end().
++               */
++              clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++
++              /* Make sure dequeuing happens after clearing
++               * STATE_MISSED.
++               */
++              smp_mb__after_atomic();
++
++              need_retry = false;
++
++              goto retry;
+       } else {
+               WRITE_ONCE(qdisc->empty, true);
+       }
+-- 
+2.30.2
+
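The heart of the patch above is the "set a MISSED flag when the trylock fails, and have whoever holds the lock re-run the queue after unlocking" handshake. The self-contained pthread sketch below mimics that handshake in user space so the idea can be compiled and played with; the mutex stands in for qdisc->seqlock, the atomic counter for the qdisc backlog, and the recursive re-run for __netif_schedule(), so it is an illustration of the pattern rather than the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t seqlock = PTHREAD_MUTEX_INITIALIZER; /* qdisc->seqlock */
static atomic_int missed;   /* __QDISC_STATE_MISSED */
static atomic_int backlog;  /* packets sitting in the qdisc */
static atomic_int sent;

static void run_queue(void)                 /* stand-in for __qdisc_run() */
{
	int n = atomic_exchange(&backlog, 0);

	atomic_fetch_add(&sent, n);
}

static void qdisc_run(void)
{
	if (pthread_mutex_trylock(&seqlock) != 0) {
		/* Lost the race: record that work was missed ... */
		atomic_store(&missed, 1);
		/* ... and retry once in case the owner has just unlocked. */
		if (pthread_mutex_trylock(&seqlock) != 0)
			return;	/* the owner will see 'missed' after unlocking */
	}

	run_queue();
	pthread_mutex_unlock(&seqlock);

	/* qdisc_run_end() equivalent: if someone was turned away while we
	 * held the lock, run again instead of leaving their packet behind. */
	if (atomic_exchange(&missed, 0))
		qdisc_run();
}

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 10000; i++) {
		atomic_fetch_add(&backlog, 1);	/* q->enqueue() */
		qdisc_run();
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, producer, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* With the MISSED handshake nothing is left stranded in the queue. */
	printf("sent %d, left in backlog %d\n",
	       atomic_load(&sent), atomic_load(&backlog));
	return 0;
}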
diff --git a/queue-5.10/net-sched-fix-tx-action-reschedule-issue-with-stoppe.patch b/queue-5.10/net-sched-fix-tx-action-reschedule-issue-with-stoppe.patch
new file mode 100644
index 0000000..ef9b83f
--- /dev/null
@@ -0,0 +1,120 @@
+From 7990bdd4f778e9371cc795897c00a2de3db54db3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 11:17:01 +0800
+Subject: net: sched: fix tx action reschedule issue with stopped queue
+
+From: Yunsheng Lin <linyunsheng@huawei.com>
+
+[ Upstream commit dcad9ee9e0663d74a89b25b987f9c7be86432812 ]
+
+The netdev queue might be stopped when the byte queue limit has been
+reached or the tx hw ring is full, and net_tx_action() may still be
+rescheduled if STATE_MISSED is set, which consumes unnecessary
+cpu without dequeuing and transmitting any skb because the
+netdev queue is stopped, see qdisc_run_end().
+
+This patch fixes it by checking the netdev queue state before
+calling qdisc_run() and clearing STATE_MISSED if the netdev queue is
+stopped during qdisc_run(); net_tx_action() is rescheduled
+again when the netdev queue is restarted, see netif_tx_wake_queue().
+
+As there is a time window between the netif_xmit_frozen_or_stopped()
+check and the clearing of STATE_MISSED, during which STATE_MISSED
+may be set by a net_tx_action() scheduled by netif_tx_wake_queue(),
+set STATE_MISSED again if the netdev queue is restarted.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Reported-by: Michal Kubecek <mkubecek@suse.cz>
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c          |  3 ++-
+ net/sched/sch_generic.c | 27 ++++++++++++++++++++++++++-
+ 2 files changed, 28 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 76a932c52255..0c9ce36afc8c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3764,7 +3764,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+       if (q->flags & TCQ_F_NOLOCK) {
+               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+-              qdisc_run(q);
++              if (likely(!netif_xmit_frozen_or_stopped(txq)))
++                      qdisc_run(q);
+               if (unlikely(to_free))
+                       kfree_skb_list(to_free);
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index e6844d3567ca..854d2b38db85 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -35,6 +35,25 @@
+ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
+ EXPORT_SYMBOL(default_qdisc_ops);
++static void qdisc_maybe_clear_missed(struct Qdisc *q,
++                                   const struct netdev_queue *txq)
++{
++      clear_bit(__QDISC_STATE_MISSED, &q->state);
++
++      /* Make sure the below netif_xmit_frozen_or_stopped()
++       * checking happens after clearing STATE_MISSED.
++       */
++      smp_mb__after_atomic();
++
++      /* Checking netif_xmit_frozen_or_stopped() again to
++       * make sure STATE_MISSED is set if the STATE_MISSED
++       * set by netif_tx_wake_queue()'s rescheduling of
++       * net_tx_action() is cleared by the above clear_bit().
++       */
++      if (!netif_xmit_frozen_or_stopped(txq))
++              set_bit(__QDISC_STATE_MISSED, &q->state);
++}
++
+ /* Main transmission queue. */
+ /* Modifications to data participating in scheduling must be protected with
+@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+                       }
+               } else {
+                       skb = SKB_XOFF_MAGIC;
++                      qdisc_maybe_clear_missed(q, txq);
+               }
+       }
+@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+                       }
+               } else {
+                       skb = NULL;
++                      qdisc_maybe_clear_missed(q, txq);
+               }
+               if (lock)
+                       spin_unlock(lock);
+@@ -251,8 +272,10 @@ validate:
+       *validate = true;
+       if ((q->flags & TCQ_F_ONETXQUEUE) &&
+-          netif_xmit_frozen_or_stopped(txq))
++          netif_xmit_frozen_or_stopped(txq)) {
++              qdisc_maybe_clear_missed(q, txq);
+               return skb;
++      }
+       skb = qdisc_dequeue_skb_bad_txq(q);
+       if (unlikely(skb)) {
+@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+               HARD_TX_LOCK(dev, txq, smp_processor_id());
+               if (!netif_xmit_frozen_or_stopped(txq))
+                       skb = dev_hard_start_xmit(skb, dev, txq, &ret);
++              else
++                      qdisc_maybe_clear_missed(q, txq);
+               HARD_TX_UNLOCK(dev, txq);
+       } else {
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-sched-fix-tx-action-rescheduling-issue-during-de.patch b/queue-5.10/net-sched-fix-tx-action-rescheduling-issue-during-de.patch
new file mode 100644
index 0000000..c48e4a1
--- /dev/null
@@ -0,0 +1,173 @@
+From b2e589b5b18f93cf4150234b4ebaf8d2d6e9eb3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 11:17:00 +0800
+Subject: net: sched: fix tx action rescheduling issue during deactivation
+
+From: Yunsheng Lin <linyunsheng@huawei.com>
+
+[ Upstream commit 102b55ee92f9fda4dde7a45d2b20538e6e3e3d1e ]
+
+Currently qdisc_run() checks STATE_DEACTIVATED of a lockless
+qdisc before calling __qdisc_run(), which ultimately clears
+STATE_MISSED when all the skbs are dequeued. If STATE_DEACTIVATED
+is set before clearing STATE_MISSED, there may be rescheduling
+of net_tx_action() at the end of qdisc_run_end(), see below:
+
+CPU0(net_tx_atcion)  CPU1(__dev_xmit_skb)  CPU2(dev_deactivate)
+          .                   .                     .
+          .            set STATE_MISSED             .
+          .           __netif_schedule()            .
+          .                   .           set STATE_DEACTIVATED
+          .                   .                qdisc_reset()
+          .                   .                     .
+          .<---------------   .              synchronize_net()
+clear __QDISC_STATE_SCHED  |  .                     .
+          .                |  .                     .
+          .                |  .            some_qdisc_is_busy()
+          .                |  .               return *false*
+          .                |  .                     .
+  test STATE_DEACTIVATED   |  .                     .
+__qdisc_run() *not* called |  .                     .
+          .                |  .                     .
+   test STATE_MISS         |  .                     .
+ __netif_schedule()--------|  .                     .
+          .                   .                     .
+          .                   .                     .
+
+__qdisc_run() is not called by net_tx_action() on CPU0 because
+CPU2 has set the STATE_DEACTIVATED flag during dev_deactivate(), and
+since STATE_MISSED is only cleared in __qdisc_run(), __netif_schedule()
+is called at the end of qdisc_run_end(), causing the tx action
+rescheduling problem.
+
+qdisc_run() called by net_tx_action() runs in softirq context,
+which should have the same semantics as the qdisc_run() called by
+__dev_xmit_skb() under rcu_read_lock_bh(). And since there is a
+synchronize_net() between the STATE_DEACTIVATED flag being set and
+qdisc_reset()/some_qdisc_is_busy() in dev_deactivate(), we can safely
+bail out for the deactivated lockless qdisc in net_tx_action(), and
+qdisc_reset() will reset all skbs not dequeued yet.
+
+So add rcu_read_lock() explicitly to protect the qdisc_run()
+and do the STATE_DEACTIVATED check in net_tx_action() before
+calling qdisc_run_begin(). Another option is to do the check in
+qdisc_run_end(), but it would add unnecessary overhead for the
+non-tx_action case, because __dev_queue_xmit() will not see a qdisc
+with STATE_DEACTIVATED after synchronize_net(); a qdisc with
+STATE_DEACTIVATED can only be seen by net_tx_action() because of
+__netif_schedule().
+
+The STATE_DEACTIVATED check in qdisc_run() is there to avoid a race
+between net_tx_action() and qdisc_reset(), see:
+commit d518d2ed8640 ("net/sched: fix race between deactivation
+and dequeue for NOLOCK qdisc"). As the bailout added above for the
+deactivated lockless qdisc in net_tx_action() provides better
+protection for the race without calling qdisc_run() at all,
+remove the STATE_DEACTIVATED check from qdisc_run().
+
+After qdisc_reset(), there is no skb in qdisc to be dequeued, so
+clear the STATE_MISSED in dev_reset_queue() too.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+V8: Clearing STATE_MISSED before calling __netif_schedule() has
+    avoided the endless rescheduling problem, but there may still
+    be an unnecessary rescheduling, so adjust the commit log.
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/pkt_sched.h |  7 +------
+ net/core/dev.c          | 26 ++++++++++++++++++++++----
+ net/sched/sch_generic.c |  4 +++-
+ 3 files changed, 26 insertions(+), 11 deletions(-)
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 4ed32e6b0201..2be90a54a404 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -123,12 +123,7 @@ void __qdisc_run(struct Qdisc *q);
+ static inline void qdisc_run(struct Qdisc *q)
+ {
+       if (qdisc_run_begin(q)) {
+-              /* NOLOCK qdisc must check 'state' under the qdisc seqlock
+-               * to avoid racing with dev_qdisc_reset()
+-               */
+-              if (!(q->flags & TCQ_F_NOLOCK) ||
+-                  likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+-                      __qdisc_run(q);
++              __qdisc_run(q);
+               qdisc_run_end(q);
+       }
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2f17a4ac82f0..76a932c52255 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4910,25 +4910,43 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
+               sd->output_queue_tailp = &sd->output_queue;
+               local_irq_enable();
++              rcu_read_lock();
++
+               while (head) {
+                       struct Qdisc *q = head;
+                       spinlock_t *root_lock = NULL;
+                       head = head->next_sched;
+-                      if (!(q->flags & TCQ_F_NOLOCK)) {
+-                              root_lock = qdisc_lock(q);
+-                              spin_lock(root_lock);
+-                      }
+                       /* We need to make sure head->next_sched is read
+                        * before clearing __QDISC_STATE_SCHED
+                        */
+                       smp_mb__before_atomic();
++
++                      if (!(q->flags & TCQ_F_NOLOCK)) {
++                              root_lock = qdisc_lock(q);
++                              spin_lock(root_lock);
++                      } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
++                                                   &q->state))) {
++                              /* There is a synchronize_net() between
++                               * STATE_DEACTIVATED flag being set and
++                               * qdisc_reset()/some_qdisc_is_busy() in
++                               * dev_deactivate(), so we can safely bail out
++                               * early here to avoid data race between
++                               * qdisc_deactivate() and some_qdisc_is_busy()
++                               * for lockless qdisc.
++                               */
++                              clear_bit(__QDISC_STATE_SCHED, &q->state);
++                              continue;
++                      }
++
+                       clear_bit(__QDISC_STATE_SCHED, &q->state);
+                       qdisc_run(q);
+                       if (root_lock)
+                               spin_unlock(root_lock);
+               }
++
++              rcu_read_unlock();
+       }
+       xfrm_dev_backlog(sd);
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 8c6b97cc5e41..e6844d3567ca 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1177,8 +1177,10 @@ static void dev_reset_queue(struct net_device *dev,
+       qdisc_reset(qdisc);
+       spin_unlock_bh(qdisc_lock(qdisc));
+-      if (nolock)
++      if (nolock) {
++              clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+               spin_unlock_bh(&qdisc->seqlock);
++      }
+ }
+ static bool some_qdisc_is_busy(struct net_device *dev)
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-smc-remove-device-from-smcd_dev_list-after-faile.patch b/queue-5.10/net-smc-remove-device-from-smcd_dev_list-after-faile.patch
new file mode 100644
index 0000000..e354881
--- /dev/null
@@ -0,0 +1,56 @@
+From 4f43bc787d06b56efa38f58e8f263ecd52b3f9d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 10:47:06 +0200
+Subject: net/smc: remove device from smcd_dev_list after failed device_add()
+
+From: Julian Wiedmann <jwi@linux.ibm.com>
+
+[ Upstream commit 444d7be9532dcfda8e0385226c862fd7e986f607 ]
+
+If the device_add() for a smcd_dev fails, there's no cleanup step that
+rolls back the earlier list_add(). The device subsequently gets freed,
+and we end up with a corrupted list.
+
+Add some error handling that removes the device from the list.
+
+Fixes: c6ba7c9ba43d ("net/smc: add base infrastructure for SMC-D and ISM")
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_ism.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
+index 024ca21392f7..8e33c0128d73 100644
+--- a/net/smc/smc_ism.c
++++ b/net/smc/smc_ism.c
+@@ -331,6 +331,8 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
+ int smcd_register_dev(struct smcd_dev *smcd)
+ {
++      int rc;
++
+       mutex_lock(&smcd_dev_list.mutex);
+       if (list_empty(&smcd_dev_list.list)) {
+               u8 *system_eid = NULL;
+@@ -350,7 +352,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
+                           dev_name(&smcd->dev), smcd->pnetid,
+                           smcd->pnetid_by_user ? " (user defined)" : "");
+-      return device_add(&smcd->dev);
++      rc = device_add(&smcd->dev);
++      if (rc) {
++              mutex_lock(&smcd_dev_list.mutex);
++              list_del(&smcd->list);
++              mutex_unlock(&smcd_dev_list.mutex);
++      }
++
++      return rc;
+ }
+ EXPORT_SYMBOL_GPL(smcd_register_dev);
+-- 
+2.30.2
+
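The smc fix above is the classic "undo the list insertion if a later registration step fails" rollback. The compact C sketch below shows that error-handling shape with a plain array standing in for the mutex-protected smcd_dev_list; register_device() is a simulated failure, so treat all the names as illustrative only.

#include <stdio.h>

#define MAX_DEVS 4

static const char *dev_list[MAX_DEVS];
static int nr_devs;

static void list_add(const char *name)
{
	if (nr_devs < MAX_DEVS)
		dev_list[nr_devs++] = name;
}

static void list_del(const char *name)
{
	for (int i = 0; i < nr_devs; i++) {
		if (dev_list[i] == name) {
			dev_list[i] = dev_list[--nr_devs];
			return;
		}
	}
}

/* Simulated device_add(): fails for the second device. */
static int register_device(const char *name)
{
	return name[0] == 'b' ? -1 : 0;
}

static int smcd_register_dev(const char *name)
{
	int rc;

	list_add(name);			/* entry becomes globally visible */
	rc = register_device(name);
	if (rc)
		list_del(name);		/* the fix: roll the insertion back */
	return rc;
}

int main(void)
{
	smcd_register_dev("a-dev");
	smcd_register_dev("b-dev");	/* registration fails */

	/* Without the rollback the list would still reference "b-dev" after
	 * its memory is freed by the caller; with it only "a-dev" remains. */
	printf("devices on the list: %d\n", nr_devs);
	return 0;
}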
diff --git a/queue-5.10/net-stmmac-fix-mac-wol-not-working-if-phy-does-not-s.patch b/queue-5.10/net-stmmac-fix-mac-wol-not-working-if-phy-does-not-s.patch
new file mode 100644
index 0000000..df77ee5
--- /dev/null
@@ -0,0 +1,58 @@
+From 51375c424f510156159a3ae0ec270883b8876d2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 May 2021 14:55:09 +0800
+Subject: net: stmmac: Fix MAC WoL not working if PHY does not support WoL
+
+From: Joakim Zhang <qiangqing.zhang@nxp.com>
+
+[ Upstream commit 576f9eacc680d2b1f37e8010cff62f7b227ea769 ]
+
+Both get and set WoL check device_can_wakeup(); if the MAC supports PMT, the
+driver sets the device wakeup capability. After commit 1d8e5b0f3f2c ("net: stmmac:
+Support WOL with phy"), the device wakeup capability is overwritten in
+stmmac_init_phy() according to the PHY's WoL feature. If the PHY doesn't support
+WoL, the MAC loses its wakeup capability. To fix this issue, only overwrite the
+device wakeup capability when the MAC doesn't support PMT.
+
+With this change the driver checks the MAC's WoL capability if the MAC supports
+PMT; if it does not, the driver checks the PHY's WoL capability.
+
+Fixes: 1d8e5b0f3f2c ("net: stmmac: Support WOL with phy")
+Reviewed-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4374ce4671ad..3134f7e669f8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1052,7 +1052,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+  */
+ static int stmmac_init_phy(struct net_device *dev)
+ {
+-      struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct stmmac_priv *priv = netdev_priv(dev);
+       struct device_node *node;
+       int ret;
+@@ -1078,8 +1077,12 @@ static int stmmac_init_phy(struct net_device *dev)
+               ret = phylink_connect_phy(priv->phylink, phydev);
+       }
+-      phylink_ethtool_get_wol(priv->phylink, &wol);
+-      device_set_wakeup_capable(priv->device, !!wol.supported);
++      if (!priv->plat->pmt) {
++              struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++
++              phylink_ethtool_get_wol(priv->phylink, &wol);
++              device_set_wakeup_capable(priv->device, !!wol.supported);
++      }
+       return ret;
+ }
+-- 
+2.30.2
+
diff --git a/queue-5.10/net-zero-initialize-tc-skb-extension-on-allocation.patch b/queue-5.10/net-zero-initialize-tc-skb-extension-on-allocation.patch
new file mode 100644
index 0000000..d9afa21
--- /dev/null
@@ -0,0 +1,175 @@
+From 37c5531e2e1c16c6c462d56da7c12e1bc6ca7635 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 May 2021 16:21:52 +0300
+Subject: net: zero-initialize tc skb extension on allocation
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit 9453d45ecb6c2199d72e73c993e9d98677a2801b ]
+
+Function skb_ext_add() doesn't initialize the created skb extension with any
+value and leaves that up to the user. However, since the extension of type
+TC_SKB_EXT originally contained only the single value tc_skb_ext->chain, its
+users used to just assign the chain value without first setting the whole
+extension memory to zero. This assumption changed when the TC_SKB_EXT extension
+was extended with additional fields, but not all users were updated to
+initialize the new fields, which leads to use of uninitialized memory
+afterwards. UBSAN log:
+
+[  778.299821] UBSAN: invalid-load in net/openvswitch/flow.c:899:28
+[  778.301495] load of value 107 is not a valid value for type '_Bool'
+[  778.303215] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.12.0-rc7+ #2
+[  778.304933] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[  778.307901] Call Trace:
+[  778.308680]  <IRQ>
+[  778.309358]  dump_stack+0xbb/0x107
+[  778.310307]  ubsan_epilogue+0x5/0x40
+[  778.311167]  __ubsan_handle_load_invalid_value.cold+0x43/0x48
+[  778.312454]  ? memset+0x20/0x40
+[  778.313230]  ovs_flow_key_extract.cold+0xf/0x14 [openvswitch]
+[  778.314532]  ovs_vport_receive+0x19e/0x2e0 [openvswitch]
+[  778.315749]  ? ovs_vport_find_upcall_portid+0x330/0x330 [openvswitch]
+[  778.317188]  ? create_prof_cpu_mask+0x20/0x20
+[  778.318220]  ? arch_stack_walk+0x82/0xf0
+[  778.319153]  ? secondary_startup_64_no_verify+0xb0/0xbb
+[  778.320399]  ? stack_trace_save+0x91/0xc0
+[  778.321362]  ? stack_trace_consume_entry+0x160/0x160
+[  778.322517]  ? lock_release+0x52e/0x760
+[  778.323444]  netdev_frame_hook+0x323/0x610 [openvswitch]
+[  778.324668]  ? ovs_netdev_get_vport+0xe0/0xe0 [openvswitch]
+[  778.325950]  __netif_receive_skb_core+0x771/0x2db0
+[  778.327067]  ? lock_downgrade+0x6e0/0x6f0
+[  778.328021]  ? lock_acquire+0x565/0x720
+[  778.328940]  ? generic_xdp_tx+0x4f0/0x4f0
+[  778.329902]  ? inet_gro_receive+0x2a7/0x10a0
+[  778.330914]  ? lock_downgrade+0x6f0/0x6f0
+[  778.331867]  ? udp4_gro_receive+0x4c4/0x13e0
+[  778.332876]  ? lock_release+0x52e/0x760
+[  778.333808]  ? dev_gro_receive+0xcc8/0x2380
+[  778.334810]  ? lock_downgrade+0x6f0/0x6f0
+[  778.335769]  __netif_receive_skb_list_core+0x295/0x820
+[  778.336955]  ? process_backlog+0x780/0x780
+[  778.337941]  ? mlx5e_rep_tc_netdevice_event_unregister+0x20/0x20 [mlx5_core]
+[  778.339613]  ? seqcount_lockdep_reader_access.constprop.0+0xa7/0xc0
+[  778.341033]  ? kvm_clock_get_cycles+0x14/0x20
+[  778.342072]  netif_receive_skb_list_internal+0x5f5/0xcb0
+[  778.343288]  ? __kasan_kmalloc+0x7a/0x90
+[  778.344234]  ? mlx5e_handle_rx_cqe_mpwrq+0x9e0/0x9e0 [mlx5_core]
+[  778.345676]  ? mlx5e_xmit_xdp_frame_mpwqe+0x14d0/0x14d0 [mlx5_core]
+[  778.347140]  ? __netif_receive_skb_list_core+0x820/0x820
+[  778.348351]  ? mlx5e_post_rx_mpwqes+0xa6/0x25d0 [mlx5_core]
+[  778.349688]  ? napi_gro_flush+0x26c/0x3c0
+[  778.350641]  napi_complete_done+0x188/0x6b0
+[  778.351627]  mlx5e_napi_poll+0x373/0x1b80 [mlx5_core]
+[  778.352853]  __napi_poll+0x9f/0x510
+[  778.353704]  ? mlx5_flow_namespace_set_mode+0x260/0x260 [mlx5_core]
+[  778.355158]  net_rx_action+0x34c/0xa40
+[  778.356060]  ? napi_threaded_poll+0x3d0/0x3d0
+[  778.357083]  ? sched_clock_cpu+0x18/0x190
+[  778.358041]  ? __common_interrupt+0x8e/0x1a0
+[  778.359045]  __do_softirq+0x1ce/0x984
+[  778.359938]  __irq_exit_rcu+0x137/0x1d0
+[  778.360865]  irq_exit_rcu+0xa/0x20
+[  778.361708]  common_interrupt+0x80/0xa0
+[  778.362640]  </IRQ>
+[  778.363212]  asm_common_interrupt+0x1e/0x40
+[  778.364204] RIP: 0010:native_safe_halt+0xe/0x10
+[  778.365273] Code: 4f ff ff ff 4c 89 e7 e8 50 3f 40 fe e9 dc fe ff ff 48 89 df e8 43 3f 40 fe eb 90 cc e9 07 00 00 00 0f 00 2d 74 05 62 00 fb f4 <c3> 90 e9 07 00 00 00 0f 00 2d 64 05 62 00 f4 c3 cc cc 0f 1f 44 00
+[  778.369355] RSP: 0018:ffffffff84407e48 EFLAGS: 00000246
+[  778.370570] RAX: ffff88842de46a80 RBX: ffffffff84425840 RCX: ffffffff83418468
+[  778.372143] RDX: 000000000026f1da RSI: 0000000000000004 RDI: ffffffff8343af5e
+[  778.373722] RBP: fffffbfff0884b08 R08: 0000000000000000 R09: ffff88842de46bcb
+[  778.375292] R10: ffffed1085bc8d79 R11: 0000000000000001 R12: 0000000000000000
+[  778.376860] R13: ffffffff851124a0 R14: 0000000000000000 R15: dffffc0000000000
+[  778.378491]  ? rcu_eqs_enter.constprop.0+0xb8/0xe0
+[  778.379606]  ? default_idle_call+0x5e/0xe0
+[  778.380578]  default_idle+0xa/0x10
+[  778.381406]  default_idle_call+0x96/0xe0
+[  778.382350]  do_idle+0x3d4/0x550
+[  778.383153]  ? arch_cpu_idle_exit+0x40/0x40
+[  778.384143]  cpu_startup_entry+0x19/0x20
+[  778.385078]  start_kernel+0x3c7/0x3e5
+[  778.385978]  secondary_startup_64_no_verify+0xb0/0xbb
+
+Fix the issue by providing a new function, tc_skb_ext_alloc(), that allocates
+the tc skb extension and initializes its memory to 0 before returning it to the
+caller. Change all existing users to the new API instead of calling
+skb_ext_add() directly.
+
+Fixes: 038ebb1a713d ("net/sched: act_ct: fix miss set mru for ovs after defrag in act_ct")
+Fixes: d29334c15d33 ("net/sched: act_api: fix miss set post_ct for ovs after do conntrack in act_ct")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Acked-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c |  2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c     |  2 +-
+ include/net/pkt_cls.h                               | 11 +++++++++++
+ net/sched/cls_api.c                                 |  2 +-
+ 4 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 76177f7c5ec2..e6f782743fbe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -643,7 +643,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+       }
+       if (chain) {
+-              tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++              tc_skb_ext = tc_skb_ext_alloc(skb);
+               if (!tc_skb_ext) {
+                       WARN_ON(1);
+                       return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index e60b8bc9b804..1bdeb948f56d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -5494,7 +5494,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
+       }
+       if (chain) {
+-              tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++              tc_skb_ext = tc_skb_ext_alloc(skb);
+               if (WARN_ON(!tc_skb_ext))
+                       return false;
+diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
+index d4d461236351..b608be532964 100644
+--- a/include/net/pkt_cls.h
++++ b/include/net/pkt_cls.h
+@@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
+               cls_common->extack = extack;
+ }
++#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
++static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
++{
++      struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
++
++      if (tc_skb_ext)
++              memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
++      return tc_skb_ext;
++}
++#endif
++
+ enum tc_matchall_command {
+       TC_CLSMATCHALL_REPLACE,
+       TC_CLSMATCHALL_DESTROY,
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 9383dc29ead5..a281da07bb1d 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1625,7 +1625,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
+       /* If we missed on some chain */
+       if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+-              ext = skb_ext_add(skb, TC_SKB_EXT);
++              ext = tc_skb_ext_alloc(skb);
+               if (WARN_ON_ONCE(!ext))
+                       return TC_ACT_SHOT;
+               ext->chain = last_executed_chain;
+-- 
+2.30.2
+
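The fix above boils down to "when a structure grows new fields, give callers one allocation helper that zeroes the whole thing, instead of trusting every call site to initialize each field". A tiny sketch of that wrapper idea, with calloc() playing the role of the zeroing step, follows; ext_alloc() and struct tc_ext are illustrative names, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* The structure started with 'chain' only; the later fields are the ones
 * old call sites forgot to initialize. */
struct tc_ext {
	unsigned int chain;
	bool mru_set;
	bool post_ct;
};

/* Central allocation helper: all fields start out as zero, so code that only
 * fills in 'chain' still hands consumers well-defined values. */
static struct tc_ext *ext_alloc(void)
{
	return calloc(1, sizeof(struct tc_ext));
}

int main(void)
{
	struct tc_ext *ext = ext_alloc();

	if (!ext)
		return 1;
	ext->chain = 7;		/* the only field this caller cares about */

	printf("chain=%u mru_set=%d post_ct=%d\n",
	       ext->chain, ext->mru_set, ext->post_ct);
	free(ext);
	return 0;
}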
diff --git a/queue-5.10/openvswitch-meter-fix-race-when-getting-now_ms.patch b/queue-5.10/openvswitch-meter-fix-race-when-getting-now_ms.patch
new file mode 100644
index 0000000..c88c3ab
--- /dev/null
@@ -0,0 +1,57 @@
+From b44a8ed4e15599c51743c73486880a81ff893af9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 May 2021 21:08:00 +0800
+Subject: openvswitch: meter: fix race when getting now_ms.
+
+From: Tao Liu <thomas.liu@ucloud.cn>
+
+[ Upstream commit e4df1b0c24350a0f00229ff895a91f1072bd850d ]
+
+We have observed meters working unexpectedly if traffic is 3+Gbit/s
+with multiple connections.
+
+now_ms is not protected by meter->lock, so we may get a negative
+long_delta_ms when another CPU has updated meter->used, then:
+    delta_ms = (u32)long_delta_ms;
+which will be a large value.
+
+    band->bucket += delta_ms * band->rate;
+then we get a wrong band->bucket.
+
+The Open vSwitch userspace datapath fixed the same issue[1] some
+time ago, and we port that implementation to the kernel datapath.
+
+[1] https://patchwork.ozlabs.org/project/openvswitch/patch/20191025114436.9746-1-i.maximets@ovn.org/
+
+Fixes: 96fbc13d7e77 ("openvswitch: Add meter infrastructure")
+Signed-off-by: Tao Liu <thomas.liu@ucloud.cn>
+Suggested-by: Ilya Maximets <i.maximets@ovn.org>
+Reviewed-by: Ilya Maximets <i.maximets@ovn.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/meter.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 8fbefd52af7f..e594b4d6b58a 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -611,6 +611,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
+       spin_lock(&meter->lock);
+       long_delta_ms = (now_ms - meter->used); /* ms */
++      if (long_delta_ms < 0) {
++              /* This condition means that we have several threads fighting
++               * for a meter lock, and the one who received the packets a
++               * bit later wins. Assuming that all racing threads received
++               * packets at the same time to avoid overflow.
++               */
++              long_delta_ms = 0;
++      }
+       /* Make sure delta_ms will not be too large, so that bucket will not
+        * wrap around below.
+-- 
+2.30.2
+
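The failure mode described above, a slightly negative signed delta turning into a huge unsigned one, is easy to reproduce outside the kernel. The few lines of C below print what the (u32) cast does to a negative long and what the queued clamp produces instead; the -3 ms value is just an example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Another CPU advanced meter->used under us, so the delta went
	 * slightly negative. */
	long long_delta_ms = -3;
	uint32_t delta_ms = (uint32_t)long_delta_ms;

	printf("unclamped delta_ms = %u\n", delta_ms);	/* 4294967293 */

	/* The fix: treat a negative delta as "no time passed" before the
	 * cast, so band->bucket cannot jump by billions of tokens. */
	if (long_delta_ms < 0)
		long_delta_ms = 0;
	delta_ms = (uint32_t)long_delta_ms;
	printf("clamped   delta_ms = %u\n", delta_ms);	/* 0 */
	return 0;
}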
diff --git a/queue-5.10/sch_dsmark-fix-a-null-deref-in-qdisc_reset.patch b/queue-5.10/sch_dsmark-fix-a-null-deref-in-qdisc_reset.patch
new file mode 100644
index 0000000..ecbdbd2
--- /dev/null
@@ -0,0 +1,76 @@
+From c2e010f477ad22e402a8b8f19d08b6af5c8b15fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 May 2021 14:38:53 +0000
+Subject: sch_dsmark: fix a NULL deref in qdisc_reset()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+[ Upstream commit 9b76eade16423ef06829cccfe3e100cfce31afcd ]
+
+If Qdisc_ops->init() fails, Qdisc_ops->reset() is called.
+When dsmark_init() (Qdisc_ops->init()) fails, it possibly doesn't
+initialize dsmark_qdisc_data->q. But dsmark_reset() (Qdisc_ops->reset())
+uses the dsmark_qdisc_data->q pointer without any NULL check,
+so a panic would occur.
+
+Test commands:
+    sysctl net.core.default_qdisc=dsmark -w
+    ip link add dummy0 type dummy
+    ip link add vw0 link dummy0 type virt_wifi
+    ip link set vw0 up
+
+Splat looks like:
+KASAN: null-ptr-deref in range [0x0000000000000018-0x000000000000001f]
+CPU: 3 PID: 684 Comm: ip Not tainted 5.12.0+ #910
+RIP: 0010:qdisc_reset+0x2b/0x680
+Code: 1f 44 00 00 48 b8 00 00 00 00 00 fc ff df 41 57 41 56 41 55 41 54
+55 48 89 fd 48 83 c7 18 53 48 89 fa 48 c1 ea 03 48 83 ec 20 <80> 3c 02
+00 0f 85 09 06 00 00 4c 8b 65 18 0f 1f 44 00 00 65 8b 1d
+RSP: 0018:ffff88800fda6bf8 EFLAGS: 00010282
+RAX: dffffc0000000000 RBX: ffff8880050ed800 RCX: 0000000000000000
+RDX: 0000000000000003 RSI: ffffffff99e34100 RDI: 0000000000000018
+RBP: 0000000000000000 R08: fffffbfff346b553 R09: fffffbfff346b553
+R10: 0000000000000001 R11: fffffbfff346b552 R12: ffffffffc0824940
+R13: ffff888109e83800 R14: 00000000ffffffff R15: ffffffffc08249e0
+FS:  00007f5042287680(0000) GS:ffff888119800000(0000)
+knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055ae1f4dbd90 CR3: 0000000006760002 CR4: 00000000003706e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ ? rcu_read_lock_bh_held+0xa0/0xa0
+ dsmark_reset+0x3d/0xf0 [sch_dsmark]
+ qdisc_reset+0xa9/0x680
+ qdisc_destroy+0x84/0x370
+ qdisc_create_dflt+0x1fe/0x380
+ attach_one_default_qdisc.constprop.41+0xa4/0x180
+ dev_activate+0x4d5/0x8c0
+ ? __dev_open+0x268/0x390
+ __dev_open+0x270/0x390
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_dsmark.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index 2b88710994d7..76ed1a05ded2 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -406,7 +406,8 @@ static void dsmark_reset(struct Qdisc *sch)
+       struct dsmark_qdisc_data *p = qdisc_priv(sch);
+       pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-      qdisc_reset(p->q);
++      if (p->q)
++              qdisc_reset(p->q);
+       sch->qstats.backlog = 0;
+       sch->q.qlen = 0;
+ }
+-- 
+2.30.2
+
diff --git a/queue-5.10/scsi-libsas-use-_safe-loop-in-sas_resume_port.patch b/queue-5.10/scsi-libsas-use-_safe-loop-in-sas_resume_port.patch
new file mode 100644
index 0000000..917acee
--- /dev/null
@@ -0,0 +1,51 @@
+From dca55eef1c64d9b89600de33038e5695aee760e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 17:20:27 +0300
+Subject: scsi: libsas: Use _safe() loop in sas_resume_port()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 8c7e7b8486cda21269d393245883c5e4737d5ee7 ]
+
+If sas_notify_lldd_dev_found() fails then this code calls:
+
+       sas_unregister_dev(port, dev);
+
+which removes "dev", our list iterator, from the list.  This could lead to
+an endless loop.  We need to use list_for_each_entry_safe().
+
+Link: https://lore.kernel.org/r/YKUeq6gwfGcvvhty@mwanda
+Fixes: 303694eeee5e ("[SCSI] libsas: suspend / resume support")
+Reviewed-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libsas/sas_port.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
+index 19cf418928fa..e3d03d744713 100644
+--- a/drivers/scsi/libsas/sas_port.c
++++ b/drivers/scsi/libsas/sas_port.c
+@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
+ static void sas_resume_port(struct asd_sas_phy *phy)
+ {
+-      struct domain_device *dev;
++      struct domain_device *dev, *n;
+       struct asd_sas_port *port = phy->port;
+       struct sas_ha_struct *sas_ha = phy->ha;
+       struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
+        * 1/ presume every device came back
+        * 2/ force the next revalidation to check all expander phys
+        */
+-      list_for_each_entry(dev, &port->dev_list, dev_list_node) {
++      list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
+               int i, rc;
+               rc = sas_notify_lldd_dev_found(dev);
+-- 
+2.30.2
+
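The libsas change above is the standard rule that a loop which may unlink its current element has to use the _safe() iterator, which remembers the next pointer before the body runs. The user-space sketch below shows the same pattern on a simple singly linked list; remove_and_free() plays the role of sas_unregister_dev() on the error path, and the names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	int broken;		/* would fail sas_notify_lldd_dev_found() */
	struct node *next;
};

static struct node *head;

static void add(int id, int broken)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->id = id;
	n->broken = broken;
	n->next = head;
	head = n;
}

static void remove_and_free(struct node *victim)
{
	struct node **pp = &head;

	while (*pp && *pp != victim)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = victim->next;
		free(victim);	/* the iterator must not touch this again */
	}
}

int main(void)
{
	struct node *n, *next;

	add(1, 0);
	add(2, 1);
	add(3, 0);

	/* The "_safe" pattern: fetch ->next before the body, so removing and
	 * freeing the current node cannot derail the walk. */
	for (n = head; n; n = next) {
		next = n->next;
		if (n->broken)
			remove_and_free(n);
		else
			printf("device %d resumed\n", n->id);
	}

	/* Tear down what is left. */
	for (n = head; n; n = next) {
		next = n->next;
		free(n);
	}
	return 0;
}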
diff --git a/queue-5.10/series b/queue-5.10/series
index 3e3e0a1c79df1c807d3647f7b7c5b9d36316c005..95153105ac12fcb2ce6d2273f1ef3639aac9f66e 100644
@@ -186,3 +186,57 @@ drm-amdgpu-stop-touching-sched.ready-in-the-backend.patch
 platform-x86-touchscreen_dmi-add-info-for-the-chuwi-.patch
 block-fix-a-race-between-del_gendisk-and-blkrrpart.patch
 linux-bits.h-fix-compilation-error-with-genmask.patch
+net-netcp-fix-an-error-message.patch
+net-dsa-fix-error-code-getting-shifted-with-4-in-dsa.patch
+interconnect-qcom-bcm-voter-add-a-missing-of_node_pu.patch
+interconnect-qcom-add-missing-module_device_table.patch
+asoc-cs42l42-regmap-must-use_single_read-write.patch
+net-stmmac-fix-mac-wol-not-working-if-phy-does-not-s.patch
+net-ipa-memory-region-array-is-variable-size.patch
+vfio-ccw-check-initialized-flag-in-cp_init.patch
+spi-assume-gpio-cs-active-high-in-acpi-case.patch
+net-really-orphan-skbs-tied-to-closing-sk.patch
+net-packetmmap-fix-only-tx-timestamp-on-request.patch
+net-fec-fix-the-potential-memory-leak-in-fec_enet_in.patch
+chelsio-chtls-unlock-on-error-in-chtls_pt_recvmsg.patch
+net-mdio-thunder-fix-a-double-free-issue-in-the-.rem.patch
+net-mdio-octeon-fix-some-double-free-issues.patch
+cxgb4-ch_ktls-clear-resources-when-pf4-device-is-rem.patch
+openvswitch-meter-fix-race-when-getting-now_ms.patch
+tls-splice-check-splice_f_nonblock-instead-of-msg_do.patch
+net-sched-fix-packet-stuck-problem-for-lockless-qdis.patch
+net-sched-fix-tx-action-rescheduling-issue-during-de.patch
+net-sched-fix-tx-action-reschedule-issue-with-stoppe.patch
+net-hso-check-for-allocation-failure-in-hso_create_b.patch
+net-bnx2-fix-error-return-code-in-bnx2_init_board.patch
+bnxt_en-include-new-p5-hv-definition-in-vf-check.patch
+bnxt_en-fix-context-memory-setup-for-64k-page-size.patch
+mld-fix-panic-in-mld_newpack.patch
+net-smc-remove-device-from-smcd_dev_list-after-faile.patch
+gve-check-tx-qpl-was-actually-assigned.patch
+gve-update-mgmt_msix_idx-if-num_ntfy-changes.patch
+gve-add-null-pointer-checks-when-freeing-irqs.patch
+gve-upgrade-memory-barrier-in-poll-routine.patch
+gve-correct-skb-queue-index-validation.patch
+iommu-virtio-add-missing-module_device_table.patch
+net-hns3-fix-incorrect-resp_msg-issue.patch
+net-hns3-put-off-calling-register_netdev-until-clien.patch
+iommu-vt-d-use-user-privilege-for-rid2pasid-translat.patch
+cxgb4-avoid-accessing-registers-when-clearing-filter.patch
+staging-emxx_udc-fix-loop-in-_nbu2ss_nuke.patch
+asoc-cs35l33-fix-an-error-code-in-probe.patch
+bpf-offload-reorder-offload-callback-prepare-in-veri.patch
+bpf-set-mac_len-in-bpf_skb_change_head.patch
+ixgbe-fix-large-mtu-request-from-vf.patch
+asoc-qcom-lpass-cpu-use-optional-clk-apis.patch
+scsi-libsas-use-_safe-loop-in-sas_resume_port.patch
+net-lantiq-fix-memory-corruption-in-rx-ring.patch
+ipv6-record-frag_max_size-in-atomic-fragments-in-inp.patch
+alsa-usb-audio-scarlett2-snd_scarlett_gen2_controls_.patch
+net-ethernet-mtk_eth_soc-fix-packet-statistics-suppo.patch
+sch_dsmark-fix-a-null-deref-in-qdisc_reset.patch
+net-hsr-fix-mac_len-checks.patch
+mips-alchemy-xxs1500-add-gpio-au1000.h-header-file.patch
+mips-ralink-export-rt_sysc_membase-for-rt2880_wdt.c.patch
+net-zero-initialize-tc-skb-extension-on-allocation.patch
+net-mvpp2-add-buffer-header-handling-in-rx.patch
diff --git a/queue-5.10/spi-assume-gpio-cs-active-high-in-acpi-case.patch b/queue-5.10/spi-assume-gpio-cs-active-high-in-acpi-case.patch
new file mode 100644 (file)
index 0000000..3f5c48a
--- /dev/null
@@ -0,0 +1,69 @@
+From 38d6445bc0b7f3b41f475b182f454a10cba40729 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 17:09:12 +0300
+Subject: spi: Assume GPIO CS active high in ACPI case
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 6b69546912a57ff8c31061f98e56383cc0beffd3 ]
+
+Currently GPIO CS handling, when descriptors are in use, doesn't
+take into account that in the ACPI case the default polarity
+is Active High and can't be altered. Instead we have to use the
+per-chip definition provided by the SPISerialBus() resource.
+
+Fixes: 766c6b63aa04 ("spi: fix client driver breakages when using GPIO descriptors")
+Cc: Liguang Zhang <zhangliguang@linux.alibaba.com>
+Cc: Jay Fang <f.fangjian@huawei.com>
+Cc: Sven Van Asbroeck <thesven73@gmail.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Tested-by: Xin Hao <xhao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20210511140912.30757-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 419de3d40481..a6f1e94af13c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -814,16 +814,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+       if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+               if (!(spi->mode & SPI_NO_CS)) {
+-                      if (spi->cs_gpiod)
+-                              /* polarity handled by gpiolib */
+-                              gpiod_set_value_cansleep(spi->cs_gpiod,
+-                                                       enable1);
+-                      else
++                      if (spi->cs_gpiod) {
++                              /*
++                               * Historically ACPI has no means of the GPIO polarity and
++                               * thus the SPISerialBus() resource defines it on the per-chip
++                               * basis. In order to avoid a chain of negations, the GPIO
++                               * polarity is considered being Active High. Even for the cases
++                               * when _DSD() is involved (in the updated versions of ACPI)
++                               * the GPIO CS polarity must be defined Active High to avoid
++                               * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
++                               * into account.
++                               */
++                              if (has_acpi_companion(&spi->dev))
++                                      gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
++                              else
++                                      /* Polarity handled by GPIO library */
++                                      gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
++                      } else {
+                               /*
+                                * invert the enable line, as active low is
+                                * default for SPI.
+                                */
+                               gpio_set_value_cansleep(spi->cs_gpio, !enable);
++                      }
+               }
+               /* Some SPI masters need both GPIO CS & slave_select */
+               if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+-- 
+2.30.2
+
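Condensed, the decision the new branch makes (a sketch using the hunk's own names; per the surrounding 5.10 spi_set_cs() and the comment added above, `enable` already has SPI_CS_HIGH folded in, while `enable1` is the caller's raw request whose polarity gpiolib normally handles):

	if (has_acpi_companion(&spi->dev))
		/* ACPI: the descriptor is treated as active-high, so the
		 * physical level has to be derived by hand from 'enable'. */
		gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
	else
		/* DT/board case: gpiolib applies the real polarity itself,
		 * so the logical request is passed straight through. */
		gpiod_set_value_cansleep(spi->cs_gpiod, enable1);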
diff --git a/queue-5.10/staging-emxx_udc-fix-loop-in-_nbu2ss_nuke.patch b/queue-5.10/staging-emxx_udc-fix-loop-in-_nbu2ss_nuke.patch
new file mode 100644 (file)
index 0000000..df286e7
--- /dev/null
@@ -0,0 +1,49 @@
+From 3756772a93b77f33e0facf0b09b9d51786b5a5f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 May 2021 17:16:50 +0300
+Subject: staging: emxx_udc: fix loop in _nbu2ss_nuke()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit e0112a7c9e847ada15a631b88e279d547e8f26a7 ]
+
+The _nbu2ss_ep_done() function calls:
+
+       list_del_init(&req->queue);
+
+which means that the loop will never exit.
+
+Fixes: ca3d253eb967 ("Staging: emxx_udc: Iterate list using list_for_each_entry")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Link: https://lore.kernel.org/r/YKUd0sDyjm/lkJfJ@mwanda
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/emxx_udc/emxx_udc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index a30b4f5b199b..3897f8e8f5e0 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2062,7 +2062,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
+                       struct nbu2ss_ep *ep,
+                       int status)
+ {
+-      struct nbu2ss_req *req;
++      struct nbu2ss_req *req, *n;
+       /* Endpoint Disable */
+       _nbu2ss_epn_exit(udc, ep);
+@@ -2074,7 +2074,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
+               return 0;
+       /* called with irqs blocked */
+-      list_for_each_entry(req, &ep->queue, queue) {
++      list_for_each_entry_safe(req, n, &ep->queue, queue) {
+               _nbu2ss_ep_done(ep, req, status);
+       }
+-- 
+2.30.2
+
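Why the unmodified loop spins forever: _nbu2ss_ep_done() calls list_del_init(&req->queue), and list_del_init() re-initialises the removed entry so it points at itself. list_for_each_entry() then keeps stepping from req back to req and never reaches &ep->queue; the _safe variant avoids this by caching the next pointer before the body runs. A rough expansion of the generic helper is below (a sketch of the <linux/list.h> behaviour, not driver code):

	static inline void list_del_init(struct list_head *entry)
	{
		/* unlink the entry from its neighbours... */
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		/* ...then re-initialise it as an empty list: it points at itself */
		entry->next = entry;
		entry->prev = entry;
	}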
diff --git a/queue-5.10/tls-splice-check-splice_f_nonblock-instead-of-msg_do.patch b/queue-5.10/tls-splice-check-splice_f_nonblock-instead-of-msg_do.patch
new file mode 100644 (file)
index 0000000..d08a495
--- /dev/null
@@ -0,0 +1,75 @@
+From a7b4b86b59038a03d970d9488cd77679adc38cde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 11:11:02 +0800
+Subject: tls splice: check SPLICE_F_NONBLOCK instead of MSG_DONTWAIT
+
+From: Jim Ma <majinjing3@gmail.com>
+
+[ Upstream commit 974271e5ed45cfe4daddbeb16224a2156918530e ]
+
+In tls_sw_splice_read(), checking the MSG_* flags is inappropriate; the
+SPLICE_* flags should be used instead. Update tls_wait_data() to accept a
+nonblock argument rather than the raw flags, for both recvmsg and splice.
+
+Fixes: c46234ebb4d1 ("tls: RX path for ktls")
+Signed-off-by: Jim Ma <majinjing3@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls_sw.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 845c628ac1b2..3abe5257f757 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -37,6 +37,7 @@
+ #include <linux/sched/signal.h>
+ #include <linux/module.h>
++#include <linux/splice.h>
+ #include <crypto/aead.h>
+ #include <net/strparser.h>
+@@ -1282,7 +1283,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
+ }
+ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+-                                   int flags, long timeo, int *err)
++                                   bool nonblock, long timeo, int *err)
+ {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+@@ -1307,7 +1308,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+               if (sock_flag(sk, SOCK_DONE))
+                       return NULL;
+-              if ((flags & MSG_DONTWAIT) || !timeo) {
++              if (nonblock || !timeo) {
+                       *err = -EAGAIN;
+                       return NULL;
+               }
+@@ -1787,7 +1788,7 @@ int tls_sw_recvmsg(struct sock *sk,
+               bool async_capable;
+               bool async = false;
+-              skb = tls_wait_data(sk, psock, flags, timeo, &err);
++              skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
+               if (!skb) {
+                       if (psock) {
+                               int ret = __tcp_bpf_recvmsg(sk, psock,
+@@ -1991,9 +1992,9 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
+       lock_sock(sk);
+-      timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
++      timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
+-      skb = tls_wait_data(sk, NULL, flags, timeo, &err);
++      skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
+       if (!skb)
+               goto splice_read_end;
+-- 
+2.30.2
+
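Condensed from the hunks above, the shape of the fix: the helper now takes a plain boolean, and each caller translates its own flag namespace before the call.

	static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
					     bool nonblock, long timeo, int *err)
	{
		/* ... */
		if (nonblock || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		/* ... */
	}

	/* tls_sw_recvmsg(): the flags argument carries MSG_* bits */
	skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);

	/* tls_sw_splice_read(): the flags argument carries SPLICE_F_* bits */
	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);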
diff --git a/queue-5.10/vfio-ccw-check-initialized-flag-in-cp_init.patch b/queue-5.10/vfio-ccw-check-initialized-flag-in-cp_init.patch
new file mode 100644 (file)
index 0000000..b5c17b1
--- /dev/null
@@ -0,0 +1,48 @@
+From 0081cfdbd67a53dad2e7ea8ec47fb955da802aa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 21:56:29 +0200
+Subject: vfio-ccw: Check initialized flag in cp_init()
+
+From: Eric Farman <farman@linux.ibm.com>
+
+[ Upstream commit c6c82e0cd8125d30f2f1b29205c7e1a2f1a6785b ]
+
+We have a really nice flag in the channel_program struct that
+indicates whether it has been initialized by cp_init(), and we use it
+as a guard in the other cp accessor routines, but not for a
+duplicate call into cp_init(). The possibility of this occurring
+is low, because that flow is protected by the private->io_mutex
+and the FSM CP_PROCESSING state. But then why bother checking it
+in (for example) cp_prefetch() at all?
+
+Let's just be consistent and check for that in cp_init() too.
+
+Fixes: 71189f263f8a3 ("vfio-ccw: make it safe to access channel programs")
+Signed-off-by: Eric Farman <farman@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Acked-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Message-Id: <20210511195631.3995081-2-farman@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/cio/vfio_ccw_cp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index b9febc581b1f..8d1b2771c1aa 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
+       int ret;
++      /* this is an error in the caller */
++      if (cp->initialized)
++              return -EBUSY;
++
+       /*
+        * We only support prefetching the channel program. We assume all channel
+        * programs executed by supported guests likewise support prefetching.
+-- 
+2.30.2
+
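Sketched, the resulting symmetry (the cp_init() guard is from the hunk above; the accessor-side check is paraphrased from the commit message, and the exact errno used there is illustrative rather than taken from the 5.10 source):

	/* cp_init(): a second initialisation of the same channel_program
	 * is a bug in the caller, so refuse it outright. */
	if (cp->initialized)
		return -EBUSY;

	/* cp accessors such as cp_prefetch(): refuse to run on a
	 * channel_program that was never initialised. */
	if (!cp->initialized)
		return -EINVAL;		/* illustrative errno; the driver's check may differ */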