--- /dev/null
+From 8994db44bec176ee0158484cec0a21a50ceae3be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 17:30:25 +0800
+Subject: ALSA: ac97: fix possible memory leak in snd_ac97_dev_register()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 4881bda5ea05c8c240fc8afeaa928e2bc43f61fa ]
+
+If device_register() fails in snd_ac97_dev_register(), it should
+call put_device() to give up the reference; otherwise the name
+allocated in dev_set_name() is leaked.
+
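+The resulting error path then follows the usual driver core pattern
+(illustrative sketch of the code after this change; the actual one-line
+change is in the diff below):
+
+	if ((err = device_register(&ac97->dev)) < 0) {
+		ac97_err(ac97, "Can't register ac97 bus\n");
+		/* drop the reference; this also frees the name from dev_set_name() */
+		put_device(&ac97->dev);
+		ac97->dev.bus = NULL;
+		return err;
+	}
+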
+Fixes: 0ca06a00e206 ("[ALSA] AC97 bus interface for ad-hoc drivers")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20221019093025.1179475-1-yangyingliang@huawei.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/ac97/ac97_codec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 963731cf0d8c..cd66632bf1c3 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1946,6 +1946,7 @@ static int snd_ac97_dev_register(struct snd_device *device)
+ snd_ac97_get_short_name(ac97));
+ if ((err = device_register(&ac97->dev)) < 0) {
+ ac97_err(ac97, "Can't register ac97 bus\n");
++ put_device(&ac97->dev);
+ ac97->dev.bus = NULL;
+ return err;
+ }
+--
+2.35.1
+
--- /dev/null
+From 0b11b65e1e9794d2b7944c78c44bb468d061db00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Oct 2022 08:52:33 +0200
+Subject: ALSA: aoa: Fix I2S device accounting
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit f1fae475f10a26b7e34da4ff2e2f19b7feb3548e ]
+
+i2sbus_add_dev() is supposed to return the number of probed devices,
+i.e. either 1 or 0. However, one of its error paths returns -ENODEV,
+which corrupts the device count accumulated in the caller,
+i2sbus_probe().
+
+Change that return value to 0 and add a comment to make the return
+convention clear to readers.
+
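+For reference, the caller treats the return value as a count, roughly
+like this (heavily simplified sketch of i2sbus_probe(), not part of the
+change itself):
+
+	int got = 0;
+	struct device_node *np = NULL;
+
+	while ((np = of_get_next_child(dev->ofdev.dev.of_node, np)))
+		got += i2sbus_add_dev(dev, control, np);
+
+	if (!got) {
+		/* no devices found: clean up and fail the probe */
+	}
+
+A negative return value from i2sbus_add_dev() would make 'got' wrong.
+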
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Link: https://lore.kernel.org/r/20221027065233.13292-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/aoa/soundbus/i2sbus/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
+index f6841daf9e3b..51ed2f34b276 100644
+--- a/sound/aoa/soundbus/i2sbus/core.c
++++ b/sound/aoa/soundbus/i2sbus/core.c
+@@ -147,6 +147,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
+ return rc;
+ }
+
++/* Returns 1 if added, 0 for otherwise; don't return a negative value! */
+ /* FIXME: look at device node refcounting */
+ static int i2sbus_add_dev(struct macio_dev *macio,
+ struct i2sbus_control *control,
+@@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
+ * either as the second one in that case is just a modem. */
+ if (!ok) {
+ kfree(dev);
+- return -ENODEV;
++ return 0;
+ }
+
+ mutex_init(&dev->lock);
+--
+2.35.1
+
--- /dev/null
+From 51f607043223fa6bd6df1a2eb9ca7e39e5532032 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Oct 2022 09:34:38 +0800
+Subject: ALSA: aoa: i2sbus: fix possible memory leak in i2sbus_add_dev()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 4a4c8482e370d697738a78dcd7bf2780832cb712 ]
+
+dev_set_name() in soundbus_add_one() allocates memory for the name, and it
+needs to be freed when of_device_register() fails. Call soundbus_dev_put()
+to give up the reference taken in device_initialize(), so that the name can
+be freed in kobject_cleanup() once the refcount drops to 0. The other
+resources are also freed in i2sbus_release_dev(), so the function can
+return 0 directly.
+
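+With this change the registration failure is handled roughly as follows
+(sketch of the resulting code; the exact change is in the diff below):
+
+	if (soundbus_add_one(&dev->sound)) {
+		printk(KERN_DEBUG "i2sbus: device registration error!\n");
+		if (dev->sound.ofdev.dev.kobj.state_initialized) {
+			/* of_device_register() already initialized the device:
+			 * dropping the last reference frees the name and the
+			 * remaining resources via i2sbus_release_dev()
+			 */
+			soundbus_dev_put(&dev->sound);
+			return 0;
+		}
+		/* failed before device_initialize(): use the plain error path */
+		goto err;
+	}
+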
+Fixes: f3d9478b2ce4 ("[ALSA] snd-aoa: add snd-aoa")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20221027013438.991920-1-yangyingliang@huawei.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/aoa/soundbus/i2sbus/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
+index faf6b03131ee..f6841daf9e3b 100644
+--- a/sound/aoa/soundbus/i2sbus/core.c
++++ b/sound/aoa/soundbus/i2sbus/core.c
+@@ -302,6 +302,10 @@ static int i2sbus_add_dev(struct macio_dev *macio,
+
+ if (soundbus_add_one(&dev->sound)) {
+ printk(KERN_DEBUG "i2sbus: device registration error!\n");
++ if (dev->sound.ofdev.dev.kobj.state_initialized) {
++ soundbus_dev_put(&dev->sound);
++ return 0;
++ }
+ goto err;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 492e9e6e7582a8652d2e4e68a82331e7756a5bb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 12:12:15 +0530
+Subject: amd-xgbe: add the bit rate quirk for Molex cables
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 170a9e341a3b02c0b2ea0df16ef14a33a4f41de8 ]
+
+Offset 12 (bit-rate) of the EEPROM of SFP DAC (passive) cables is
+expected to be in the range 0x64 to 0x68. However, the 5 meter and
+7 meter Molex passive cables need a rate ceiling of 0x78 at offset 12.
+
+Add a quirk for Molex passive cables to extend the rate ceiling to 0x78.
+
+Fixes: abf0a1c2b26a ("amd-xgbe: Add support for SFP+ modules")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 21e38b720d87..a7166cd1179f 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -239,6 +239,7 @@ enum xgbe_sfp_speed {
+ #define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+ #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
+ #define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
++#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
+
+ #define XGBE_SFP_BASE_CU_CABLE_LEN 18
+
+@@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom {
+ #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
+ #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+
++#define XGBE_MOLEX_VENDOR "Molex Inc. "
++
+ struct xgbe_sfp_ascii {
+ union {
+ char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+@@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+- max = XGBE_SFP_BASE_BR_10GBE_MAX;
++ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
++ XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
++ max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
++ else
++ max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+--
+2.35.1
+
--- /dev/null
+From 988f96ccd65dd22cc23b3dcfefd1b8787b858930 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 12:12:14 +0530
+Subject: amd-xgbe: fix the SFP compliance codes check for DAC cables
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 09c5f6bf11ac98874339e55f4f5f79a9dbc9b375 ]
+
+The current XGBE code assumes that offset 6 of the EEPROM of SFP DAC
+(passive) cables is NULL. However, some cables (the 5 meter and 7 meter
+Molex passive cables) have non-zero data at offset 6. Fix the logic by
+moving the passive cable check above the active checks, so that such
+cables are not improperly identified as active cables. This fixes the
+issue for any passive cable that advertises 1000Base-CX at offset 6.
+
+Fixes: abf0a1c2b26a ("amd-xgbe: Add support for SFP+ modules")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 213769054391..21e38b720d87 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1151,7 +1151,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ }
+
+ /* Determine the type of SFP */
+- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
++ if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
++ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
++ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
+@@ -1167,9 +1170,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
+- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+--
+2.35.1
+
--- /dev/null
+From e7068ee671085320632a7a9e2b1175729404f1cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Oct 2022 19:28:46 -0700
+Subject: arc: iounmap() arg is volatile
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit c44f15c1c09481d50fd33478ebb5b8284f8f5edb ]
+
+Add 'volatile' to iounmap()'s argument to prevent build warnings.
+This makes it the same as on other major architectures.
+
+Placates warnings like the following (12 such warnings):
+
+../drivers/video/fbdev/riva/fbdev.c: In function 'rivafb_probe':
+../drivers/video/fbdev/riva/fbdev.c:2067:42: error: passing argument 1 of 'iounmap' discards 'volatile' qualifier from pointer target type [-Werror=discarded-qualifiers]
+ 2067 | iounmap(default_par->riva.PRAMIN);
+
+Fixes: 1162b0701b14b ("ARC: I/O and DMA Mappings")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Vineet Gupta <vgupta@kernel.org>
+Cc: linux-snps-arc@lists.infradead.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Vineet Gupta <vgupta@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arc/include/asm/io.h | 2 +-
+ arch/arc/mm/ioremap.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 8f777d6441a5..80347382a380 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr)
+ {
+ }
+
+-extern void iounmap(const void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+
+ /*
+ * io{read,write}{16,32}be() macros
+diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
+index 95c649fbc95a..d3b1ea16e9cd 100644
+--- a/arch/arc/mm/ioremap.c
++++ b/arch/arc/mm/ioremap.c
+@@ -93,7 +93,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ EXPORT_SYMBOL(ioremap_prot);
+
+
+-void iounmap(const void __iomem *addr)
++void iounmap(const volatile void __iomem *addr)
+ {
+ /* weird double cast to handle phys_addr_t > 32 bits */
+ if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
+--
+2.35.1
+
--- /dev/null
+From 3b2a68d430dd0a30a700556b1e69a5440fdfeded Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Oct 2022 19:21:40 -0700
+Subject: arm64: Add AMPERE1 to the Spectre-BHB affected list
+
+From: D Scott Phillips <scott@os.amperecomputing.com>
+
+[ Upstream commit 0e5d5ae837c8ce04d2ddb874ec5f920118bd9d31 ]
+
+Per AmpereOne erratum AC03_CPU_12, "Branch history may allow control of
+speculative execution across software contexts," the AMPERE1 core needs the
+bhb clearing loop to mitigate Spectre-BHB, with a loop iteration count of
+11.
+
+Signed-off-by: D Scott Phillips <scott@os.amperecomputing.com>
+Link: https://lore.kernel.org/r/20221011022140.432370-1-scott@os.amperecomputing.com
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/cputype.h | 4 ++++
+ arch/arm64/kernel/proton-pack.c | 6 ++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 39f5c1672f48..457b6bb276bb 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -60,6 +60,7 @@
+ #define ARM_CPU_IMP_FUJITSU 0x46
+ #define ARM_CPU_IMP_HISI 0x48
+ #define ARM_CPU_IMP_APPLE 0x61
++#define ARM_CPU_IMP_AMPERE 0xC0
+
+ #define ARM_CPU_PART_AEM_V8 0xD0F
+ #define ARM_CPU_PART_FOUNDATION 0xD00
+@@ -112,6 +113,8 @@
+ #define APPLE_CPU_PART_M1_ICESTORM 0x022
+ #define APPLE_CPU_PART_M1_FIRESTORM 0x023
+
++#define AMPERE_CPU_PART_AMPERE1 0xAC3
++
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+@@ -151,6 +154,7 @@
+ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+ #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
+ #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
++#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+
+ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 6ae53d8cd576..faa8a6bf2376 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -876,6 +876,10 @@ u8 spectre_bhb_loop_affected(int scope)
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ {},
+ };
++ static const struct midr_range spectre_bhb_k11_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
++ {},
++ };
+ static const struct midr_range spectre_bhb_k8_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+@@ -886,6 +890,8 @@ u8 spectre_bhb_loop_affected(int scope)
+ k = 32;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+ k = 24;
++ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
++ k = 11;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+ k = 8;
+
+--
+2.35.1
+
--- /dev/null
+From fb5940eae883f9201072cea7ab71970e910bd195 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Oct 2022 14:48:50 +0530
+Subject: ASoC: qcom: lpass-cpu: Mark HDMI TX parity register as volatile
+
+From: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+
+[ Upstream commit 1dd5166102e7ca91e8c5d833110333835e147ddb ]
+
+Mark the LPASS_HDMI_TX_PARITY_ADDR register as volatile, to fix
+DP audio failures observed with some external monitors.
+
+Fixes: 7cb37b7bd0d3 ("ASoC: qcom: Add support for lpass hdmi driver")
+
+Signed-off-by: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/1665825530-7593-1-git-send-email-quic_srivasam@quicinc.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/lpass-cpu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 9aa7c445b6b0..ecd6c049ace2 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -747,6 +747,8 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
+ return true;
+ if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
+ return true;
++ if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
++ return true;
+
+ for (i = 0; i < v->hdmi_rdma_channels; ++i) {
+ if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
+--
+2.35.1
+
--- /dev/null
+From 7685599fd31ef55b9eba0bbeb721d80ed173cd24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 10:38:31 +0530
+Subject: ASoC: qcom: lpass-cpu: mark HDMI TX registers as volatile
+
+From: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+
+[ Upstream commit c9a3545b1d771fb7b06a487796c40288c02c41c5 ]
+
+Add the DMA, channel selection and vbit control registers to the HDMI
+volatile registers list, as their contents are changed by the hardware
+on DP port disconnection.
+
+This update is required to fix the no-display and no-sound issue
+observed after reconnecting TAMA/SANWA DP cables.
+Once the DP cable is unplugged, the DMA control registers are reset by
+the hardware; however, at the second plug-in the new DMA control values
+are not written to the hardware registers, because the new register
+values and the values cached at the time of the first plug-in are the
+same.
+
+Fixes: 7cb37b7bd0d3 ("ASoC: qcom: Add support for lpass hdmi driver")
+
+Signed-off-by: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+Reported-by: Kuogee Hsieh <quic_khsieh@quicinc.com>
+Link: https://lore.kernel.org/r/1665637711-13300-1-git-send-email-quic_srivasam@quicinc.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/lpass-cpu.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 03abb3d719d0..9aa7c445b6b0 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -745,10 +745,18 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
+ return true;
+ if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
+ return true;
++ if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
++ return true;
+
+ for (i = 0; i < v->hdmi_rdma_channels; ++i) {
+ if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
+ return true;
++ if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
++ return true;
++ if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
++ return true;
++ if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
++ return true;
+ }
+ return false;
+ }
+--
+2.35.1
+
--- /dev/null
+From d3bedddcfa465fe28c1379e46ae3266365232153 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 09:53:10 +0200
+Subject: atlantic: fix deadlock at aq_nic_stop
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Íñigo Huguet <ihuguet@redhat.com>
+
+[ Upstream commit 6960d133f66ecddcd3af2b1cbd0c7dcd104268b8 ]
+
+The NIC is stopped with rtnl_lock held, and during the stop it cancels
+the 'service_task' work and frees IRQs.
+
+However, if CONFIG_MACSEC is set, rtnl_lock is acquired both from
+aq_nic_service_task and aq_linkstate_threaded_isr. A deadlock then
+happens if aq_nic_stop tries to cancel/disable them when they have
+already started their execution.
+
+As the deadlock is caused by rtnl_lock, it stalls many other processes,
+not only atlantic related ones.
+
+Fix it by introducing a mutex that protects each NIC's macsec related
+data, and locking it instead of the rtnl_lock from the service task and
+the threaded IRQ.
+
+Before this patch, all macsec data was protected with rtnl_lock, but
+maybe not all of it needs to be protected. With this new mutex, further
+efforts can be made to limit the protected data only to that which
+requires it. However, it is probably not worth it, because all macsec
+data accesses are infrequent, and almost all are done from macsec_ops
+or ethtool callbacks, called holding rtnl_lock, so macsec_mutex will
+never be much contended.
+
+The issue appeared when repeatedly attaching and detaching the NIC
+to/from a bond interface. After this patch, doing that no longer
+reproduces the bug.
+
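+For reference, each wrapper generated by AQ_LOCKED_MDO_DEF() in the
+diff below expands to roughly the following (sketch for one callback):
+
+	static int aq_locked_mdo_dev_open(struct macsec_context *ctx)
+	{
+		struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+		int ret;
+
+		/* serialize against aq_macsec_work() / aq_macsec_enable()
+		 * without taking rtnl_lock
+		 */
+		mutex_lock(&nic->macsec_mutex);
+		ret = aq_mdo_dev_open(ctx);
+		mutex_unlock(&nic->macsec_mutex);
+		return ret;
+	}
+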
+Fixes: 62c1c2e606f6 ("net: atlantic: MACSec offload skeleton")
+Reported-by: Li Liang <liali@redhat.com>
+Suggested-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
+Reviewed-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/aquantia/atlantic/aq_macsec.c | 96 ++++++++++++++-----
+ .../net/ethernet/aquantia/atlantic/aq_nic.h | 2 +
+ 2 files changed, 74 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+index 4a6dfac857ca..7c6e0811f2e6 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+@@ -1451,26 +1451,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
+ egress_sa_threshold_expired);
+ }
+
++#define AQ_LOCKED_MDO_DEF(mdo) \
++static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
++{ \
++ struct aq_nic_s *nic = netdev_priv(ctx->netdev); \
++ int ret; \
++ mutex_lock(&nic->macsec_mutex); \
++ ret = aq_mdo_##mdo(ctx); \
++ mutex_unlock(&nic->macsec_mutex); \
++ return ret; \
++}
++
++AQ_LOCKED_MDO_DEF(dev_open)
++AQ_LOCKED_MDO_DEF(dev_stop)
++AQ_LOCKED_MDO_DEF(add_secy)
++AQ_LOCKED_MDO_DEF(upd_secy)
++AQ_LOCKED_MDO_DEF(del_secy)
++AQ_LOCKED_MDO_DEF(add_rxsc)
++AQ_LOCKED_MDO_DEF(upd_rxsc)
++AQ_LOCKED_MDO_DEF(del_rxsc)
++AQ_LOCKED_MDO_DEF(add_rxsa)
++AQ_LOCKED_MDO_DEF(upd_rxsa)
++AQ_LOCKED_MDO_DEF(del_rxsa)
++AQ_LOCKED_MDO_DEF(add_txsa)
++AQ_LOCKED_MDO_DEF(upd_txsa)
++AQ_LOCKED_MDO_DEF(del_txsa)
++AQ_LOCKED_MDO_DEF(get_dev_stats)
++AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
++AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
++AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
++AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
++
+ const struct macsec_ops aq_macsec_ops = {
+- .mdo_dev_open = aq_mdo_dev_open,
+- .mdo_dev_stop = aq_mdo_dev_stop,
+- .mdo_add_secy = aq_mdo_add_secy,
+- .mdo_upd_secy = aq_mdo_upd_secy,
+- .mdo_del_secy = aq_mdo_del_secy,
+- .mdo_add_rxsc = aq_mdo_add_rxsc,
+- .mdo_upd_rxsc = aq_mdo_upd_rxsc,
+- .mdo_del_rxsc = aq_mdo_del_rxsc,
+- .mdo_add_rxsa = aq_mdo_add_rxsa,
+- .mdo_upd_rxsa = aq_mdo_upd_rxsa,
+- .mdo_del_rxsa = aq_mdo_del_rxsa,
+- .mdo_add_txsa = aq_mdo_add_txsa,
+- .mdo_upd_txsa = aq_mdo_upd_txsa,
+- .mdo_del_txsa = aq_mdo_del_txsa,
+- .mdo_get_dev_stats = aq_mdo_get_dev_stats,
+- .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
+- .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
+- .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
+- .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
++ .mdo_dev_open = aq_locked_mdo_dev_open,
++ .mdo_dev_stop = aq_locked_mdo_dev_stop,
++ .mdo_add_secy = aq_locked_mdo_add_secy,
++ .mdo_upd_secy = aq_locked_mdo_upd_secy,
++ .mdo_del_secy = aq_locked_mdo_del_secy,
++ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
++ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
++ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
++ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
++ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
++ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
++ .mdo_add_txsa = aq_locked_mdo_add_txsa,
++ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
++ .mdo_del_txsa = aq_locked_mdo_del_txsa,
++ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
++ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
++ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
++ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
++ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
+ };
+
+ int aq_macsec_init(struct aq_nic_s *nic)
+@@ -1492,6 +1523,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
+
+ nic->ndev->features |= NETIF_F_HW_MACSEC;
+ nic->ndev->macsec_ops = &aq_macsec_ops;
++ mutex_init(&nic->macsec_mutex);
+
+ return 0;
+ }
+@@ -1515,7 +1547,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
+ if (!nic->macsec_cfg)
+ return 0;
+
+- rtnl_lock();
++ mutex_lock(&nic->macsec_mutex);
+
+ if (nic->aq_fw_ops->send_macsec_req) {
+ struct macsec_cfg_request cfg = { 0 };
+@@ -1564,7 +1596,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
+ ret = aq_apply_macsec_cfg(nic);
+
+ unlock:
+- rtnl_unlock();
++ mutex_unlock(&nic->macsec_mutex);
+ return ret;
+ }
+
+@@ -1576,9 +1608,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
+ if (!netif_carrier_ok(nic->ndev))
+ return;
+
+- rtnl_lock();
++ mutex_lock(&nic->macsec_mutex);
+ aq_check_txsa_expiration(nic);
+- rtnl_unlock();
++ mutex_unlock(&nic->macsec_mutex);
+ }
+
+ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
+@@ -1589,21 +1621,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
+ if (!cfg)
+ return 0;
+
++ mutex_lock(&nic->macsec_mutex);
++
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->rxsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
+ }
+
++ mutex_unlock(&nic->macsec_mutex);
+ return cnt;
+ }
+
+ int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
+ {
++ int cnt;
++
+ if (!nic->macsec_cfg)
+ return 0;
+
+- return hweight_long(nic->macsec_cfg->txsc_idx_busy);
++ mutex_lock(&nic->macsec_mutex);
++ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
++ mutex_unlock(&nic->macsec_mutex);
++
++ return cnt;
+ }
+
+ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
+@@ -1614,12 +1655,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
+ if (!cfg)
+ return 0;
+
++ mutex_lock(&nic->macsec_mutex);
++
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->txsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
+ }
+
++ mutex_unlock(&nic->macsec_mutex);
+ return cnt;
+ }
+
+@@ -1691,6 +1735,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
+ if (!cfg)
+ return data;
+
++ mutex_lock(&nic->macsec_mutex);
++
+ aq_macsec_update_stats(nic);
+
+ common_stats = &cfg->stats;
+@@ -1773,5 +1819,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
+
+ data += i;
+
++ mutex_unlock(&nic->macsec_mutex);
++
+ return data;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+index 926cca9a0c83..6da3efa289a3 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+@@ -152,6 +152,8 @@ struct aq_nic_s {
+ struct mutex fwreq_mutex;
+ #if IS_ENABLED(CONFIG_MACSEC)
+ struct aq_macsec_cfg *macsec_cfg;
++ /* mutex to protect data in macsec_cfg */
++ struct mutex macsec_mutex;
+ #endif
+ /* PTP support */
+ struct aq_ptp_s *aq_ptp;
+--
+2.35.1
+
--- /dev/null
+From dc20a44b9345ab32bb4cd076804b1e8e43d22688 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 17:02:52 +0800
+Subject: can: mcp251x: mcp251x_can_probe(): add missing unregister_candev() in
+ error path
+
+From: Dongliang Mu <dzm91@hust.edu.cn>
+
+[ Upstream commit b1a09b63684cea56774786ca14c13b7041ffee63 ]
+
+In mcp251x_can_probe(), if mcp251x_gpio_setup() fails, the CAN device
+is not unregistered.
+
+Fix this by unregistering the CAN device in the error path of
+mcp251x_can_probe().
+
+Fixes: 2d52dabbef60 ("can: mcp251x: add GPIO support")
+Signed-off-by: Dongliang Mu <dzm91@hust.edu.cn>
+Link: https://lore.kernel.org/all/20221024090256.717236-1-dzm91@hust.edu.cn
+[mkl: adjust label]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 5dde3c42d241..ffcb04aac972 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1419,11 +1419,14 @@ static int mcp251x_can_probe(struct spi_device *spi)
+
+ ret = mcp251x_gpio_setup(priv);
+ if (ret)
+- goto error_probe;
++ goto out_unregister_candev;
+
+ netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
+ return 0;
+
++out_unregister_candev:
++ unregister_candev(net);
++
+ error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+--
+2.35.1
+
--- /dev/null
+From dbff58266a5e4708dafd3c546bfc5fed4cc11325 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 19:48:07 +0800
+Subject: can: mscan: mpc5xxx: mpc5xxx_can_probe(): add missing put_clock() in
+ error path
+
+From: Dongliang Mu <dzm91@hust.edu.cn>
+
+[ Upstream commit 3e5b3418827cefb5e1cc658806f02965791b8f07 ]
+
+Commit 1149108e2fbf ("can: mscan: improve clock API use") only adds
+put_clock() to the mpc5xxx_can_remove() function and forgets to add
+put_clock() to the error handling code of the probe function.
+
+Fix this bug by calling put_clock() in the error handling code.
+
+Fixes: 1149108e2fbf ("can: mscan: improve clock API use")
+Signed-off-by: Dongliang Mu <dzm91@hust.edu.cn>
+Link: https://lore.kernel.org/all/20221024133828.35881-1-mkl@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/mscan/mpc5xxx_can.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
+index e254e04ae257..ef649764f9b4 100644
+--- a/drivers/net/can/mscan/mpc5xxx_can.c
++++ b/drivers/net/can/mscan/mpc5xxx_can.c
+@@ -325,14 +325,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
+ &mscan_clksrc);
+ if (!priv->can.clock.freq) {
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
+- goto exit_free_mscan;
++ goto exit_put_clock;
+ }
+
+ err = register_mscandev(dev, mscan_clksrc);
+ if (err) {
+ dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
+ DRV_NAME, err);
+- goto exit_free_mscan;
++ goto exit_put_clock;
+ }
+
+ dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
+@@ -340,7 +340,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
+
+ return 0;
+
+-exit_free_mscan:
++exit_put_clock:
++ if (data->put_clock)
++ data->put_clock(ofdev);
+ free_candev(dev);
+ exit_dispose_irq:
+ irq_dispose_mapping(irq);
+--
+2.35.1
+
--- /dev/null
+From ae0c6fcb1db05ef9eb82e297f33f028ce600ee1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 13:55:48 -0700
+Subject: drm/msm: Fix return type of mdp4_lvds_connector_mode_valid
+
+From: Nathan Huckleberry <nhuck@google.com>
+
+[ Upstream commit 0b33a33bd15d5bab73b87152b220a8d0153a4587 ]
+
+The mode_valid field in drm_connector_helper_funcs is expected to be of
+type:
+enum drm_mode_status (* mode_valid) (struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+The mismatched return type breaks forward edge kCFI since the underlying
+function definition does not match the function hook definition.
+
+The return type of mdp4_lvds_connector_mode_valid should be changed from
+int to enum drm_mode_status.
+
+Reported-by: Dan Carpenter <error27@gmail.com>
+Link: https://github.com/ClangBuiltLinux/linux/issues/1703
+Cc: llvm@lists.linux.dev
+Signed-off-by: Nathan Huckleberry <nhuck@google.com>
+Fixes: 3e87599b68e7 ("drm/msm/mdp4: add LVDS panel support")
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Reviewed-by: Nathan Chancellor <nathan@kernel.org>
+Patchwork: https://patchwork.freedesktop.org/patch/502878/
+Link: https://lore.kernel.org/r/20220913205551.155128-1-nhuck@google.com
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+index 7288041dd86a..7444b75c4215 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+@@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
+ return ret;
+ }
+
+-static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+--
+2.35.1
+
--- /dev/null
+From b2099a479910e2aa6c4d223bc0fad4d20a4cfc2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 03:05:24 -0700
+Subject: i40e: Fix ethtool rx-flow-hash setting for X722
+
+From: Slawomir Laba <slawomirx.laba@intel.com>
+
+[ Upstream commit 54b5af5a438076082d482cab105b1bd484ab5074 ]
+
+When enabling a flow type for the RSS hash via ethtool:
+
+ethtool -N $pf rx-flow-hash tcp4|tcp6|udp4|udp6 s|d
+
+the driver would fail to set this up on X722 devices, since it was
+using the register mask dedicated to X710 devices.
+
+Apply a different mask to the register when setting the RSS hash for
+X722 devices.
+
+When displaying the flow types enabled via ethtool:
+
+ethtool -n $pf rx-flow-hash tcp4|tcp6|udp4|udp6
+
+the driver would print wrong values for X722 devices.
+
+Fix this issue by testing the X722-specific masks in the
+i40e_get_rss_hash_opts() function.
+
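+A minimal sketch of the idea (the actual changes are in the diff
+below): pick the L3 input-set mask based on the MAC type, since the
+X722 uses different bit positions than the X710.
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		src_l3 = I40E_X722_L3_SRC_MASK;
+		dst_l3 = I40E_X722_L3_DST_MASK;
+	} else {
+		src_l3 = I40E_L3_SRC_MASK;
+		dst_l3 = I40E_L3_DST_MASK;
+	}
+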
+Fixes: eb0dd6e4a3b3 ("i40e: Allow RSS Hash set with less than four parameters")
+Signed-off-by: Slawomir Laba <slawomirx.laba@intel.com>
+Signed-off-by: Michal Jaron <michalx.jaron@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20221024100526.1874914-1-jacob.e.keller@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/intel/i40e/i40e_ethtool.c | 31 ++++++++++++++-----
+ drivers/net/ethernet/intel/i40e/i40e_type.h | 4 +++
+ 2 files changed, 27 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index cc5f5c237774..bcc22b374b4a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -3083,10 +3083,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+
+ if (cmd->flow_type == TCP_V4_FLOW ||
+ cmd->flow_type == UDP_V4_FLOW) {
+- if (i_set & I40E_L3_SRC_MASK)
+- cmd->data |= RXH_IP_SRC;
+- if (i_set & I40E_L3_DST_MASK)
+- cmd->data |= RXH_IP_DST;
++ if (hw->mac.type == I40E_MAC_X722) {
++ if (i_set & I40E_X722_L3_SRC_MASK)
++ cmd->data |= RXH_IP_SRC;
++ if (i_set & I40E_X722_L3_DST_MASK)
++ cmd->data |= RXH_IP_DST;
++ } else {
++ if (i_set & I40E_L3_SRC_MASK)
++ cmd->data |= RXH_IP_SRC;
++ if (i_set & I40E_L3_DST_MASK)
++ cmd->data |= RXH_IP_DST;
++ }
+ } else if (cmd->flow_type == TCP_V6_FLOW ||
+ cmd->flow_type == UDP_V6_FLOW) {
+ if (i_set & I40E_L3_V6_SRC_MASK)
+@@ -3393,12 +3400,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+
+ /**
+ * i40e_get_rss_hash_bits - Read RSS Hash bits from register
++ * @hw: hw structure
+ * @nfc: pointer to user request
+ * @i_setc: bits currently set
+ *
+ * Returns value of bits to be set per user request
+ **/
+-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
++static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
++ struct ethtool_rxnfc *nfc,
++ u64 i_setc)
+ {
+ u64 i_set = i_setc;
+ u64 src_l3 = 0, dst_l3 = 0;
+@@ -3417,8 +3427,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+ dst_l3 = I40E_L3_V6_DST_MASK;
+ } else if (nfc->flow_type == TCP_V4_FLOW ||
+ nfc->flow_type == UDP_V4_FLOW) {
+- src_l3 = I40E_L3_SRC_MASK;
+- dst_l3 = I40E_L3_DST_MASK;
++ if (hw->mac.type == I40E_MAC_X722) {
++ src_l3 = I40E_X722_L3_SRC_MASK;
++ dst_l3 = I40E_X722_L3_DST_MASK;
++ } else {
++ src_l3 = I40E_L3_SRC_MASK;
++ dst_l3 = I40E_L3_DST_MASK;
++ }
+ } else {
+ /* Any other flow type are not supported here */
+ return i_set;
+@@ -3533,7 +3548,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
++ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
+index 446672a7e39f..0872448c0e80 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
+@@ -1404,6 +1404,10 @@ struct i40e_lldp_variables {
+ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+ /* INPUT SET MASK for RSS, flow director, and flexible payload */
++#define I40E_X722_L3_SRC_SHIFT 49
++#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
++#define I40E_X722_L3_DST_SHIFT 41
++#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
+ #define I40E_L3_SRC_SHIFT 47
+ #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+ #define I40E_L3_V6_SRC_SHIFT 43
+--
+2.35.1
+
--- /dev/null
+From dfe107647eea794dc0ff64a3643ad3bfebdc5cbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 03:05:26 -0700
+Subject: i40e: Fix flow-type by setting GL_HASH_INSET registers
+
+From: Slawomir Laba <slawomirx.laba@intel.com>
+
+[ Upstream commit 3b32c9932853e11d71f9db012d69e92e4669ba23 ]
+
+Fix setting the bits for a specific flow_type in the GLQF_HASH_INSET
+register. Previously, all of the bits were set only in the hena register,
+while only one bit was set in the inset register. For this to work
+correctly on all types of cards, these bits need to be set correctly in
+both the hena and inset registers.
+
+Fixes: eb0dd6e4a3b3 ("i40e: Allow RSS Hash set with less than four parameters")
+Signed-off-by: Slawomir Laba <slawomirx.laba@intel.com>
+Signed-off-by: Michal Jaron <michalx.jaron@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20221024100526.1874914-3-jacob.e.keller@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/intel/i40e/i40e_ethtool.c | 71 ++++++++++---------
+ 1 file changed, 38 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index bcc22b374b4a..144c4824b5e8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -3451,6 +3451,7 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ return i_set;
+ }
+
++#define FLOW_PCTYPES_SIZE 64
+ /**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+@@ -3463,9 +3464,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+- u8 flow_pctype = 0;
++ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
+ u64 i_set, i_setc;
+
++ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
++
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+@@ -3481,36 +3484,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+- hena |=
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
++ flow_pctypes);
+ break;
+ case TCP_V6_FLOW:
+- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+- hena |=
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+- hena |=
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
++ flow_pctypes);
+ break;
+ case UDP_V4_FLOW:
+- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+- hena |=
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+-
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
++ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
++ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
++ flow_pctypes);
++ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
++ flow_pctypes);
++ }
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case UDP_V6_FLOW:
+- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+- hena |=
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+-
++ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
++ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
++ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
++ flow_pctypes);
++ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
++ flow_pctypes);
++ }
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ case AH_ESP_V4_FLOW:
+@@ -3543,17 +3545,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+ return -EINVAL;
+ }
+
+- if (flow_pctype) {
+- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+- flow_pctype)) |
+- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+- flow_pctype)) << 32);
+- i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
+- (u32)i_set);
+- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
+- (u32)(i_set >> 32));
+- hena |= BIT_ULL(flow_pctype);
++ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
++ u8 flow_id;
++
++ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
++ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
++ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
++ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
++
++ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
++ (u32)i_set);
++ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
++ (u32)(i_set >> 32));
++ hena |= BIT_ULL(flow_id);
++ }
+ }
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+--
+2.35.1
+
--- /dev/null
+From 06e8d012866190c88eb93ee890c3ab5cc4a306d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 03:05:25 -0700
+Subject: i40e: Fix VF hang when reset is triggered on another VF
+
+From: Sylwester Dziedziuch <sylwesterx.dziedziuch@intel.com>
+
+[ Upstream commit 52424f974bc53c26ba3f00300a00e9de9afcd972 ]
+
+When a reset was triggered on one VF with i40e_reset_vf(), the global
+PF state __I40E_VF_DISABLE was set on the PF until the reset finished.
+If, immediately after triggering a reset on one VF, there was a request
+to reset another one, it caused a hang on the VF side: the VF was
+notified of the incoming reset, but the reset never happened because of
+this global state, and we got the following error message:
+
+[ +4.890195] iavf 0000:86:02.1: Never saw reset
+
+and the VF hung waiting for the reset to be triggered.
+
+Fix this by introducing a new per-VF state, I40E_VF_STATE_RESETTING,
+which is set on a VF while it is resetting, instead of using the global
+__I40E_VF_DISABLE PF state.
+
+Fixes: 3ba9bcb4b68f ("i40e: add locking around VF reset")
+Signed-off-by: Sylwester Dziedziuch <sylwesterx.dziedziuch@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://lore.kernel.org/r/20221024100526.1874914-2-jacob.e.keller@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 43 ++++++++++++++-----
+ .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 +
+ 2 files changed, 33 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index ffff7de801af..381b28a08746 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1483,10 +1483,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+ return true;
+
+- /* If the VFs have been disabled, this means something else is
+- * resetting the VF, so we shouldn't continue.
+- */
+- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
++ /* Bail out if VFs are disabled. */
++ if (test_bit(__I40E_VF_DISABLE, pf->state))
++ return true;
++
++ /* If VF is being reset already we don't need to continue. */
++ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ return true;
+
+ i40e_trigger_vf_reset(vf, flr);
+@@ -1523,7 +1525,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ i40e_cleanup_reset_vf(vf);
+
+ i40e_flush(hw);
+- clear_bit(__I40E_VF_DISABLE, pf->state);
++ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
+
+ return true;
+ }
+@@ -1556,8 +1558,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ return false;
+
+ /* Begin reset on all VFs at once */
+- for (v = 0; v < pf->num_alloc_vfs; v++)
+- i40e_trigger_vf_reset(&pf->vf[v], flr);
++ for (v = 0; v < pf->num_alloc_vfs; v++) {
++ vf = &pf->vf[v];
++ /* If VF is being reset no need to trigger reset again */
++ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
++ i40e_trigger_vf_reset(&pf->vf[v], flr);
++ }
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+@@ -1573,9 +1579,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ */
+ while (v < pf->num_alloc_vfs) {
+ vf = &pf->vf[v];
+- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+- break;
++ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
++ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
++ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
++ break;
++ }
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+@@ -1603,6 +1611,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
++ /* If VF is reset in another thread just continue */
++ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
++ continue;
++
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+@@ -1614,6 +1626,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
++ /* If VF is reset in another thread just continue */
++ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
++ continue;
++
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+@@ -1623,8 +1639,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ mdelay(50);
+
+ /* Finish the reset on each VF */
+- for (v = 0; v < pf->num_alloc_vfs; v++)
++ for (v = 0; v < pf->num_alloc_vfs; v++) {
++ /* If VF is reset in another thread just continue */
++ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
++ continue;
++
+ i40e_cleanup_reset_vf(&pf->vf[v]);
++ }
+
+ i40e_flush(hw);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index a554d0a0b09b..358bbdb58795 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -39,6 +39,7 @@ enum i40e_vf_states {
+ I40E_VF_STATE_MC_PROMISC,
+ I40E_VF_STATE_UC_PROMISC,
+ I40E_VF_STATE_PRE_ENABLE,
++ I40E_VF_STATE_RESETTING
+ };
+
+ /* VF capabilities */
+--
+2.35.1
+
--- /dev/null
+From 446589578f7eba131f1cfedf4166bb183fade8c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 Oct 2022 19:01:24 -0700
+Subject: ipv6: ensure sane device mtu in tunnels
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d89d7ff01235f218dad37de84457717f699dee79 ]
+
+Another syzbot report [1] with no reproducer hints
+at a bug in ip6_gre tunnel (dev:ip6gretap0)
+
+Since ipv6 mcast code makes sure to read dev->mtu once
+and applies a sanity check on it (see commit b9b312a7a451
+"ipv6: mcast: better catch silly mtu values"), a remaining
+possibility is that a layer is able to set dev->mtu to
+an underflowed value (high order bit set).
+
+This could happen indeed in ip6gre_tnl_link_config_route(),
+ip6_tnl_link_config() and ipip6_tunnel_bind_dev()
+
+Make sure to sanitize mtu value in a local variable before
+it is written once on dev->mtu, as lockless readers could
+catch wrong temporary value.
+
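+The pattern applied in all three tunnel drivers looks roughly like this
+(sketch; see the diff below for the exact per-driver variants):
+
+	int mtu = tdev->mtu - t_hlen;	/* compute in a local variable */
+
+	if (mtu < IPV6_MIN_MTU)
+		mtu = IPV6_MIN_MTU;	/* sanitize before publishing */
+	WRITE_ONCE(dev->mtu, mtu);	/* single write, safe vs lockless readers */
+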
+[1]
+skbuff: skb_over_panic: text:ffff80000b7a2f38 len:40 put:40 head:ffff000149dcf200 data:ffff000149dcf2b0 tail:0xd8 end:0xc0 dev:ip6gretap0
+------------[ cut here ]------------
+kernel BUG at net/core/skbuff.c:120
+Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 1 PID: 10241 Comm: kworker/1:1 Not tainted 6.0.0-rc7-syzkaller-18095-gbbed346d5a96 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/30/2022
+Workqueue: mld mld_ifc_work
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : skb_panic+0x4c/0x50 net/core/skbuff.c:116
+lr : skb_panic+0x4c/0x50 net/core/skbuff.c:116
+sp : ffff800020dd3b60
+x29: ffff800020dd3b70 x28: 0000000000000000 x27: ffff00010df2a800
+x26: 00000000000000c0 x25: 00000000000000b0 x24: ffff000149dcf200
+x23: 00000000000000c0 x22: 00000000000000d8 x21: ffff80000b7a2f38
+x20: ffff00014c2f7800 x19: 0000000000000028 x18: 00000000000001a9
+x17: 0000000000000000 x16: ffff80000db49158 x15: ffff000113bf1a80
+x14: 0000000000000000 x13: 00000000ffffffff x12: ffff000113bf1a80
+x11: ff808000081c0d5c x10: 0000000000000000 x9 : 73f125dc5c63ba00
+x8 : 73f125dc5c63ba00 x7 : ffff800008161d1c x6 : 0000000000000000
+x5 : 0000000000000080 x4 : 0000000000000001 x3 : 0000000000000000
+x2 : ffff0001fefddcd0 x1 : 0000000100000000 x0 : 0000000000000089
+Call trace:
+skb_panic+0x4c/0x50 net/core/skbuff.c:116
+skb_over_panic net/core/skbuff.c:125 [inline]
+skb_put+0xd4/0xdc net/core/skbuff.c:2049
+ip6_mc_hdr net/ipv6/mcast.c:1714 [inline]
+mld_newpack+0x14c/0x270 net/ipv6/mcast.c:1765
+add_grhead net/ipv6/mcast.c:1851 [inline]
+add_grec+0xa20/0xae0 net/ipv6/mcast.c:1989
+mld_send_cr+0x438/0x5a8 net/ipv6/mcast.c:2115
+mld_ifc_work+0x38/0x290 net/ipv6/mcast.c:2653
+process_one_work+0x2d8/0x504 kernel/workqueue.c:2289
+worker_thread+0x340/0x610 kernel/workqueue.c:2436
+kthread+0x12c/0x158 kernel/kthread.c:376
+ret_from_fork+0x10/0x20 arch/arm64/kernel/entry.S:860
+Code: 91011400 aa0803e1 a90027ea 94373093 (d4210000)
+
+Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20221024020124.3756833-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ip6_gre.c | 12 +++++++-----
+ net/ipv6/ip6_tunnel.c | 11 ++++++-----
+ net/ipv6/sit.c | 8 +++++---
+ 3 files changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 9e0890738d93..0010f9e54f13 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1153,14 +1153,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
+ dev->needed_headroom = dst_len;
+
+ if (set_mtu) {
+- dev->mtu = rt->dst.dev->mtu - t_hlen;
++ int mtu = rt->dst.dev->mtu - t_hlen;
++
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+- dev->mtu -= 8;
++ mtu -= 8;
+ if (dev->type == ARPHRD_ETHER)
+- dev->mtu -= ETH_HLEN;
++ mtu -= ETH_HLEN;
+
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ }
+ ip6_rt_put(rt);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 3a2741569b84..0d4cab94c5dd 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1476,8 +1476,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ struct net_device *tdev = NULL;
+ struct __ip6_tnl_parm *p = &t->parms;
+ struct flowi6 *fl6 = &t->fl.u.ip6;
+- unsigned int mtu;
+ int t_hlen;
++ int mtu;
+
+ memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+@@ -1524,12 +1524,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ dev->hard_header_len = tdev->hard_header_len + t_hlen;
+ mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
+
+- dev->mtu = mtu - t_hlen;
++ mtu = mtu - t_hlen;
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+- dev->mtu -= 8;
++ mtu -= 8;
+
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ }
+ }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 3c92e8cacbba..1ce486a9bc07 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1123,10 +1123,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+
+ if (tdev && !netif_is_l3_master(tdev)) {
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
++ int mtu;
+
+- dev->mtu = tdev->mtu - t_hlen;
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ mtu = tdev->mtu - t_hlen;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ }
+
+--
+2.35.1
+
--- /dev/null
+From ded9bb3c6f7f7e1b24fefb76c02d84f8ea16720a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 22:45:11 +0000
+Subject: kcm: annotate data-races around kcm->rx_psock
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 15e4dabda11b0fa31d510a915d1a580f47dfc92e ]
+
+kcm->rx_psock can be read locklessly in kcm_rfree().
+Annotate the read and writes accordingly.
+
+We do the same for kcm->rx_wait in the following patch.
+
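+The annotation follows the usual pattern for lockless readers (sketch;
+the actual changes are in the diff below):
+
+	/* writers, under mux->rx_lock */
+	WRITE_ONCE(kcm->rx_psock, psock);	/* reserve_rx_kcm() */
+	WRITE_ONCE(kcm->rx_psock, NULL);	/* unreserve_rx_kcm() */
+
+	/* lockless reader in kcm_rfree() */
+	if (!kcm->rx_wait && !READ_ONCE(kcm->rx_psock) &&
+	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat)
+		kcm_rcv_ready(kcm);	/* under mux->rx_lock */
+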
+syzbot reported:
+BUG: KCSAN: data-race in kcm_rfree / unreserve_rx_kcm
+
+write to 0xffff888123d827b8 of 8 bytes by task 2758 on cpu 1:
+unreserve_rx_kcm+0x72/0x1f0 net/kcm/kcmsock.c:313
+kcm_rcv_strparser+0x2b5/0x3a0 net/kcm/kcmsock.c:373
+__strp_recv+0x64c/0xd20 net/strparser/strparser.c:301
+strp_recv+0x6d/0x80 net/strparser/strparser.c:335
+tcp_read_sock+0x13e/0x5a0 net/ipv4/tcp.c:1703
+strp_read_sock net/strparser/strparser.c:358 [inline]
+do_strp_work net/strparser/strparser.c:406 [inline]
+strp_work+0xe8/0x180 net/strparser/strparser.c:415
+process_one_work+0x3d3/0x720 kernel/workqueue.c:2289
+worker_thread+0x618/0xa70 kernel/workqueue.c:2436
+kthread+0x1a9/0x1e0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:306
+
+read to 0xffff888123d827b8 of 8 bytes by task 5859 on cpu 0:
+kcm_rfree+0x14c/0x220 net/kcm/kcmsock.c:181
+skb_release_head_state+0x8e/0x160 net/core/skbuff.c:841
+skb_release_all net/core/skbuff.c:852 [inline]
+__kfree_skb net/core/skbuff.c:868 [inline]
+kfree_skb_reason+0x5c/0x260 net/core/skbuff.c:891
+kfree_skb include/linux/skbuff.h:1216 [inline]
+kcm_recvmsg+0x226/0x2b0 net/kcm/kcmsock.c:1161
+____sys_recvmsg+0x16c/0x2e0
+___sys_recvmsg net/socket.c:2743 [inline]
+do_recvmmsg+0x2f1/0x710 net/socket.c:2837
+__sys_recvmmsg net/socket.c:2916 [inline]
+__do_sys_recvmmsg net/socket.c:2939 [inline]
+__se_sys_recvmmsg net/socket.c:2932 [inline]
+__x64_sys_recvmmsg+0xde/0x160 net/socket.c:2932
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+value changed: 0xffff88812971ce00 -> 0x0000000000000000
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 5859 Comm: syz-executor.3 Not tainted 6.0.0-syzkaller-12189-g19d17ab7c68b-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/22/2022
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/kcm/kcmsock.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 18469f1f707e..a1dc2b74b52f 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -177,7 +177,7 @@ static void kcm_rfree(struct sk_buff *skb)
+ /* For reading rx_wait and rx_psock without holding lock */
+ smp_mb__after_atomic();
+
+- if (!kcm->rx_wait && !kcm->rx_psock &&
++ if (!kcm->rx_wait && !READ_ONCE(kcm->rx_psock) &&
+ sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
+ spin_lock_bh(&mux->rx_lock);
+ kcm_rcv_ready(kcm);
+@@ -282,7 +282,8 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
+ kcm->rx_wait = false;
+
+ psock->rx_kcm = kcm;
+- kcm->rx_psock = psock;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_psock, psock);
+
+ spin_unlock_bh(&mux->rx_lock);
+
+@@ -309,7 +310,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
+ spin_lock_bh(&mux->rx_lock);
+
+ psock->rx_kcm = NULL;
+- kcm->rx_psock = NULL;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_psock, NULL);
+
+ /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
+ * kcm_rfree
+--
+2.35.1
+
--- /dev/null
+From 7a0a9ba67c0351cf09ebbd3cafeaec364e53fee9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 22:45:12 +0000
+Subject: kcm: annotate data-races around kcm->rx_wait
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0c745b5141a45a076f1cb9772a399f7ebcb0948a ]
+
+kcm->rx_wait can be read locklessly in kcm_rfree().
+Annotate the read and writes accordingly.
+
+syzbot reported:
+
+BUG: KCSAN: data-race in kcm_rcv_strparser / kcm_rfree
+
+write to 0xffff88810784e3d0 of 1 bytes by task 1823 on cpu 1:
+reserve_rx_kcm net/kcm/kcmsock.c:283 [inline]
+kcm_rcv_strparser+0x250/0x3a0 net/kcm/kcmsock.c:363
+__strp_recv+0x64c/0xd20 net/strparser/strparser.c:301
+strp_recv+0x6d/0x80 net/strparser/strparser.c:335
+tcp_read_sock+0x13e/0x5a0 net/ipv4/tcp.c:1703
+strp_read_sock net/strparser/strparser.c:358 [inline]
+do_strp_work net/strparser/strparser.c:406 [inline]
+strp_work+0xe8/0x180 net/strparser/strparser.c:415
+process_one_work+0x3d3/0x720 kernel/workqueue.c:2289
+worker_thread+0x618/0xa70 kernel/workqueue.c:2436
+kthread+0x1a9/0x1e0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:306
+
+read to 0xffff88810784e3d0 of 1 bytes by task 17869 on cpu 0:
+kcm_rfree+0x121/0x220 net/kcm/kcmsock.c:181
+skb_release_head_state+0x8e/0x160 net/core/skbuff.c:841
+skb_release_all net/core/skbuff.c:852 [inline]
+__kfree_skb net/core/skbuff.c:868 [inline]
+kfree_skb_reason+0x5c/0x260 net/core/skbuff.c:891
+kfree_skb include/linux/skbuff.h:1216 [inline]
+kcm_recvmsg+0x226/0x2b0 net/kcm/kcmsock.c:1161
+____sys_recvmsg+0x16c/0x2e0
+___sys_recvmsg net/socket.c:2743 [inline]
+do_recvmmsg+0x2f1/0x710 net/socket.c:2837
+__sys_recvmmsg net/socket.c:2916 [inline]
+__do_sys_recvmmsg net/socket.c:2939 [inline]
+__se_sys_recvmmsg net/socket.c:2932 [inline]
+__x64_sys_recvmmsg+0xde/0x160 net/socket.c:2932
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+value changed: 0x01 -> 0x00
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 17869 Comm: syz-executor.2 Not tainted 6.1.0-rc1-syzkaller-00010-gbb1a1146467a-dirty #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/22/2022
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/kcm/kcmsock.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index a1dc2b74b52f..6b362b362f79 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -161,7 +161,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
+ /* Buffer limit is okay now, add to ready list */
+ list_add_tail(&kcm->wait_rx_list,
+ &kcm->mux->kcm_rx_waiters);
+- kcm->rx_wait = true;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_wait, true);
+ }
+
+ static void kcm_rfree(struct sk_buff *skb)
+@@ -177,7 +178,7 @@ static void kcm_rfree(struct sk_buff *skb)
+ /* For reading rx_wait and rx_psock without holding lock */
+ smp_mb__after_atomic();
+
+- if (!kcm->rx_wait && !READ_ONCE(kcm->rx_psock) &&
++ if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
+ sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
+ spin_lock_bh(&mux->rx_lock);
+ kcm_rcv_ready(kcm);
+@@ -236,7 +237,8 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
+ if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
+ /* Should mean socket buffer full */
+ list_del(&kcm->wait_rx_list);
+- kcm->rx_wait = false;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_wait, false);
+
+ /* Commit rx_wait to read in kcm_free */
+ smp_wmb();
+@@ -279,7 +281,8 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
+ kcm = list_first_entry(&mux->kcm_rx_waiters,
+ struct kcm_sock, wait_rx_list);
+ list_del(&kcm->wait_rx_list);
+- kcm->rx_wait = false;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_wait, false);
+
+ psock->rx_kcm = kcm;
+ /* paired with lockless reads in kcm_rfree() */
+@@ -1241,7 +1244,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm)
+ if (!kcm->rx_psock) {
+ if (kcm->rx_wait) {
+ list_del(&kcm->wait_rx_list);
+- kcm->rx_wait = false;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_wait, false);
+ }
+
+ requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
+@@ -1794,7 +1798,8 @@ static void kcm_done(struct kcm_sock *kcm)
+
+ if (kcm->rx_wait) {
+ list_del(&kcm->wait_rx_list);
+- kcm->rx_wait = false;
++ /* paired with lockless reads in kcm_rfree() */
++ WRITE_ONCE(kcm->rx_wait, false);
+ }
+ /* Move any pending receive messages to other kcm sockets */
+ requeue_rx_msgs(mux, &sk->sk_receive_queue);
+--
+2.35.1
+
--- /dev/null
+From a3604a7bfe0761bc964a24cae2ea220782106877 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Apr 2021 17:40:12 +0200
+Subject: media: atomisp: pci: reposition braces as per coding style
+
+From: Deepak R Varma <drv@mailo.com>
+
+[ Upstream commit c27479d762de4eda72ba9e0aa150d439970f2077 ]
+
+Misplaced braces make it difficult to follow the code easily. This also
+goes against the code style guidelines. This resolves the following
+checkpatch complaints:
+
+ERROR: open brace '{' following function definitions go on the next line
+ERROR: that open brace { should be on the previous line
+
+Link: https://lore.kernel.org/linux-media/YIwk3KbVGRPJwKa4@dU2104
+
+[mchehab: dropped a hunk with a merge conflict]
+Cc: linux-media@vger.kernel.org, devel@driverdev.osuosl.org, linux-kernel@vger.kernel.org, drv@mailo.com
+Signed-off-by: Deepak R Varma <drv@mailo.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Stable-dep-of: 3ad290194bb0 ("media: atomisp: prevent integer overflow in sh_css_set_black_frame()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../staging/media/atomisp/pci/sh_css_mipi.c | 69 +++----
+ .../staging/media/atomisp/pci/sh_css_params.c | 171 ++++++++----------
+ drivers/staging/media/atomisp/pci/sh_css_sp.c | 108 +++++------
+ .../media/atomisp/pci/sh_css_version.c | 3 +-
+ 4 files changed, 155 insertions(+), 196 deletions(-)
+
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+index 651eda0469b2..71f4ee807644 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+@@ -102,7 +102,8 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const enum atomisp_input_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+- unsigned int *size_mem_words) {
++ unsigned int *size_mem_words)
++{
+ int err = 0;
+
+ unsigned int bits_per_pixel = 0;
+@@ -129,8 +130,7 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
+ width_padded, height, format, hasSOLandEOL, embedded_data_size_words);
+
+- switch (format)
+- {
++ switch (format) {
+ case ATOMISP_INPUT_FORMAT_RAW_6: /* 4p, 3B, 24bits */
+ bits_per_pixel = 6;
+ break;
+@@ -189,12 +189,10 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || format == ATOMISP_INPUT_FORMAT_YUV420_10
+- || format == ATOMISP_INPUT_FORMAT_YUV420_16)
+- {
++ || format == ATOMISP_INPUT_FORMAT_YUV420_16) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+- } else
+- {
++ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+@@ -247,7 +245,8 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
+ #if !defined(ISP2401)
+ int
+ ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
+- const unsigned int size_mem_words) {
++ const unsigned int size_mem_words)
++{
+ u32 idx;
+
+ int err = -EBUSY;
+@@ -257,11 +256,9 @@ ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT &&
+ my_css.mipi_sizes_for_check[port][idx] != 0;
+- idx++) /* do nothing */
+- {
++ idx++) { /* do nothing */
+ }
+- if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT)
+- {
++ if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT) {
+ my_css.mipi_sizes_for_check[port][idx] = size_mem_words;
+ err = 0;
+ }
+@@ -282,7 +279,8 @@ mipi_init(void)
+ int
+ calculate_mipi_buff_size(
+ struct ia_css_stream_config *stream_cfg,
+- unsigned int *size_mem_words) {
++ unsigned int *size_mem_words)
++{
+ #if !defined(ISP2401)
+ int err = -EINVAL;
+ (void)stream_cfg;
+@@ -357,12 +355,10 @@ calculate_mipi_buff_size(
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+- || format == ATOMISP_INPUT_FORMAT_YUV420_10)
+- {
++ || format == ATOMISP_INPUT_FORMAT_YUV420_10) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+- } else
+- {
++ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+@@ -404,7 +400,8 @@ static bool buffers_needed(struct ia_css_pipe *pipe)
+
+ int
+ allocate_mipi_frames(struct ia_css_pipe *pipe,
+- struct ia_css_stream_info *info) {
++ struct ia_css_stream_info *info)
++{
+ int err = -EINVAL;
+ unsigned int port;
+
+@@ -413,8 +410,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+
+ assert(pipe);
+ assert(pipe->stream);
+- if ((!pipe) || (!pipe->stream))
+- {
++ if ((!pipe) || (!pipe->stream)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: pipe or stream is null.\n",
+ pipe);
+@@ -422,8 +418,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+ }
+
+ #ifdef ISP2401
+- if (pipe->stream->config.online)
+- {
++ if (pipe->stream->config.online) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
+ pipe);
+@@ -463,8 +458,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+ #endif
+
+ #if !defined(ISP2401)
+- if (ref_count_mipi_allocation[port] != 0)
+- {
++ if (ref_count_mipi_allocation[port] != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
+ pipe, port);
+@@ -476,8 +470,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+ * TODO AM: Once that is changed (removed) this code should be removed as well.
+ * In that case only 2400 related code should remain.
+ */
+- if (ref_count_mipi_allocation[port] != 0)
+- {
++ if (ref_count_mipi_allocation[port] != 0) {
+ ref_count_mipi_allocation[port]++;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
+@@ -495,8 +488,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+ { /* limit the scope of i,j */
+ unsigned int i, j;
+
+- for (i = 0; i < my_css.num_mipi_frames[port]; i++)
+- {
++ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* free previous frame */
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+@@ -549,7 +541,8 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
+ }
+
+ int
+-free_mipi_frames(struct ia_css_pipe *pipe) {
++free_mipi_frames(struct ia_css_pipe *pipe)
++{
+ int err = -EINVAL;
+ unsigned int port;
+
+@@ -557,8 +550,7 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
+ "free_mipi_frames(%p) enter:\n", pipe);
+
+ /* assert(pipe != NULL); TEMP: TODO: Should be assert only. */
+- if (pipe)
+- {
++ if (pipe) {
+ assert(pipe->stream);
+ if ((!pipe) || (!pipe->stream)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+@@ -637,8 +629,7 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
+ }
+ #endif
+ }
+- } else /* pipe ==NULL */
+- {
++ } else { /* pipe ==NULL */
+ /* AM TEMP: free-ing all mipi buffers just like a legacy code. */
+ for (port = CSI_PORT0_ID; port < N_CSI_PORTS; port++) {
+ unsigned int i;
+@@ -662,7 +653,8 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
+ }
+
+ int
+-send_mipi_frames(struct ia_css_pipe *pipe) {
++send_mipi_frames(struct ia_css_pipe *pipe)
++{
+ int err = -EINVAL;
+ unsigned int i;
+ #ifndef ISP2401
+@@ -675,8 +667,7 @@ send_mipi_frames(struct ia_css_pipe *pipe) {
+
+ assert(pipe);
+ assert(pipe->stream);
+- if (!pipe || !pipe->stream)
+- {
++ if (!pipe || !pipe->stream) {
+ IA_CSS_ERROR("pipe or stream is null");
+ return -EINVAL;
+ }
+@@ -706,8 +697,7 @@ send_mipi_frames(struct ia_css_pipe *pipe) {
+ }
+
+ /* Hand-over the SP-internal mipi buffers */
+- for (i = 0; i < my_css.num_mipi_frames[port]; i++)
+- {
++ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* Need to include the ofset for port. */
+ sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_frames[port][i]);
+@@ -720,8 +710,7 @@ send_mipi_frames(struct ia_css_pipe *pipe) {
+ * Send an event to inform the SP
+ * that all MIPI frames are passed.
+ **********************************/
+- if (!sh_css_sp_is_running())
+- {
++ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_ERROR("sp is not running");
+ return err;
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
+index 8d6514c45eeb..b7b3fb416e2b 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
+@@ -813,15 +813,15 @@ convert_allocate_fpntbl(struct ia_css_isp_parameters *params)
+ }
+
+ static int
+-store_fpntbl(struct ia_css_isp_parameters *params, ia_css_ptr ptr) {
++store_fpntbl(struct ia_css_isp_parameters *params, ia_css_ptr ptr)
++{
+ struct ia_css_host_data *isp_data;
+
+ assert(params);
+ assert(ptr != mmgr_NULL);
+
+ isp_data = convert_allocate_fpntbl(params);
+- if (!isp_data)
+- {
++ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+@@ -894,7 +894,8 @@ ia_css_process_kernel(struct ia_css_stream *stream,
+
+ static int
+ sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
+- bool *is_dp_10bpp) {
++ bool *is_dp_10bpp)
++{
+ int err = 0;
+ /* Currently we check if 10bpp DPC configuration is required based
+ * on the use case,i.e. if BDS and DPC is both enabled. The more cleaner
+@@ -903,12 +904,10 @@ sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
+ * implementation. (This is because the configuration is set before a
+ * binary is selected, and the binary info is not available)
+ */
+- if ((!pipe) || (!is_dp_10bpp))
+- {
++ if ((!pipe) || (!is_dp_10bpp)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ err = -EINVAL;
+- } else
+- {
++ } else {
+ *is_dp_10bpp = false;
+
+ /* check if DPC is enabled from the host */
+@@ -936,7 +935,8 @@ sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
+
+ int
+ sh_css_set_black_frame(struct ia_css_stream *stream,
+- const struct ia_css_frame *raw_black_frame) {
++ const struct ia_css_frame *raw_black_frame)
++{
+ struct ia_css_isp_parameters *params;
+ /* this function desperately needs to be moved to the ISP or SP such
+ * that it can use the DMA.
+@@ -957,13 +957,11 @@ sh_css_set_black_frame(struct ia_css_stream *stream,
+ IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame);
+
+ if (params->fpn_config.data &&
+- (params->fpn_config.width != width || params->fpn_config.height != height))
+- {
++ (params->fpn_config.width != width || params->fpn_config.height != height)) {
+ kvfree(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+- if (!params->fpn_config.data)
+- {
++ if (!params->fpn_config.data) {
+ params->fpn_config.data = kvmalloc(height * width *
+ sizeof(short), GFP_KERNEL);
+ if (!params->fpn_config.data) {
+@@ -977,8 +975,7 @@ sh_css_set_black_frame(struct ia_css_stream *stream,
+ }
+
+ /* store raw to fpntbl */
+- for (y = 0; y < height; y++)
+- {
++ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) {
+ int ofs = y * width + x;
+
+@@ -1181,7 +1178,8 @@ sh_css_enable_pipeline(const struct ia_css_binary *binary)
+ static int
+ ia_css_process_zoom_and_motion(
+ struct ia_css_isp_parameters *params,
+- const struct ia_css_pipeline_stage *first_stage) {
++ const struct ia_css_pipeline_stage *first_stage)
++{
+ /* first_stage can be NULL */
+ const struct ia_css_pipeline_stage *stage;
+ int err = 0;
+@@ -1195,8 +1193,7 @@ ia_css_process_zoom_and_motion(
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Go through all stages to udate uds and cropping */
+- for (stage = first_stage; stage; stage = stage->next)
+- {
++ for (stage = first_stage; stage; stage = stage->next) {
+ struct ia_css_binary *binary;
+ /* note: the var below is made static as it is quite large;
+ if it is not static it ends up on the stack which could
+@@ -1582,7 +1579,8 @@ ia_css_isp_3a_statistics_map_allocate(
+
+ int
+ ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+- const struct ia_css_isp_3a_statistics *isp_stats) {
++ const struct ia_css_isp_3a_statistics *isp_stats)
++{
+ struct ia_css_isp_3a_statistics_map *map;
+ int ret = 0;
+
+@@ -1592,13 +1590,11 @@ ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ assert(isp_stats);
+
+ map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
+- if (map)
+- {
++ if (map) {
+ hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_3a_statistics(host_stats, map);
+ ia_css_isp_3a_statistics_map_free(map);
+- } else
+- {
++ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = -ENOMEM;
+ }
+@@ -1895,7 +1891,8 @@ sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe)
+ int
+ ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+- const struct ia_css_isp_config *config) {
++ const struct ia_css_isp_config *config)
++{
+ return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL);
+ }
+
+@@ -1903,7 +1900,8 @@ int
+ ia_css_stream_set_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+- struct ia_css_pipe *pipe) {
++ struct ia_css_pipe *pipe)
++{
+ int err = 0;
+
+ if ((!stream) || (!config))
+@@ -1924,7 +1922,8 @@ ia_css_stream_set_isp_config_on_pipe(
+
+ int
+ ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+- struct ia_css_isp_config *config) {
++ struct ia_css_isp_config *config)
++{
+ struct ia_css_pipe *pipe_in = pipe;
+ int err = 0;
+
+@@ -1949,7 +1948,8 @@ static int
+ sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+- struct ia_css_pipe *pipe) {
++ struct ia_css_pipe *pipe)
++{
+ int err = 0;
+ int err1 = 0;
+ int err2 = 0;
+@@ -1978,7 +1978,8 @@ static int
+ sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+- struct ia_css_pipe *pipe) {
++ struct ia_css_pipe *pipe)
++{
+ unsigned int i;
+ bool per_frame_config_created = false;
+ int err = 0;
+@@ -1992,8 +1993,7 @@ sh_css_set_per_frame_isp_config_on_pipe(
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+- if (!pipe)
+- {
++ if (!pipe) {
+ err = -EINVAL;
+ goto exit;
+ }
+@@ -2001,8 +2001,7 @@ sh_css_set_per_frame_isp_config_on_pipe(
+ /* create per-frame ISP params object with default values
+ * from stream->isp_params_configs if one doesn't already exist
+ */
+- if (!stream->per_frame_isp_params_configs)
+- {
++ if (!stream->per_frame_isp_params_configs) {
+ err = sh_css_create_isp_params(stream,
+ &stream->per_frame_isp_params_configs);
+ if (err)
+@@ -2013,15 +2012,13 @@ sh_css_set_per_frame_isp_config_on_pipe(
+ params = stream->per_frame_isp_params_configs;
+
+ /* update new ISP params object with the new config */
+- if (!sh_css_init_isp_params_from_global(stream, params, false, pipe))
+- {
++ if (!sh_css_init_isp_params_from_global(stream, params, false, pipe)) {
+ err1 = -EINVAL;
+ }
+
+ err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe);
+
+- if (per_frame_config_created)
+- {
++ if (per_frame_config_created) {
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+ /* create per pipe reference to general ddr_ptrs */
+@@ -2052,7 +2049,8 @@ static int
+ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+- struct ia_css_pipe *pipe_in) {
++ struct ia_css_pipe *pipe_in)
++{
+ int err = 0;
+ bool is_dp_10bpp = true;
+
+@@ -2097,8 +2095,7 @@ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ }
+
+ if (0 ==
+- sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp))
+- {
++ sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) {
+ /* return an error when both DPC and BDS is enabled by the
+ * user. */
+ /* we do not exit from this point immediately to allow internal
+@@ -2106,8 +2103,7 @@ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ if (is_dp_10bpp) {
+ err = -EINVAL;
+ }
+- } else
+- {
++ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+@@ -2360,7 +2356,8 @@ static unsigned int g_param_buffer_dequeue_count;
+ static unsigned int g_param_buffer_enqueue_count;
+
+ int
+-ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) {
++ia_css_stream_isp_parameters_init(struct ia_css_stream *stream)
++{
+ int err = 0;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+@@ -2370,8 +2367,7 @@ ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) {
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("void");
+
+- if (!stream)
+- {
++ if (!stream) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+@@ -2386,8 +2382,7 @@ ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) {
+ goto ERR;
+
+ params = stream->isp_params_configs;
+- if (!sh_css_init_isp_params_from_global(stream, params, true, NULL))
+- {
++ if (!sh_css_init_isp_params_from_global(stream, params, true, NULL)) {
+ /* we do not return the error immediately to enable internal
+ * firmware feature testing */
+ err = -EINVAL;
+@@ -2397,8 +2392,7 @@ ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) {
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ /* create per pipe reference to general ddr_ptrs */
+- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+- {
++ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+@@ -2432,7 +2426,8 @@ ia_css_set_sdis2_config(
+
+ static int
+ sh_css_create_isp_params(struct ia_css_stream *stream,
+- struct ia_css_isp_parameters **isp_params_out) {
++ struct ia_css_isp_parameters **isp_params_out)
++{
+ bool succ = true;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+@@ -2442,23 +2437,20 @@ sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params =
+ kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL);
+
+- if (!params)
+- {
++ if (!params) {
+ *isp_params_out = NULL;
+ err = -ENOMEM;
+ IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+- } else
+- {
++ } else {
+ memset(params, 0, sizeof(struct ia_css_isp_parameters));
+ }
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+- {
++ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
++ memset(&params->pipe_ddr_ptrs[i], 0,
+ sizeof(params->pipe_ddr_ptrs[i]));
+ memset(&params->pipe_ddr_ptrs_size[i], 0,
+@@ -2719,7 +2711,8 @@ sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ }
+
+ int
+-sh_css_params_init(void) {
++sh_css_params_init(void)
++{
+ int i, p;
+
+ IA_CSS_ENTER_PRIVATE("void");
+@@ -2728,8 +2721,7 @@ sh_css_params_init(void) {
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+- for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
+- {
++ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) {
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ xmem_sp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+@@ -2767,8 +2759,7 @@ sh_css_params_init(void) {
+ ATOMISP_MAP_FLAG_CLEARED));
+
+ if ((sp_ddr_ptrs == mmgr_NULL) ||
+- (xmem_sp_group_ptrs == mmgr_NULL))
+- {
++ (xmem_sp_group_ptrs == mmgr_NULL)) {
+ ia_css_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+@@ -3100,14 +3091,14 @@ store_morph_plane(
+ unsigned int width,
+ unsigned int height,
+ ia_css_ptr dest,
+- unsigned int aligned_width) {
++ unsigned int aligned_width)
++{
+ struct ia_css_host_data *isp_data;
+
+ assert(dest != mmgr_NULL);
+
+ isp_data = convert_allocate_morph_plane(data, width, height, aligned_width);
+- if (!isp_data)
+- {
++ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+@@ -3227,7 +3218,8 @@ int
+ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit,
+- struct ia_css_pipe *pipe_in) {
++ struct ia_css_pipe *pipe_in)
++{
+ int err = 0;
+ ia_css_ptr cpy;
+ int i;
+@@ -3244,15 +3236,13 @@ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream);
+
+ /* now make the map available to the sp */
+- if (!commit)
+- {
++ if (!commit) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* enqueue a copies of the mem_map to
+ the designated pipelines */
+- for (i = 0; i < curr_pipe->stream->num_pipes; i++)
+- {
++ for (i = 0; i < curr_pipe->stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe;
+ struct sh_css_ddr_address_map *cur_map;
+ struct sh_css_ddr_address_map_size *cur_map_size;
+@@ -3436,7 +3426,8 @@ sh_css_params_write_to_ddr_internal(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+- struct sh_css_ddr_address_map_size *ddr_map_size) {
++ struct sh_css_ddr_address_map_size *ddr_map_size)
++{
+ int err;
+ const struct ia_css_binary *binary;
+
+@@ -3458,8 +3449,7 @@ sh_css_params_write_to_ddr_internal(
+
+ stage_num = stage->stage_num;
+
+- if (binary->info->sp.enable.fpnr)
+- {
++ if (binary->info->sp.enable.fpnr) {
+ buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl,
+ &ddr_map_size->fpn_tbl,
+ (size_t)(FPNTBL_BYTES(binary)),
+@@ -3480,8 +3470,7 @@ sh_css_params_write_to_ddr_internal(
+ }
+ }
+
+- if (binary->info->sp.enable.sc)
+- {
++ if (binary->info->sp.enable.sc) {
+ u32 enable_conv;
+ size_t bytes;
+
+@@ -3583,8 +3572,7 @@ sh_css_params_write_to_ddr_internal(
+ * DPC kernel. The code below sets the pipe specific configuration to
+ * individual binaries. */
+ if (IS_ISP2401 &&
+- params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc)
+- {
++ params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc) {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+@@ -3602,8 +3590,7 @@ sh_css_params_write_to_ddr_internal(
+ }
+ }
+
+- if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc)
+- {
++ if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) {
+ unsigned int i, j, idx;
+ unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
+@@ -3652,8 +3639,7 @@ sh_css_params_write_to_ddr_internal(
+ sizeof(converted_macc_table.data));
+ }
+
+- if (binary->info->sp.enable.dvs_6axis)
+- {
++ if (binary->info->sp.enable.dvs_6axis) {
+ /* because UV is packed into the Y plane, calc total
+ * YYU size = /2 gives size of UV-only,
+ * total YYU size = UV-only * 3.
+@@ -3711,8 +3697,7 @@ sh_css_params_write_to_ddr_internal(
+ }
+ }
+
+- if (binary->info->sp.enable.ca_gdc)
+- {
++ if (binary->info->sp.enable.ca_gdc) {
+ unsigned int i;
+ ia_css_ptr *virt_addr_tetra_x[
+
+@@ -3817,8 +3802,7 @@ sh_css_params_write_to_ddr_internal(
+ }
+
+ /* After special cases like SC, FPN since they may change parameters */
+- for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++)
+- {
++ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ const struct ia_css_isp_data *isp_data =
+ ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers,
+ IA_CSS_PARAM_CLASS_PARAM, mem);
+@@ -4031,7 +4015,8 @@ static int write_ia_css_isp_parameter_set_info_to_ddr(
+
+ static int
+ free_ia_css_isp_parameter_set_info(
+- ia_css_ptr ptr) {
++ ia_css_ptr ptr)
++{
+ int err = 0;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ unsigned int i;
+@@ -4040,8 +4025,7 @@ free_ia_css_isp_parameter_set_info(
+ IA_CSS_ENTER_PRIVATE("ptr = %u", ptr);
+
+ /* sanity check - ptr must be valid */
+- if (!ia_css_refcount_is_valid(ptr))
+- {
++ if (!ia_css_refcount_is_valid(ptr)) {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__,
+ ptr);
+ err = -EINVAL;
+@@ -4052,8 +4036,7 @@ free_ia_css_isp_parameter_set_info(
+ hmm_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map));
+ /* copy map using size info */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+- sizeof(size_t)); i++)
+- {
++ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+
+@@ -4260,7 +4243,8 @@ sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+- bool enable_zoom) {
++ bool enable_zoom)
++{
+ unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ int err = 0;
+ /* Note:
+@@ -4291,19 +4275,16 @@ sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height))
+ return -EINVAL;
+
+- if (!enable_zoom)
+- {
++ if (!enable_zoom) {
+ uds->curr_dx = HRT_GDC_N;
+ uds->curr_dy = HRT_GDC_N;
+ }
+
+- if (info->enable.dvs_envelope)
+- {
++ if (info->enable.dvs_envelope) {
+ /* Zoom region is only supported by the UDS module on ISP
+ * 2 and higher. It is not supported in video mode on ISP 1 */
+ return -EINVAL;
+- } else
+- {
++ } else {
+ if (enable_zoom) {
+ /* A. Calculate dx/dy based on crop region using in_frame_info
+ * Scale the crop region if in_frame_info to the stage is not same as
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
+index 02f5a73b4096..a73e8ca1e225 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
+@@ -535,12 +535,12 @@ sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out,
+ }
+
+ static int
+-set_input_frame_buffer(const struct ia_css_frame *frame) {
++set_input_frame_buffer(const struct ia_css_frame *frame)
++{
+ if (!frame)
+ return -EINVAL;
+
+- switch (frame->info.format)
+- {
++ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+@@ -566,12 +566,12 @@ set_input_frame_buffer(const struct ia_css_frame *frame) {
+
+ static int
+ set_output_frame_buffer(const struct ia_css_frame *frame,
+- unsigned int idx) {
++ unsigned int idx)
++{
+ if (!frame)
+ return -EINVAL;
+
+- switch (frame->info.format)
+- {
++ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+@@ -607,12 +607,12 @@ set_output_frame_buffer(const struct ia_css_frame *frame,
+ }
+
+ static int
+-set_view_finder_buffer(const struct ia_css_frame *frame) {
++set_view_finder_buffer(const struct ia_css_frame *frame)
++{
+ if (!frame)
+ return -EINVAL;
+
+- switch (frame->info.format)
+- {
++ switch (frame->info.format) {
+ /* the dual output pin */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+@@ -732,7 +732,8 @@ sh_css_sp_set_disable_continuous_viewfinder(bool flag)
+ }
+
+ static int
+-sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args) {
++sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args)
++{
+ int err = 0;
+ int i;
+
+@@ -742,8 +743,7 @@ sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args) {
+ err = set_input_frame_buffer(args->in_frame);
+ if (!err && args->out_vf_frame)
+ err = set_view_finder_buffer(args->out_vf_frame);
+- for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+- {
++ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (!err && args->out_frame[i])
+ err = set_output_frame_buffer(args->out_frame[i], i);
+ }
+@@ -786,7 +786,8 @@ sh_css_stage_write_binary_info(struct ia_css_binary_info *info)
+ }
+
+ static int
+-copy_isp_mem_if_to_ddr(struct ia_css_binary *binary) {
++copy_isp_mem_if_to_ddr(struct ia_css_binary *binary)
++{
+ int err;
+
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+@@ -817,7 +818,8 @@ configure_isp_from_args(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args,
+ bool two_ppc,
+- bool deinterleaved) {
++ bool deinterleaved)
++{
+ ia_css_fpn_configure(binary, &binary->in_frame_info);
+ ia_css_crop_configure(binary, &args->delay_frames[0]->info);
+ ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
+@@ -896,7 +898,8 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
+ bool xnr,
+ const struct ia_css_isp_param_css_segments *isp_mem_if,
+ unsigned int if_config_index,
+- bool two_ppc) {
++ bool two_ppc)
++{
+ const struct ia_css_binary_xinfo *xinfo;
+ const struct ia_css_binary_info *info;
+ int err = 0;
+@@ -928,8 +931,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+- if (!info)
+- {
++ if (!info) {
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL;
+ return 0;
+ }
+@@ -961,8 +963,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
+
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info,
+ &binary->in_frame_info);
+- for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+- {
++ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info,
+ &binary->out_frame_info[i]);
+ }
+@@ -996,16 +997,14 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
+
+ err = sh_css_sp_write_frame_pointers(args);
+ /* TODO: move it to a better place */
+- if (binary->info->sp.enable.s3a)
+- {
++ if (binary->info->sp.enable.s3a) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id,
+ mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS);
+ }
+- if (binary->info->sp.enable.dis)
+- {
++ if (binary->info->sp.enable.dis) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id,
+@@ -1046,8 +1045,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
+ * the original out res. for video pipe, it has two output pins --- out and
+ * vf_out, so it can keep these two resolutions already. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+- (binary->vf_downscale_log2 > 0))
+- {
++ (binary->vf_downscale_log2 > 0)) {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring out&vf info fiels properly */
+ sh_css_sp_stage.frames.out[0].info.padded_width
+@@ -1069,7 +1067,8 @@ sp_init_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool xnr,
+ unsigned int if_config_index,
+- bool two_ppc) {
++ bool two_ppc)
++{
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ const struct sh_css_binary_args *args;
+@@ -1105,14 +1104,12 @@ sp_init_stage(struct ia_css_pipeline_stage *stage,
+ args = &stage->args;
+ stage_num = stage->stage_num;
+
+- if (binary)
+- {
++ if (binary) {
+ info = binary->info;
+ binary_name = (const char *)(info->blob->name);
+ blob_info = &info->blob->header.blob;
+ ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params);
+- } else if (firmware)
+- {
++ } else if (firmware) {
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+
+ if (args->out_frame[0])
+@@ -1133,8 +1130,7 @@ sp_init_stage(struct ia_css_pipeline_stage *stage,
+ binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware);
+ blob_info = &firmware->blob;
+ mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers;
+- } else
+- {
++ } else {
+ /* SP stage */
+ assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC);
+ /* binary and blob_info are now NULL.
+@@ -1205,7 +1201,8 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
+ positioned on shading table at shading correction in ISP. */
+ const struct ia_css_isp_parameters *params
+- ) {
++ )
++{
+ /* Get first stage */
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *first_binary = NULL;
+@@ -1223,17 +1220,14 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ first_binary = me->stages->binary;
+
+ if (input_mode == IA_CSS_INPUT_MODE_SENSOR ||
+- input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+- {
++ input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ assert(port_id < N_MIPI_PORT_ID);
+ if (port_id >= N_MIPI_PORT_ID) /* should not happen but KW does not know */
+ return; /* we should be able to return an error */
+ if_config_index = (uint8_t)(port_id - MIPI_PORT0_ID);
+- } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY)
+- {
++ } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+- } else
+- {
++ } else {
+ if_config_index = 0x0;
+ }
+
+@@ -1241,15 +1235,13 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
+
+ /* Count stages */
+- for (stage = me->stages, num = 0; stage; stage = stage->next, num++)
+- {
++ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ stage->stage_num = num;
+ ia_css_debug_pipe_graph_dump_stage(stage, id);
+ }
+ me->num_stages = num;
+
+- if (first_binary)
+- {
++ if (first_binary) {
+ /* Init pipeline data */
+ sh_css_sp_init_group(two_ppc, first_binary->input_format,
+ offline, if_config_index);
+@@ -1277,8 +1269,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+
+ /* TODO: next indicates from which queues parameters need to be
+ sampled, needs checking/improvement */
+- if (ia_css_pipeline_uses_params(me))
+- {
++ if (ia_css_pipeline_uses_params(me)) {
+ sh_css_sp_group.pipe[thread_id].pipe_config =
+ SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
+ }
+@@ -1292,15 +1283,13 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+
+ pipe = find_pipe_by_num(pipe_num);
+ assert(pipe);
+- if (!pipe)
+- {
++ if (!pipe) {
+ return;
+ }
+ sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
+
+ #if defined(SH_CSS_ENABLE_METADATA)
+- if (md_info && md_info->size > 0)
+- {
++ if (md_info && md_info->size > 0) {
+ sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
+ sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
+ sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride;
+@@ -1316,8 +1305,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+
+ #if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
+- if (pipe_id != IA_CSS_PIPE_ID_COPY)
+- {
++ if (pipe_id != IA_CSS_PIPE_ID_COPY) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id,
+ (enum sh_css_queue_id *)(
+ &sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
+@@ -1329,14 +1317,12 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ * the parameters are passed to the isp for the shading table centering.
+ */
+ if (internal_frame_origin_bqs_on_sctbl &&
+- params && params->shading_settings.enable_shading_table_conversion == 0)
+- {
++ params && params->shading_settings.enable_shading_table_conversion == 0) {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->x;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->y;
+- } else
+- {
++ } else {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl =
+ 0;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl =
+@@ -1347,8 +1333,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
+
+- for (stage = me->stages, num = 0; stage; stage = stage->next, num++)
+- {
++ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ sh_css_sp_group.pipe[thread_id].num_stages++;
+ if (is_sp_stage(stage)) {
+ sp_init_sp_stage(stage, pipe_num, two_ppc,
+@@ -1400,7 +1385,8 @@ bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command)
+ }
+
+ enum host2sp_commands
+-sh_css_read_host2sp_command(void) {
++sh_css_read_host2sp_command(void)
++{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+@@ -1586,7 +1572,8 @@ sh_css_event_init_irq_mask(void)
+ int
+ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+- unsigned int and_mask) {
++ unsigned int and_mask)
++{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+@@ -1625,7 +1612,8 @@ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ int
+ ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+- unsigned int *and_mask) {
++ unsigned int *and_mask)
++{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_version.c b/drivers/staging/media/atomisp/pci/sh_css_version.c
+index fa6de61e4995..f5ff8ca66b50 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_version.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_version.c
+@@ -21,7 +21,8 @@
+ #include "sh_css_firmware.h"
+
+ int
+-ia_css_get_version(char *version, int max_size) {
++ia_css_get_version(char *version, int max_size)
++{
+ char *css_version;
+
+ if (!IS_ISP2401)
+--
+2.35.1
+
--- /dev/null
+From e50078b9c7a3fd8fc49124caa46fd861e875d31d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Sep 2022 07:20:09 +0200
+Subject: media: atomisp: prevent integer overflow in sh_css_set_black_frame()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 3ad290194bb06979367622e47357462836c1d3b4 ]
+
+The "height" and "width" values come from the user so the "height * width"
+multiplication can overflow.
+
+Link: https://lore.kernel.org/r/YxBBCRnm3mmvaiuR@kili
+
+Fixes: a49d25364dfb ("staging/atomisp: Add support for the Intel IPU v2")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/atomisp/pci/sh_css_params.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
+index b7b3fb416e2b..92f65e453797 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
+@@ -962,8 +962,8 @@ sh_css_set_black_frame(struct ia_css_stream *stream,
+ params->fpn_config.data = NULL;
+ }
+ if (!params->fpn_config.data) {
+- params->fpn_config.data = kvmalloc(height * width *
+- sizeof(short), GFP_KERNEL);
++ params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)),
++ GFP_KERNEL);
+ if (!params->fpn_config.data) {
+ IA_CSS_ERROR("out of memory");
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+--
+2.35.1
+
--- /dev/null
+From a3a7e4fb37ae889d580e384fbe9f0152eda61eef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 09:00:34 +0100
+Subject: media: v4l2-dv-timings: add sanity checks for blanking values
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 4b6d66a45ed34a15721cb9e11492fa1a24bc83df ]
+
+Add sanity checks to v4l2_valid_dv_timings() to ensure that the provided
+blanking values are reasonable.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: b18787ed1ce3 ([media] v4l2-dv-timings: add new helper module)
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-dv-timings.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index af48705c704f..003c32fed3f7 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -161,6 +161,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
++
++ /* sanity checks for the blanking timings */
++ if (!bt->interlaced &&
++ (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
++ return false;
++ if (bt->hfrontporch > 2 * bt->width ||
++ bt->hsync > 1024 || bt->hbackporch > 1024)
++ return false;
++ if (bt->vfrontporch > 4096 ||
++ bt->vsync > 128 || bt->vbackporch > 4096)
++ return false;
++ if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
++ bt->il_vsync > 128 || bt->il_vbackporch > 4096))
++ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+ }
+ EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
+--
+2.35.1
+
--- /dev/null
+From 605643b4a02a47ce06e14dbfb0da5067dfc0833a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Jul 2022 09:11:31 +0200
+Subject: media: v4l2: Fix v4l2_i2c_subdev_set_name function documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+[ Upstream commit bb9ea2c31fa11b789ade4c3abcdda3c5370a76ab ]
+
+The doc says the I²C device's name is used if devname is NULL, but
+actually the I²C device driver's name is used.
+
+Fixes: 0658293012af ("media: v4l: subdev: Add a function to set an I²C sub-device's name")
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/media/v4l2-common.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
+index a3083529b698..2e53ee1c8db4 100644
+--- a/include/media/v4l2-common.h
++++ b/include/media/v4l2-common.h
+@@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @client: pointer to struct i2c_client
+- * @devname: the name of the device; if NULL, the I²C device's name will be used
++ * @devname: the name of the device; if NULL, the I²C device driver's name
++ * will be used
+ * @postfix: sub-device specific string to put right after the I²C device name;
+ * may be NULL
+ */
+--
+2.35.1
+
--- /dev/null
+From 22d72db07c302a0931d8d8bd70cb34f06f7c5146 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 16:46:17 +0100
+Subject: media: videodev2.h: V4L2_DV_BT_BLANKING_HEIGHT should check
+ 'interlaced'
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 8da7f0976b9071b528c545008de9d10cc81883b1 ]
+
+If it is a progressive (non-interlaced) format, then ignore the
+interlaced timing values.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: 7f68127fa11f ([media] videodev2.h: defines to calculate blanking and frame sizes)
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/videodev2.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 534eaa4d39bc..b28817c59fdf 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -1552,7 +1552,8 @@ struct v4l2_bt_timings {
+ ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
+ #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
+ ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
+- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
++ ((bt)->interlaced ? \
++ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
+ #define V4L2_DV_BT_FRAME_HEIGHT(bt) \
+ ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
+
+--
+2.35.1
+
--- /dev/null
+From 02e007a3fe34ae0d00a9baea2efcc0915f0f3579 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 15:18:46 +0100
+Subject: media: vivid: dev->bitmap_cap wasn't freed in all cases
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 1f65ea411cc7b6ff128d82a3493d7b5648054e6f ]
+
+Whenever the compose width/height values change, the dev->bitmap_cap
+vmalloc'ed array must be freed and dev->bitmap_cap set to NULL.
+
+This was done in some places, but not all. This is only an issue if
+overlay support is enabled and the bitmap clipping is used.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: ef834f7836ec ([media] vivid: add the video capture and output parts)
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/test-drivers/vivid/vivid-vid-cap.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index d4e30cf64e5f..d493bd17481b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -452,6 +452,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
+ tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
+ dev->crop_cap = dev->src_rect;
+ dev->crop_bounds_cap = dev->src_rect;
++ if (dev->bitmap_cap &&
++ (dev->compose_cap.width != dev->crop_cap.width ||
++ dev->compose_cap.height != dev->crop_cap.height)) {
++ vfree(dev->bitmap_cap);
++ dev->bitmap_cap = NULL;
++ }
+ dev->compose_cap = dev->crop_cap;
+ if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
+ dev->compose_cap.height /= 2;
+@@ -909,6 +915,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_cap;
+ struct v4l2_rect *compose = &dev->compose_cap;
++ unsigned orig_compose_w = compose->width;
++ unsigned orig_compose_h = compose->height;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
+ int ret;
+
+@@ -1025,17 +1033,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ s->r.height /= factor;
+ }
+ v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
+- if (dev->bitmap_cap && (compose->width != s->r.width ||
+- compose->height != s->r.height)) {
+- vfree(dev->bitmap_cap);
+- dev->bitmap_cap = NULL;
+- }
+ *compose = s->r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
++ if (dev->bitmap_cap && (compose->width != orig_compose_w ||
++ compose->height != orig_compose_h)) {
++ vfree(dev->bitmap_cap);
++ dev->bitmap_cap = NULL;
++ }
+ tpg_s_crop_compose(&dev->tpg, crop, compose);
+ return 0;
+ }
+--
+2.35.1
+
--- /dev/null
+From b093c91b09ff4645bcdecb0cbba2c49376883f6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 15:32:28 +0100
+Subject: media: vivid: s_fbuf: add more sanity checks
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit f8bcaf714abfc94818dff8c0db84d750433984f4 ]
+
+VIDIOC_S_FBUF is by definition a scary ioctl, which is why only root
+can use it. But at least check if the framebuffer parameters match those
+of one of the framebuffers created by vivid, and reject anything else.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: ef834f7836ec ([media] vivid: add the video capture and output parts)
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/test-drivers/vivid/vivid-core.c | 22 +++++++++++++++++++
+ drivers/media/test-drivers/vivid/vivid-core.h | 2 ++
+ .../media/test-drivers/vivid/vivid-vid-cap.c | 9 +++++++-
+ 3 files changed, 32 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
+index 1e356dc65d31..f69c64e5a149 100644
+--- a/drivers/media/test-drivers/vivid/vivid-core.c
++++ b/drivers/media/test-drivers/vivid/vivid-core.c
+@@ -330,6 +330,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a
+ return vivid_vid_out_g_fbuf(file, fh, a);
+ }
+
++/*
++ * Only support the framebuffer of one of the vivid instances.
++ * Anything else is rejected.
++ */
++bool vivid_validate_fb(const struct v4l2_framebuffer *a)
++{
++ struct vivid_dev *dev;
++ int i;
++
++ for (i = 0; i < n_devs; i++) {
++ dev = vivid_devs[i];
++ if (!dev || !dev->video_pbase)
++ continue;
++ if ((unsigned long)a->base == dev->video_pbase &&
++ a->fmt.width <= dev->display_width &&
++ a->fmt.height <= dev->display_height &&
++ a->fmt.bytesperline <= dev->display_byte_stride)
++ return true;
++ }
++ return false;
++}
++
+ static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
+ {
+ struct video_device *vdev = video_devdata(file);
+diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
+index 99e69b8f770f..6aa32c8e6fb5 100644
+--- a/drivers/media/test-drivers/vivid/vivid-core.h
++++ b/drivers/media/test-drivers/vivid/vivid-core.h
+@@ -609,4 +609,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
+ return dev->output_type[dev->output] == HDMI;
+ }
+
++bool vivid_validate_fb(const struct v4l2_framebuffer *a);
++
+ #endif
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index eadf28ab1e39..d4e30cf64e5f 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -1276,7 +1276,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
+ return -EINVAL;
+ if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
+ return -EINVAL;
+- if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
++ if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
++ return -EINVAL;
++
++ /*
++ * Only support the framebuffer of one of the vivid instances.
++ * Anything else is rejected.
++ */
++ if (!vivid_validate_fb(a))
+ return -EINVAL;
+
+ dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
+--
+2.35.1
+
--- /dev/null
+From ac762af6aaf9a70568ba67261922cc77629c615d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 15:09:06 +0100
+Subject: media: vivid: set num_in/outputs to 0 if not supported
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 69d78a80da4ef12faf2a6f9cfa2097ab4ac43983 ]
+
+If node_types does not have video/vbi/meta inputs or outputs,
+then set num_inputs/num_outputs to 0 instead of 1.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: 0c90f649d2f5 (media: vivid: add vivid_create_queue() helper)
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/test-drivers/vivid/vivid-core.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
+index f69c64e5a149..761d2abd4006 100644
+--- a/drivers/media/test-drivers/vivid/vivid-core.c
++++ b/drivers/media/test-drivers/vivid/vivid-core.c
+@@ -872,8 +872,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
+
+ /* how many inputs do we have and of what type? */
+ dev->num_inputs = num_inputs[inst];
+- if (dev->num_inputs < 1)
+- dev->num_inputs = 1;
++ if (node_type & 0x20007) {
++ if (dev->num_inputs < 1)
++ dev->num_inputs = 1;
++ } else {
++ dev->num_inputs = 0;
++ }
+ if (dev->num_inputs >= MAX_INPUTS)
+ dev->num_inputs = MAX_INPUTS;
+ for (i = 0; i < dev->num_inputs; i++) {
+@@ -890,8 +894,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
+
+ /* how many outputs do we have and of what type? */
+ dev->num_outputs = num_outputs[inst];
+- if (dev->num_outputs < 1)
+- dev->num_outputs = 1;
++ if (node_type & 0x40300) {
++ if (dev->num_outputs < 1)
++ dev->num_outputs = 1;
++ } else {
++ dev->num_outputs = 0;
++ }
+ if (dev->num_outputs >= MAX_OUTPUTS)
+ dev->num_outputs = MAX_OUTPUTS;
+ for (i = 0; i < dev->num_outputs; i++) {
+--
+2.35.1
+
--- /dev/null
+From 72b54a9bed3c7d8cb3b2f261baa371a18f3483ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Oct 2022 21:00:11 +0800
+Subject: net: ehea: fix possible memory leak in ehea_register_port()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 0e7ce23a917a9cc83ca3c779fbba836bca3bcf1e ]
+
+If of_device_register() returns an error, the of node and the
+name allocated in dev_set_name() are leaked. Call put_device()
+to give up the reference that was set in device_initialize(),
+so that the of node is put in logical_port_release() and the
+name is freed in kobject_cleanup().
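+
+The general rule this follows: once device_initialize() and dev_set_name()
+have run, a failed add/register must be undone with put_device() rather
+than kfree(), so that the release callback and kobject cleanup free the
+name and drop held references. A minimal sketch of that pattern (with a
+hypothetical foo_port device, not the ehea code):
+
+  static int foo_add_port(struct foo_port *port)
+  {
+          int ret;
+
+          device_initialize(&port->dev);
+          dev_set_name(&port->dev, "port%d", port->id);
+
+          ret = device_add(&port->dev);
+          if (ret) {
+                  put_device(&port->dev);  /* release path frees the name */
+                  return ret;
+          }
+          return 0;
+  }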
+
+Fixes: 1acf2318dd13 ("ehea: dynamic add / remove port")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20221025130011.1071357-1-yangyingliang@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ibm/ehea/ehea_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index f63066736425..28a5f8d73a61 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -2897,6 +2897,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
+ ret = of_device_register(&port->ofdev);
+ if (ret) {
+ pr_err("failed to register device. ret=%d\n", ret);
++ put_device(&port->ofdev.dev);
+ goto out;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 4984d740bb1e71f6e4b12ae1f3045d5c9dd6757c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Oct 2022 21:29:25 +0300
+Subject: net: enetc: survive memory pressure without crashing
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 84ce1ca3fe9e1249bf21176ff162200f1c4e5ed1 ]
+
+Under memory pressure, enetc_refill_rx_ring() may fail, and when called
+during the enetc_open() -> enetc_setup_rxbdr() procedure, this is not
+checked for.
+
+An extreme case of memory pressure will result in exactly zero buffers
+being allocated for the RX ring, and in such a case it is expected that
+hardware drops all RX packets due to lack of buffers.
+
+This does not happen, because the reset-default value of the consumer
+and producer indices is 0, which makes the ENETC think that all buffers
+have been initialized and that it owns them (when in reality none were).
+
+The hardware guide explains this best:
+
+| Configure the receive ring producer index register RBaPIR with a value
+| of 0. The producer index is initially configured by software but owned
+| by hardware after the ring has been enabled. Hardware increments the
+| index when a frame is received which may consume one or more BDs.
+| Hardware is not allowed to increment the producer index to match the
+| consumer index since it is used to indicate an empty condition. The ring
+| can hold at most RBLENR[LENGTH]-1 received BDs.
+|
+| Configure the receive ring consumer index register RBaCIR. The
+| consumer index is owned by software and updated during operation of the
+| of the BD ring by software, to indicate that any receive data occupied
+| in the BD has been processed and it has been prepared for new data.
+| - If consumer index and producer index are initialized to the same
+| value, it indicates that all BDs in the ring have been prepared and
+| hardware owns all of the entries.
+| - If consumer index is initialized to producer index plus N, it would
+| indicate N BDs have been prepared. Note that hardware cannot start if
+| only a single buffer is prepared due to the restrictions described in
+| (2).
+| - Software may write consumer index to match producer index anytime
+| while the ring is operational to indicate all received BDs prior have
+| been processed and new BDs prepared for hardware.
+
+Normally, the value of rx_ring->rcir (consumer index) is brought in sync
+with the rx_ring->next_to_use software index, but this only happens if
+page allocation ever succeeded.
+
+When PI==CI==0, the hardware appears to receive frames and write them to
+DMA address 0x0 (?!), then set the READY bit in the BD.
+
+The enetc_clean_rx_ring() function (and its XDP derivative) is naturally
+not prepared to handle such a condition. It will attempt to process
+those frames using the rx_swbd structure associated with index i of the
+RX ring, but that structure is not fully initialized (enetc_new_page()
+does all of that). So what happens next is undefined behavior.
+
+To operate using no buffer, we must initialize the CI to PI + 1, which
+will block the hardware from advancing the CI any further, and drop
+everything.
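+
+Condensed, the fix at ring setup time amounts to the following (a sketch of
+the hunk below; ENETC_RBPIR/ENETC_RBCIR are the per-ring producer/consumer
+index registers):
+
+  /* PI == CI == 0 would tell hardware that every BD is prepared;
+   * CI = PI + 1 advertises only a single prepared BD, too few for
+   * hardware to start on, so it drops frames instead of DMA-ing
+   * into uninitialized descriptors.
+   */
+  enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+  enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);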
+
+The issue was seen while adding support for zero-copy AF_XDP sockets,
+where buffer memory comes from user space, which can even decide to
+supply no buffers at all (example: "xdpsock --txonly"). However, the bug
+is present also with the network stack code, even though it would take a
+very determined person to trigger a page allocation failure at the
+perfect time (a series of ifup/ifdown under memory pressure should
+eventually reproduce it given enough retries).
+
+Fixes: d4fd0404c1c9 ("enetc: Introduce basic PF and VF ENETC ethernet drivers")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Link: https://lore.kernel.org/r/20221027182925.3256653-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 4af253825957..ca62c72eb772 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1241,7 +1241,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
++ /* Also prepare the consumer index in case page allocation never
++ * succeeds. In that case, hardware will never advance producer index
++ * to match consumer index, and will drop all frames.
++ */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
++ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
+
+ /* enable Rx ints by setting pkt thr to 1 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
+--
+2.35.1
+
--- /dev/null
+From f24ccb034070d9e1accf0c29ee4fd864d08925f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 10:05:52 +0200
+Subject: net: fec: limit register access on i.MX6UL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Juergen Borleis <jbe@pengutronix.de>
+
+[ Upstream commit 0a8b43b12dd78daa77a7dc007b92770d262a2714 ]
+
+Using 'ethtool -d […]' on an i.MX6UL leads to a kernel crash:
+
+ Unhandled fault: external abort on non-linefetch (0x1008) at […]
+
+because this SoC has fewer registers in its FEC implementation than other
+i.MX6 variants. Thus, a run-time decision is required to avoid accessing
+non-existent registers.
+
+Fixes: a51d3ab50702 ("net: fec: use a more proper compatible string for i.MX6UL type device")
+Signed-off-by: Juergen Borleis <jbe@pengutronix.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20221024080552.21004-1-jbe@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 46 ++++++++++++++++++++++-
+ 1 file changed, 44 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index d8bdaf2e5365..e183caf38176 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2251,6 +2251,31 @@ static u32 fec_enet_register_offset[] = {
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+ };
++/* for i.MX6ul */
++static u32 fec_enet_register_offset_6ul[] = {
++ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
++ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
++ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
++ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
++ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
++ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
++ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
++ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
++ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
++ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
++ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
++ RMON_T_P_GTE2048, RMON_T_OCTETS,
++ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
++ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
++ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
++ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
++ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
++ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
++ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
++ RMON_R_P_GTE2048, RMON_R_OCTETS,
++ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
++ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
++};
+ #else
+ static __u32 fec_enet_register_version = 1;
+ static u32 fec_enet_register_offset[] = {
+@@ -2275,7 +2300,24 @@ static void fec_enet_get_regs(struct net_device *ndev,
+ u32 *buf = (u32 *)regbuf;
+ u32 i, off;
+ int ret;
++#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
++ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
++ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
++ u32 *reg_list;
++ u32 reg_cnt;
+
++ if (!of_machine_is_compatible("fsl,imx6ul")) {
++ reg_list = fec_enet_register_offset;
++ reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
++ } else {
++ reg_list = fec_enet_register_offset_6ul;
++ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
++ }
++#else
++ /* coldfire */
++ static u32 *reg_list = fec_enet_register_offset;
++ static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
++#endif
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return;
+@@ -2284,8 +2326,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
+
+ memset(buf, 0, regs->len);
+
+- for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+- off = fec_enet_register_offset[i];
++ for (i = 0; i < reg_cnt; i++) {
++ off = reg_list[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+--
+2.35.1
+
--- /dev/null
+From 18e08a78e38185040b51d9c03402e79d286bba4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 10:42:13 +0800
+Subject: net: fix UAF issue in nfqnl_nf_hook_drop() when ops_init() failed
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit d266935ac43d57586e311a087510fe6a084af742 ]
+
+When the ops_init() interface is invoked to initialize the net but
+ops->init() fails, data is released; however, the pointer to it stored
+in net->gen remains. In this case, when nfqnl_nf_hook_drop() is invoked
+to release the net, it dereferences the stale pointer and an invalid
+address access occurs.
+
+The process is as follows:
+setup_net()
+ ops_init()
+ data = kzalloc(...) ---> alloc "data"
+ net_assign_generic() ---> assign "data" to ptr in net->gen
+ ...
+ ops->init() ---> failed
+ ...
+ kfree(data); ---> ptr in net->gen is invalid
+ ...
+ ops_exit_list()
+ ...
+ nfqnl_nf_hook_drop()
+ *q = nfnl_queue_pernet(net) ---> q is invalid
+
+The following is the Call Trace information:
+BUG: KASAN: use-after-free in nfqnl_nf_hook_drop+0x264/0x280
+Read of size 8 at addr ffff88810396b240 by task ip/15855
+Call Trace:
+<TASK>
+dump_stack_lvl+0x8e/0xd1
+print_report+0x155/0x454
+kasan_report+0xba/0x1f0
+nfqnl_nf_hook_drop+0x264/0x280
+nf_queue_nf_hook_drop+0x8b/0x1b0
+__nf_unregister_net_hook+0x1ae/0x5a0
+nf_unregister_net_hooks+0xde/0x130
+ops_exit_list+0xb0/0x170
+setup_net+0x7ac/0xbd0
+copy_net_ns+0x2e6/0x6b0
+create_new_namespaces+0x382/0xa50
+unshare_nsproxy_namespaces+0xa6/0x1c0
+ksys_unshare+0x3a4/0x7e0
+__x64_sys_unshare+0x2d/0x40
+do_syscall_64+0x35/0x80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+</TASK>
+
+Allocated by task 15855:
+kasan_save_stack+0x1e/0x40
+kasan_set_track+0x21/0x30
+__kasan_kmalloc+0xa1/0xb0
+__kmalloc+0x49/0xb0
+ops_init+0xe7/0x410
+setup_net+0x5aa/0xbd0
+copy_net_ns+0x2e6/0x6b0
+create_new_namespaces+0x382/0xa50
+unshare_nsproxy_namespaces+0xa6/0x1c0
+ksys_unshare+0x3a4/0x7e0
+__x64_sys_unshare+0x2d/0x40
+do_syscall_64+0x35/0x80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+Freed by task 15855:
+kasan_save_stack+0x1e/0x40
+kasan_set_track+0x21/0x30
+kasan_save_free_info+0x2a/0x40
+____kasan_slab_free+0x155/0x1b0
+slab_free_freelist_hook+0x11b/0x220
+__kmem_cache_free+0xa4/0x360
+ops_init+0xb9/0x410
+setup_net+0x5aa/0xbd0
+copy_net_ns+0x2e6/0x6b0
+create_new_namespaces+0x382/0xa50
+unshare_nsproxy_namespaces+0xa6/0x1c0
+ksys_unshare+0x3a4/0x7e0
+__x64_sys_unshare+0x2d/0x40
+do_syscall_64+0x35/0x80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+Fixes: f875bae06533 ("net: Automatically allocate per namespace data.")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/net_namespace.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index cbff7d94b993..a3b7d965e9c0 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -135,6 +135,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
+
+ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ {
++ struct net_generic *ng;
+ int err = -ENOMEM;
+ void *data = NULL;
+
+@@ -153,7 +154,13 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ if (!err)
+ return 0;
+
++ if (ops->id && ops->size) {
+ cleanup:
++ ng = rcu_dereference_protected(net->gen,
++ lockdep_is_held(&pernet_ops_rwsem));
++ ng->ptr[*ops->id] = NULL;
++ }
++
+ kfree(data);
+
+ out:
+--
+2.35.1
+
--- /dev/null
+From 26c3dd76121de4172aa11cc3eae3e1fcb7126e9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 17:57:51 +0800
+Subject: net: hinic: fix incorrect assignment issue in
+ hinic_set_interrupt_cfg()
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit c0605cd6750f2db9890c43a91ea4d77be8fb4908 ]
+
+The value of lli_credit_cnt is incorrectly assigned; fix it.
+
+Fixes: a0337c0dee68 ("hinic: add support to set and get irq coalesce")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+index 799b85c88eff..bcf2476512a5 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+@@ -892,7 +892,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+ if (err)
+ return -EINVAL;
+
+- interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
++ interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
+ interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+--
+2.35.1
+
--- /dev/null
+From 2dd7b86e537ec37d55cc914d25c6fa1faaddacdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 17:57:52 +0800
+Subject: net: hinic: fix memory leak when reading function table
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 4c1f602df8956bc0decdafd7e4fc7eef50c550b1 ]
+
+When the input parameter idx matches one of the handled switch cases in
+hinic_dbg_get_func_table(), read_data is not released. Fix it.
+
+Fixes: 5215e16244ee ("hinic: add support to query function table")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/huawei/hinic/hinic_debugfs.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+index 19eb839177ec..061952c6c21a 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+@@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+ struct tag_sml_funcfg_tbl *funcfg_table_elem;
+ struct hinic_cmd_lt_rd *read_data;
+ u16 out_size = sizeof(*read_data);
++ int ret = ~0;
+ int err;
+
+ read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
+@@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+
+ switch (idx) {
+ case VALID:
+- return funcfg_table_elem->dw0.bs.valid;
++ ret = funcfg_table_elem->dw0.bs.valid;
++ break;
+ case RX_MODE:
+- return funcfg_table_elem->dw0.bs.nic_rx_mode;
++ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
++ break;
+ case MTU:
+- return funcfg_table_elem->dw1.bs.mtu;
++ ret = funcfg_table_elem->dw1.bs.mtu;
++ break;
+ case RQ_DEPTH:
+- return funcfg_table_elem->dw13.bs.cfg_rq_depth;
++ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
++ break;
+ case QUEUE_NUM:
+- return funcfg_table_elem->dw13.bs.cfg_q_num;
++ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
++ break;
+ }
+
+ kfree(read_data);
+
+- return ~0;
++ return ret;
+ }
+
+ static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
+--
+2.35.1
+
--- /dev/null
+From 7fb3b030ebef40ecb5b61bb75057a14a0085ce02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 17:57:53 +0800
+Subject: net: hinic: fix the issue of CMDQ memory leaks
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 363cc87767f6ddcfb9158ad2e2afa2f8d5c4b94e ]
+
+When hinic_set_cmdq_depth() fails in hinic_init_cmdqs(), the cmdq memory is
+not released correctly. Fix it.
+
+Fixes: 72ef908bb3ff ("hinic: add three net_device_ops of vf")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+index 21b8235952d3..dff979f5d08b 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+@@ -929,7 +929,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
+
+ err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+-
++ free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
+ err_cmdq_ctxt:
+ hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+--
+2.35.1
+
--- /dev/null
+From 3a7fb0a9d620b698791087b46a4a9e76579c23f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 17:57:54 +0800
+Subject: net: hinic: fix the issue of double release MBOX callback of VF
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 8ec2f4c6b2e11a4249bba77460f0cfe6d95a82f8 ]
+
+In hinic_vf_func_init(), if the VF fails to register information with the
+PF through the MBOX, the VF's MBOX callback is released once there, and
+then released again in hinic_init_hwdev(). Remove the release in
+hinic_vf_func_init().
+
+Fixes: 7dd29ee12865 ("hinic: add sriov feature support")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+index f8a26459ff65..4d82ebfe27f9 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+@@ -1178,7 +1178,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+- hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ return -EIO;
+ }
+ } else {
+--
+2.35.1
+
--- /dev/null
+From 819ffea071af2820cce3b1c4638699505ac8d302 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 21:13:38 +0800
+Subject: net: ksz884x: fix missing pci_disable_device() on error in
+ pcidev_init()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 5da6d65590a0698199df44d095e54b0ed1708178 ]
+
+pci_disable_device() needs to be called when the module exits. Switch to
+pcim_enable_device(), so that pci_disable_device() is called in
+pcim_release() when the device is unbound.
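+
+A minimal sketch of the managed-PCI pattern this switches to (illustrative
+only; foo_probe() is a made-up probe function, not the ksz884x code):
+
+  static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+  {
+          int ret;
+
+          ret = pcim_enable_device(pdev);  /* devres-managed enable */
+          if (ret)
+                  return ret;
+
+          /* No pci_disable_device() is needed in later error paths or in
+           * remove(): pcim_release() disables the device on unbind.
+           */
+          return 0;
+  }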
+
+Fixes: 8ca86fd83eae ("net: Micrel KSZ8841/2 PCI Ethernet driver")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20221024131338.2848959-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/micrel/ksz884x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
+index 9ed264ed7070..1fa16064142d 100644
+--- a/drivers/net/ethernet/micrel/ksz884x.c
++++ b/drivers/net/ethernet/micrel/ksz884x.c
+@@ -6923,7 +6923,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
+ char banner[sizeof(version)];
+ struct ksz_switch *sw = NULL;
+
+- result = pci_enable_device(pdev);
++ result = pcim_enable_device(pdev);
+ if (result)
+ return result;
+
+--
+2.35.1
+
--- /dev/null
+From 8af7c5c4b455f6a71897d0f4462701cc5f125020 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 09:32:24 +0800
+Subject: net: lantiq_etop: don't free skb when returning NETDEV_TX_BUSY
+
+From: Zhang Changzhong <zhangchangzhong@huawei.com>
+
+[ Upstream commit 9c1eaa27ec599fcc25ed4970c0b73c247d147a2b ]
+
+The ndo_start_xmit() method must not free the skb when returning
+NETDEV_TX_BUSY, since the caller is going to requeue the skb and would
+then operate on already-freed memory.
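+
+A minimal sketch of the expected contract on a full TX ring (illustrative
+only; foo_tx_ring_full() is a made-up helper, not the lantiq_etop code):
+
+  static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+  {
+          if (foo_tx_ring_full(dev)) {
+                  netif_stop_queue(dev);
+                  /* leave the skb untouched: the core will requeue it */
+                  return NETDEV_TX_BUSY;
+          }
+
+          /* ... map and queue the skb for DMA ... */
+          return NETDEV_TX_OK;
+  }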
+
+Fixes: 504d4721ee8e ("MIPS: Lantiq: Add ethernet driver")
+Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/lantiq_etop.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index 2d0c52f7106b..5ea626b1e578 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -466,7 +466,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+- dev_kfree_skb_any(skb);
+ netdev_err(dev, "tx ring full\n");
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+--
+2.35.1
+
--- /dev/null
+From dac08efa4dcc1183143d32e66af8132b67d67a0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Oct 2022 14:51:49 +0100
+Subject: net/mlx5: Fix crash during sync firmware reset
+
+From: Suresh Devarakonda <ramad@nvidia.com>
+
+[ Upstream commit aefb62a9988749703435e941704624949a80a2a9 ]
+
+When setting Bluefield to DPU NIC mode using the mlxconfig tool plus the
+sync firmware reset flow, we run into a scenario where the host was not
+the eswitch manager at the time of mlx5 driver load but becomes the
+eswitch manager after the sync firmware reset flow. This results in a
+NULL pointer access of the mpfs structure during MAC filter add. This
+change prevents the NULL pointer access, but mpfs table entries will
+still not be added.
+
+Fixes: 5ec697446f46 ("net/mlx5: Add support for devlink reload action fw activate")
+Signed-off-by: Suresh Devarakonda <ramad@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Reviewed-by: Bodong Wang <bodong@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Link: https://lore.kernel.org/r/20221026135153.154807-12-saeed@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+index 839a01da110f..8ff16318e32d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+@@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+
+- if (!MLX5_ESWITCH_MANAGER(dev))
++ if (!mpfs)
+ return;
+
+ WARN_ON(!hlist_empty(mpfs->hash));
+@@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
+ int err = 0;
+ u32 index;
+
+- if (!MLX5_ESWITCH_MANAGER(dev))
++ if (!mpfs)
+ return 0;
+
+ mutex_lock(&mpfs->lock);
+@@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
+ int err = 0;
+ u32 index;
+
+- if (!MLX5_ESWITCH_MANAGER(dev))
++ if (!mpfs)
+ return 0;
+
+ mutex_lock(&mpfs->lock);
+--
+2.35.1
+
--- /dev/null
+From bffaaa874cdb7299c313f9d6febff3f6a82cfc1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Oct 2022 14:51:45 +0100
+Subject: net/mlx5: Fix possible use-after-free in async command interface
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit bacd22df95147ed673bec4692ab2d4d585935241 ]
+
+mlx5_cmd_cleanup_async_ctx should return only after all of its callback
+handlers have completed. Before this patch, the race below between
+mlx5_cmd_cleanup_async_ctx and mlx5_cmd_exec_cb_handler was possible and
+led to a use-after-free:
+
+1. mlx5_cmd_cleanup_async_ctx is called while num_inflight is 2 (i.e.
+ elevated by 1, a single inflight callback).
+2. mlx5_cmd_cleanup_async_ctx decreases num_inflight to 1.
+3. mlx5_cmd_exec_cb_handler is called, decreases num_inflight to 0 and
+ is about to call wake_up().
+4. mlx5_cmd_cleanup_async_ctx calls wait_event, which returns
+ immediately as the condition (num_inflight == 0) holds.
+5. mlx5_cmd_cleanup_async_ctx returns.
+6. The caller of mlx5_cmd_cleanup_async_ctx frees the mlx5_async_ctx
+ object.
+7. mlx5_cmd_exec_cb_handler goes on and calls wake_up() on the freed
+ object.
+
+Fix it by syncing using a completion object. Mark it completed when
+num_inflight reaches 0.
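+
+Reduced to its core, the synchronization scheme looks like this (a
+simplified sketch of the pattern, not the actual mlx5 structures):
+
+  struct async_ctx {
+          atomic_t num_inflight;           /* initialized to 1 */
+          struct completion inflight_done;
+  };
+
+  static void async_cb_done(struct async_ctx *ctx)
+  {
+          /* the last callback signals the completion cleanup waits on */
+          if (atomic_dec_and_test(&ctx->num_inflight))
+                  complete(&ctx->inflight_done);
+  }
+
+  static void async_ctx_cleanup(struct async_ctx *ctx)
+  {
+          /* drop the initial reference; wait only if callbacks remain */
+          if (!atomic_dec_and_test(&ctx->num_inflight))
+                  wait_for_completion(&ctx->inflight_done);
+  }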
+
+Trace:
+
+BUG: KASAN: use-after-free in do_raw_spin_lock+0x23d/0x270
+Read of size 4 at addr ffff888139cd12f4 by task swapper/5/0
+
+CPU: 5 PID: 0 Comm: swapper/5 Not tainted 6.0.0-rc3_for_upstream_debug_2022_08_30_13_10 #1
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x57/0x7d
+ print_report.cold+0x2d5/0x684
+ ? do_raw_spin_lock+0x23d/0x270
+ kasan_report+0xb1/0x1a0
+ ? do_raw_spin_lock+0x23d/0x270
+ do_raw_spin_lock+0x23d/0x270
+ ? rwlock_bug.part.0+0x90/0x90
+ ? __delete_object+0xb8/0x100
+ ? lock_downgrade+0x6e0/0x6e0
+ _raw_spin_lock_irqsave+0x43/0x60
+ ? __wake_up_common_lock+0xb9/0x140
+ __wake_up_common_lock+0xb9/0x140
+ ? __wake_up_common+0x650/0x650
+ ? destroy_tis_callback+0x53/0x70 [mlx5_core]
+ ? kasan_set_track+0x21/0x30
+ ? destroy_tis_callback+0x53/0x70 [mlx5_core]
+ ? kfree+0x1ba/0x520
+ ? do_raw_spin_unlock+0x54/0x220
+ mlx5_cmd_exec_cb_handler+0x136/0x1a0 [mlx5_core]
+ ? mlx5_cmd_cleanup_async_ctx+0x220/0x220 [mlx5_core]
+ ? mlx5_cmd_cleanup_async_ctx+0x220/0x220 [mlx5_core]
+ mlx5_cmd_comp_handler+0x65a/0x12b0 [mlx5_core]
+ ? dump_command+0xcc0/0xcc0 [mlx5_core]
+ ? lockdep_hardirqs_on_prepare+0x400/0x400
+ ? cmd_comp_notifier+0x7e/0xb0 [mlx5_core]
+ cmd_comp_notifier+0x7e/0xb0 [mlx5_core]
+ atomic_notifier_call_chain+0xd7/0x1d0
+ mlx5_eq_async_int+0x3ce/0xa20 [mlx5_core]
+ atomic_notifier_call_chain+0xd7/0x1d0
+ ? irq_release+0x140/0x140 [mlx5_core]
+ irq_int_handler+0x19/0x30 [mlx5_core]
+ __handle_irq_event_percpu+0x1f2/0x620
+ handle_irq_event+0xb2/0x1d0
+ handle_edge_irq+0x21e/0xb00
+ __common_interrupt+0x79/0x1a0
+ common_interrupt+0x78/0xa0
+ </IRQ>
+ <TASK>
+ asm_common_interrupt+0x22/0x40
+RIP: 0010:default_idle+0x42/0x60
+Code: c1 83 e0 07 48 c1 e9 03 83 c0 03 0f b6 14 11 38 d0 7c 04 84 d2 75 14 8b 05 eb 47 22 02 85 c0 7e 07 0f 00 2d e0 9f 48 00 fb f4 <c3> 48 c7 c7 80 08 7f 85 e8 d1 d3 3e fe eb de 66 66 2e 0f 1f 84 00
+RSP: 0018:ffff888100dbfdf0 EFLAGS: 00000242
+RAX: 0000000000000001 RBX: ffffffff84ecbd48 RCX: 1ffffffff0afe110
+RDX: 0000000000000004 RSI: 0000000000000000 RDI: ffffffff835cc9bc
+RBP: 0000000000000005 R08: 0000000000000001 R09: ffff88881dec4ac3
+R10: ffffed1103bd8958 R11: 0000017d0ca571c9 R12: 0000000000000005
+R13: ffffffff84f024e0 R14: 0000000000000000 R15: dffffc0000000000
+ ? default_idle_call+0xcc/0x450
+ default_idle_call+0xec/0x450
+ do_idle+0x394/0x450
+ ? arch_cpu_idle_exit+0x40/0x40
+ ? do_idle+0x17/0x450
+ cpu_startup_entry+0x19/0x20
+ start_secondary+0x221/0x2b0
+ ? set_cpu_sibling_map+0x2070/0x2070
+ secondary_startup_64_no_verify+0xcd/0xdb
+ </TASK>
+
+Allocated by task 49502:
+ kasan_save_stack+0x1e/0x40
+ __kasan_kmalloc+0x81/0xa0
+ kvmalloc_node+0x48/0xe0
+ mlx5e_bulk_async_init+0x35/0x110 [mlx5_core]
+ mlx5e_tls_priv_tx_list_cleanup+0x84/0x3e0 [mlx5_core]
+ mlx5e_ktls_cleanup_tx+0x38f/0x760 [mlx5_core]
+ mlx5e_cleanup_nic_tx+0xa7/0x100 [mlx5_core]
+ mlx5e_detach_netdev+0x1ca/0x2b0 [mlx5_core]
+ mlx5e_suspend+0xdb/0x140 [mlx5_core]
+ mlx5e_remove+0x89/0x190 [mlx5_core]
+ auxiliary_bus_remove+0x52/0x70
+ device_release_driver_internal+0x40f/0x650
+ driver_detach+0xc1/0x180
+ bus_remove_driver+0x125/0x2f0
+ auxiliary_driver_unregister+0x16/0x50
+ mlx5e_cleanup+0x26/0x30 [mlx5_core]
+ cleanup+0xc/0x4e [mlx5_core]
+ __x64_sys_delete_module+0x2b5/0x450
+ do_syscall_64+0x3d/0x90
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+Freed by task 49502:
+ kasan_save_stack+0x1e/0x40
+ kasan_set_track+0x21/0x30
+ kasan_set_free_info+0x20/0x30
+ ____kasan_slab_free+0x11d/0x1b0
+ kfree+0x1ba/0x520
+ mlx5e_tls_priv_tx_list_cleanup+0x2e7/0x3e0 [mlx5_core]
+ mlx5e_ktls_cleanup_tx+0x38f/0x760 [mlx5_core]
+ mlx5e_cleanup_nic_tx+0xa7/0x100 [mlx5_core]
+ mlx5e_detach_netdev+0x1ca/0x2b0 [mlx5_core]
+ mlx5e_suspend+0xdb/0x140 [mlx5_core]
+ mlx5e_remove+0x89/0x190 [mlx5_core]
+ auxiliary_bus_remove+0x52/0x70
+ device_release_driver_internal+0x40f/0x650
+ driver_detach+0xc1/0x180
+ bus_remove_driver+0x125/0x2f0
+ auxiliary_driver_unregister+0x16/0x50
+ mlx5e_cleanup+0x26/0x30 [mlx5_core]
+ cleanup+0xc/0x4e [mlx5_core]
+ __x64_sys_delete_module+0x2b5/0x450
+ do_syscall_64+0x3d/0x90
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+Fixes: e355477ed9e4 ("net/mlx5: Make mlx5_cmd_exec_cb() a safe API")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Link: https://lore.kernel.org/r/20221026135153.154807-8-saeed@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 10 +++++-----
+ include/linux/mlx5/driver.h | 2 +-
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 94426d29025e..6612b2c0be48 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1853,7 +1853,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+ ctx->dev = dev;
+ /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
+ atomic_set(&ctx->num_inflight, 1);
+- init_waitqueue_head(&ctx->wait);
++ init_completion(&ctx->inflight_done);
+ }
+ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
+
+@@ -1867,8 +1867,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
+ */
+ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
+ {
+- atomic_dec(&ctx->num_inflight);
+- wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
++ if (!atomic_dec_and_test(&ctx->num_inflight))
++ wait_for_completion(&ctx->inflight_done);
+ }
+ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
+
+@@ -1879,7 +1879,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
+
+ work->user_callback(status, work);
+ if (atomic_dec_and_test(&ctx->num_inflight))
+- wake_up(&ctx->wait);
++ complete(&ctx->inflight_done);
+ }
+
+ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+@@ -1895,7 +1895,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+ mlx5_cmd_exec_cb_handler, work, false);
+ if (ret && atomic_dec_and_test(&ctx->num_inflight))
+- wake_up(&ctx->wait);
++ complete(&ctx->inflight_done);
+
+ return ret;
+ }
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 41fbb4793394..ae88362216a4 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -899,7 +899,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
+ struct mlx5_async_ctx {
+ struct mlx5_core_dev *dev;
+ atomic_t num_inflight;
+- struct wait_queue_head wait;
++ struct completion inflight_done;
+ };
+
+ struct mlx5_async_work;
+--
+2.35.1
+
--- /dev/null
+From 3d76c7870c91a163b1457f5459db10dbef82ed24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Oct 2022 14:51:39 +0100
+Subject: net/mlx5e: Do not increment ESN when updating IPsec ESN state
+
+From: Hyong Youb Kim <hyonkim@cisco.com>
+
+[ Upstream commit 888be6b279b7257b5f6e4c9527675bff0a335596 ]
+
+An offloaded SA stops receiving after about 2^32 + replay_window
+packets. For example, when SA reaches <seq-hi 0x1, seq 0x2c>, all
+subsequent packets get dropped with SA-icv-failure (integrity_failed).
+
+To reproduce the bug:
+- ConnectX-6 Dx with crypto enabled (FW 22.30.1004)
+- ipsec.conf:
+ nic-offload = yes
+ replay-window = 32
+ esn = yes
+ salifetime=24h
+- Run netperf for a long time to send more than 2^32 packets
+ netperf -H <device-under-test> -t TCP_STREAM -l 20000
+
+When 2^32 + replay_window packets are received, the replay window
+moves from the 2nd half of subspace (overlap=1) to the 1st half
+(overlap=0). The driver then updates the 'esn' value in NIC
+(i.e. seq_hi) as follows.
+
+ seq_hi = xfrm_replay_seqhi(seq_bottom)
+ new esn in NIC = seq_hi + 1
+
+The +1 increment is wrong, as seq_hi already contains the correct
+seq_hi. For example, when seq_hi=1, the driver actually tells NIC to
+use seq_hi=2 (esn). This incorrect esn value causes all subsequent
+packets to fail integrity checks (SA-icv-failure). So, do not
+increment.
+
+Fixes: cb01008390bb ("net/mlx5: IPSec, Add support for ESN")
+Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
+Acked-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Link: https://lore.kernel.org/r/20221026135153.154807-2-saeed@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 26f7fab109d9..d08bd22dc569 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -113,7 +113,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
+ struct xfrm_replay_state_esn *replay_esn;
+ u32 seq_bottom = 0;
+ u8 overlap;
+- u32 *esn;
+
+ if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
+ sa_entry->esn_state.trigger = 0;
+@@ -128,11 +127,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
+
+ sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
+ htonl(seq_bottom));
+- esn = &sa_entry->esn_state.esn;
+
+ sa_entry->esn_state.trigger = 1;
+ if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
+- ++(*esn);
+ sa_entry->esn_state.overlap = 0;
+ return true;
+ } else if (unlikely(!overlap &&
+--
+2.35.1
+
--- /dev/null
+From d4722366c55c89dd2e4b46fa5ffeba43a95a76ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 14:41:04 +0800
+Subject: net: netsec: fix error handling in netsec_register_mdio()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 94423589689124e8cd145b38a1034be7f25835b2 ]
+
+If phy_device_register() fails, phy_device_free() needs to be called to
+put the refcount, so that the memory of the phy device and the device
+name can be freed in the callback function.
+
+If get_phy_device() fails, mdiobus_unregister() needs to be called,
+otherwise mdiobus_free() will trigger a warning and the kobject is leaked.
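+
+Put together, the unwind order looks roughly like this (a sketch with a
+made-up foo_register_mdio() helper; the real code also deals with the OF
+registration path):
+
+  static int foo_register_mdio(struct mii_bus *bus, int phy_addr)
+  {
+          struct phy_device *phydev;
+          int ret;
+
+          ret = mdiobus_register(bus);
+          if (ret)
+                  return ret;
+
+          phydev = get_phy_device(bus, phy_addr, false);
+          if (IS_ERR(phydev)) {
+                  mdiobus_unregister(bus);  /* else mdiobus_free() WARNs */
+                  return PTR_ERR(phydev);
+          }
+
+          ret = phy_device_register(phydev);
+          if (ret) {
+                  phy_device_free(phydev);  /* drop get_phy_device() ref */
+                  mdiobus_unregister(bus);
+                  return ret;
+          }
+
+          return 0;
+  }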
+
+Fixes: 533dd11a12f6 ("net: socionext: Add Synquacer NetSec driver")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20221019064104.3228892-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/socionext/netsec.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index ef3634d1b9f7..b9acee214bb6 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1958,11 +1958,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
++ mdiobus_unregister(bus);
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
++ phy_device_free(priv->phydev);
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+--
+2.35.1
+
--- /dev/null
+From 46b7605eb26f426c726b670bb21044bbe9c6efc5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 12:09:52 +0200
+Subject: nh: fix scope used to find saddr when adding non gw nh
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit bac0f937c343d651874f83b265ca8f5070ed4f06 ]
+
+As explained by Julian, fib_nh_scope is related to fib_nh_gw4, but
+fib_info_update_nhc_saddr() needs the scope of the route, which is
+the scope "before" fib_nh_scope, ie fib_nh_scope - 1.
+
+This patch fixes the problem described in commit 747c14307214 ("ip: fix
+dflt addr selection for connected nexthop").
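+
+For a directly connected (gatewayless) nexthop the numbers work out as
+follows (illustrative values from the RT_SCOPE_* enum, assuming the nexthop
+scope was set to RT_SCOPE_HOST during validation):
+
+  /* fib_nh_scope       = RT_SCOPE_HOST (254)
+   * scope of the route = RT_SCOPE_LINK (253) = fib_nh_scope - 1
+   * so source address selection must use fib_nh_scope - 1
+   */
+  fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
+                            !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);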
+
+Fixes: 597cfe4fc339 ("nexthop: Add support for IPv4 nexthops")
+Link: https://lore.kernel.org/netdev/6c8a44ba-c2d5-cdf-c5c7-5baf97cba38@ssi.bg/
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Reviewed-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/nexthop.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 2a17dc9413ae..7a0102a4b1de 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1346,7 +1346,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
+ if (!err) {
+ nh->nh_flags = fib_nh->fib_nh_flags;
+ fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
+- fib_nh->fib_nh_scope);
++ !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
+ } else {
+ fib_nh_release(net, fib_nh);
+ }
+--
+2.35.1
+
--- /dev/null
+From 34456680dc919aa6965ca758974b8012a8c0704b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Oct 2022 06:50:17 -0400
+Subject: openvswitch: switch from WARN to pr_warn
+
+From: Aaron Conole <aconole@redhat.com>
+
+[ Upstream commit fd954cc1919e35cb92f78671cab6e42d661945a3 ]
+
+As noted by Paolo Abeni, pr_warn doesn't generate any splat and still
+preserves the warning to the user that a feature downgrade occurred. We
+likely cannot introduce other kinds of checks / enforcement here because
+syzbot can send different genl versions to the datapath.
+
+Reported-by: syzbot+31cde0bef4bbf8ba2d86@syzkaller.appspotmail.com
+Fixes: 44da5ae5fbea ("openvswitch: Drop user features if old user space attempted to create datapath")
+Cc: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Aaron Conole <aconole@redhat.com>
+Acked-by: Ilya Maximets <i.maximets@ovn.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/datapath.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 6b5c0abf7f1b..7ed97dc0b561 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1592,7 +1592,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb,
+ if (IS_ERR(dp))
+ return;
+
+- WARN(dp->user_features, "Dropping previously announced user features\n");
++ pr_warn("%s: Dropping previously announced user features\n",
++ ovs_dp_name(dp));
+ dp->user_features = 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 7e1031624c7a637766c551b7485a9e1a4b293d08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Jul 2022 17:11:19 +0300
+Subject: perf/x86/intel/lbr: Use setup_clear_cpu_cap() instead of
+ clear_cpu_cap()
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit b329f5ddc9ce4b622d9c7aaf5c6df4de52caf91a ]
+
+clear_cpu_cap(&boot_cpu_data) is very similar to setup_clear_cpu_cap()
+except that the latter also sets a bit in 'cpu_caps_cleared' which
+later clears the same cap in secondary cpus, which is likely what is
+meant here.
+
+Fixes: 47125db27e47 ("perf/x86/intel/lbr: Support Architectural LBR")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
+Link: https://lkml.kernel.org/r/20220718141123.136106-2-mlevitsk@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/lbr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index 42173a7be3bb..4b6c39c5facb 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1847,7 +1847,7 @@ void __init intel_pmu_arch_lbr_init(void)
+ return;
+
+ clear_arch_lbr:
+- clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
++ setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
+ }
+
+ /**
+--
+2.35.1
+
--- /dev/null
+From 49413d8748bb8f1deb43795a652ff9608cac5f35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Oct 2022 13:34:32 +0100
+Subject: PM: domains: Fix handling of unavailable/disabled idle states
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+[ Upstream commit e0c57a5c70c13317238cb19a7ded0eab4a5f7de5 ]
+
+Platforms can provide information about the availability of each idle
+state via the status flag. Platforms may have to disable one or more
+idle states for various reasons, such as broken firmware or other unmet
+dependencies.
+
+Fix handling of such unavailable/disabled idle states by ignoring them
+while parsing the states.
+
+Fixes: a3381e3a65cb ("PM / domains: Fix up domain-idle-states OF parsing")
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/domain.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 743268996336..d0ba5459ce0b 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -2789,6 +2789,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
+ np = it.node;
+ if (!of_match_node(idle_state_match, np))
+ continue;
++
++ if (!of_device_is_available(np))
++ continue;
++
+ if (states) {
+ ret = genpd_parse_state(&states[i], np);
+ if (ret) {
+--
+2.35.1
+
--- /dev/null
+From 25e5ddb3a4fb0cffb753e4a6432521c78114f4b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 22:50:17 -0500
+Subject: PM: hibernate: Allow hybrid sleep to work with s2idle
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 85850af4fc47132f3f2f0dd698b90f67906600b4 ]
+
+Hybrid sleep is currently hardcoded to only operate with S3 even
+on systems that might not support it.
+
+Instead of assuming this mode is what the user wants to use, make
+hybrid sleep follow the setting of `mem_sleep_current`, which
+respects the mem_sleep_default kernel command line option and policy
+decisions made by the presence of the FADT low power idle bit.
+
+Fixes: 81d45bdf8913 ("PM / hibernate: Untangle power_down()")
+Reported-and-tested-by: kolAflash <kolAflash@kolahilft.de>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216574
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/power/hibernate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 522cb1387462..59a1b126c369 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -637,7 +637,7 @@ static void power_down(void)
+ int error;
+
+ if (hibernation_mode == HIBERNATION_SUSPEND) {
+- error = suspend_devices_and_enter(PM_SUSPEND_MEM);
++ error = suspend_devices_and_enter(mem_sleep_current);
+ if (error) {
+ hibernation_mode = hibernation_ops ?
+ HIBERNATION_PLATFORM :
+--
+2.35.1
+
mm-memory-add-non-anonymous-page-check-in-the-copy_present_page.patch
mm-hugetlb-take-hugetlb_lock-before-decrementing-h-resv_huge_pages.patch
net-ieee802154-fix-error-return-code-in-dgram_bind.patch
+media-v4l2-fix-v4l2_i2c_subdev_set_name-function-doc.patch
+media-atomisp-pci-reposition-braces-as-per-coding-st.patch
+media-atomisp-prevent-integer-overflow-in-sh_css_set.patch
+drm-msm-fix-return-type-of-mdp4_lvds_connector_mode_.patch
+asoc-qcom-lpass-cpu-mark-hdmi-tx-registers-as-volati.patch
+arc-iounmap-arg-is-volatile.patch
+asoc-qcom-lpass-cpu-mark-hdmi-tx-parity-register-as-.patch
+alsa-ac97-fix-possible-memory-leak-in-snd_ac97_dev_r.patch
+perf-x86-intel-lbr-use-setup_clear_cpu_cap-instead-o.patch
+tipc-fix-a-null-ptr-deref-in-tipc_topsrv_accept.patch
+net-netsec-fix-error-handling-in-netsec_register_mdi.patch
+net-hinic-fix-incorrect-assignment-issue-in-hinic_se.patch
+net-hinic-fix-memory-leak-when-reading-function-tabl.patch
+net-hinic-fix-the-issue-of-cmdq-memory-leaks.patch
+net-hinic-fix-the-issue-of-double-release-mbox-callb.patch
+x86-unwind-orc-fix-unreliable-stack-dump-with-gcov.patch
+amd-xgbe-fix-the-sfp-compliance-codes-check-for-dac-.patch
+amd-xgbe-add-the-bit-rate-quirk-for-molex-cables.patch
+atlantic-fix-deadlock-at-aq_nic_stop.patch
+kcm-annotate-data-races-around-kcm-rx_psock.patch
+kcm-annotate-data-races-around-kcm-rx_wait.patch
+net-fix-uaf-issue-in-nfqnl_nf_hook_drop-when-ops_ini.patch
+net-lantiq_etop-don-t-free-skb-when-returning-netdev.patch
+tcp-minor-optimization-in-tcp_add_backlog.patch
+tcp-fix-a-signed-integer-overflow-bug-in-tcp_add_bac.patch
+tcp-fix-indefinite-deferral-of-rto-with-sack-renegin.patch
+can-mscan-mpc5xxx-mpc5xxx_can_probe-add-missing-put_.patch
+can-mcp251x-mcp251x_can_probe-add-missing-unregister.patch
+pm-hibernate-allow-hybrid-sleep-to-work-with-s2idle.patch
+media-vivid-s_fbuf-add-more-sanity-checks.patch
+media-vivid-dev-bitmap_cap-wasn-t-freed-in-all-cases.patch
+media-v4l2-dv-timings-add-sanity-checks-for-blanking.patch
+media-videodev2.h-v4l2_dv_bt_blanking_height-should-.patch
+media-vivid-set-num_in-outputs-to-0-if-not-supported.patch
+ipv6-ensure-sane-device-mtu-in-tunnels.patch
+i40e-fix-ethtool-rx-flow-hash-setting-for-x722.patch
+i40e-fix-vf-hang-when-reset-is-triggered-on-another-.patch
+i40e-fix-flow-type-by-setting-gl_hash_inset-register.patch
+net-ksz884x-fix-missing-pci_disable_device-on-error-.patch
+pm-domains-fix-handling-of-unavailable-disabled-idle.patch
+net-fec-limit-register-access-on-i.mx6ul.patch
+alsa-aoa-i2sbus-fix-possible-memory-leak-in-i2sbus_a.patch
+alsa-aoa-fix-i2s-device-accounting.patch
+openvswitch-switch-from-warn-to-pr_warn.patch
+net-ehea-fix-possible-memory-leak-in-ehea_register_p.patch
+nh-fix-scope-used-to-find-saddr-when-adding-non-gw-n.patch
+net-mlx5e-do-not-increment-esn-when-updating-ipsec-e.patch
+net-mlx5-fix-possible-use-after-free-in-async-comman.patch
+net-mlx5-fix-crash-during-sync-firmware-reset.patch
+net-enetc-survive-memory-pressure-without-crashing.patch
+arm64-add-ampere1-to-the-spectre-bhb-affected-list.patch
--- /dev/null
+From fcae209f4221e5f0facd8ee678b7b60a1eba5d90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 12:06:22 +0800
+Subject: tcp: fix a signed-integer-overflow bug in tcp_add_backlog()
+
+From: Lu Wei <luwei32@huawei.com>
+
+[ Upstream commit ec791d8149ff60c40ad2074af3b92a39c916a03f ]
+
+The type of sk_rcvbuf and sk_sndbuf in struct sock is int, and in
+tcp_add_backlog() the variable limit is calculated by adding sk_rcvbuf,
+sk_sndbuf and 64 * 1024, so it may exceed the maximum value of int and
+overflow. This patch reduces the limit budget by halving the sndbuf to
+solve this issue, since ACK packets are much smaller than the payload.
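+
+A toy illustration of the arithmetic (pseudo-C, with extreme values chosen
+only to show the wrap; not kernel code):
+
+  int rcvbuf = INT_MAX, sndbuf = INT_MAX;
+
+  /* before: the additions are done in (signed) int and overflow */
+  u32 bad  = rcvbuf + sndbuf + 64 * 1024;
+
+  /* after: unsigned arithmetic, and only half of sndbuf is budgeted */
+  u32 good = (u32)rcvbuf + (u32)(sndbuf >> 1) + 64 * 1024;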
+
+Fixes: c9c3321257e1 ("tcp: add tcp_add_backlog()")
+Signed-off-by: Lu Wei <luwei32@huawei.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_ipv4.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 78cef6930484..31a8009f74ee 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1873,11 +1873,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ __skb_push(skb, hdrlen);
+
+ no_coalesce:
++ limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
++
+ /* Only socket owner can try to collapse/prune rx queues
+ * to reduce memory overhead, so add a little headroom here.
+ * Few sockets backlog are possibly concurrently non empty.
+ */
+- limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
++ limit += 64 * 1024;
+
+ if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ bh_unlock_sock(sk);
+--
+2.35.1
+
--- /dev/null
+From da9cf967bc52c582e053d0e9a7cb62a6a205d943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 17:08:21 +0000
+Subject: tcp: fix indefinite deferral of RTO with SACK reneging
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 3d2af9cce3133b3bc596a9d065c6f9d93419ccfb ]
+
+This commit fixes a bug that can cause a TCP data sender to repeatedly
+defer RTOs when encountering SACK reneging.
+
+The bug is that when we're in fast recovery in a scenario with SACK
+reneging, every time we get an ACK we call tcp_check_sack_reneging()
+and it can note the apparent SACK reneging and rearm the RTO timer for
+srtt/2 into the future. In some SACK reneging scenarios that can
+happen repeatedly until the receive window fills up, at which point
+the sender can't send any more, the ACKs stop arriving, and the RTO
+fires at srtt/2 after the last ACK. But that can take far too long
+(O(10 secs)), since the connection is stuck in fast recovery with a
+low cwnd that cannot grow beyond ssthresh, even if more bandwidth is
+available.
+
+This fix changes the logic in tcp_check_sack_reneging() to only rearm
+the RTO timer if data is cumulatively ACKed, indicating forward
+progress. This avoids this kind of nearly infinite loop of RTO timer
+re-arming. In addition, this meets the goals of
+tcp_check_sack_reneging() in handling Windows TCP behavior that looks
+temporarily like SACK reneging but is not really.
+
+Many thanks to Jakub Kicinski and Neil Spring, who reported this issue
+and provided critical packet traces that enabled root-causing this
+issue. Also, many thanks to Jakub Kicinski for testing this fix.
+
+Fixes: 5ae344c949e7 ("tcp: reduce spurious retransmits due to transient SACK reneging")
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Reported-by: Neil Spring <ntspring@fb.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Tested-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20221021170821.1093930-1-ncardwell.kernel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 377cba9b124d..541758cd0b81 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2175,7 +2175,8 @@ void tcp_enter_loss(struct sock *sk)
+ */
+ static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+ {
+- if (flag & FLAG_SACK_RENEGING) {
++ if (flag & FLAG_SACK_RENEGING &&
++ flag & FLAG_SND_UNA_ADVANCED) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
+ msecs_to_jiffies(10));
+--
+2.35.1
+
--- /dev/null
+From f32e844a839fbd423d742ca4dd826aaf9cc6c8ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Nov 2021 11:02:30 -0800
+Subject: tcp: minor optimization in tcp_add_backlog()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d519f350967a60b85a574ad8aeac43f2b4384746 ]
+
+If the packet is going to be coalesced, the sk_sndbuf/sk_rcvbuf values
+are not used. Defer their access to the point where we need them.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: ec791d8149ff ("tcp: fix a signed-integer-overflow bug in tcp_add_backlog()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_ipv4.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 5c1e6b0687e2..78cef6930484 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1770,8 +1770,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
+
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ {
+- u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+- u32 tail_gso_size, tail_gso_segs;
++ u32 limit, tail_gso_size, tail_gso_segs;
+ struct skb_shared_info *shinfo;
+ const struct tcphdr *th;
+ struct tcphdr *thtail;
+@@ -1878,7 +1877,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ * to reduce memory overhead, so add a little headroom here.
+ * Few sockets backlog are possibly concurrently non empty.
+ */
+- limit += 64*1024;
++ limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
+
+ if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ bh_unlock_sock(sk);
+--
+2.35.1
+
--- /dev/null
+From 911125224aa7858ab3061a5abd898828830daa60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 15:19:50 -0400
+Subject: tipc: fix a null-ptr-deref in tipc_topsrv_accept
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 82cb4e4612c633a9ce320e1773114875604a3cce ]
+
+syzbot found a crash in tipc_topsrv_accept:
+
+ KASAN: null-ptr-deref in range [0x0000000000000008-0x000000000000000f]
+ Workqueue: tipc_rcv tipc_topsrv_accept
+ RIP: 0010:kernel_accept+0x22d/0x350 net/socket.c:3487
+ Call Trace:
+ <TASK>
+ tipc_topsrv_accept+0x197/0x280 net/tipc/topsrv.c:460
+ process_one_work+0x991/0x1610 kernel/workqueue.c:2289
+ worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+ kthread+0x2e4/0x3a0 kernel/kthread.c:376
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:306
+
+It was caused by srv->listener, which may be set to NULL by
+tipc_topsrv_stop() in the net .exit path while it is still being
+used by the tipc_topsrv_accept() worker.
+
+srv->listener is protected by srv->idr_lock in tipc_topsrv_stop(), so add
+a check for srv->listener under srv->idr_lock in tipc_topsrv_accept() to
+avoid the null-ptr-deref. To ensure lsock is not released while
+tipc_topsrv_accept() is still running, move sock_release() after
+tipc_topsrv_work_stop(), which waits for the tipc_topsrv_accept worker
+to finish.
+
+Note that sk_callback_lock protects sk->sk_user_data rather than
+srv->listener, so tipc_topsrv_listener_data_ready() should check srv
+instead. This also ensures that no further tipc_topsrv_accept work is
+queued after tipc_conn_close() is called in tipc_topsrv_stop(), where
+sk->sk_user_data is set to NULL.
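+
+The locking pattern can be modelled in plain userspace C (a sketch
+only: "listener" stands in for srv->listener, the mutex for
+srv->idr_lock, and the calls in main() for the worker and the stop
+path):
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct server {
+          pthread_mutex_t lock;
+          int *listener;          /* NULL once the server is stopping */
+  };
+
+  static void accept_work(struct server *srv)
+  {
+          int *lsock;
+
+          /* Snapshot under the lock; bail out if already stopped. */
+          pthread_mutex_lock(&srv->lock);
+          lsock = srv->listener;
+          pthread_mutex_unlock(&srv->lock);
+          if (!lsock) {
+                  puts("worker: server stopping, nothing to accept");
+                  return;
+          }
+          printf("worker: accepting on listener %d\n", *lsock);
+  }
+
+  static void server_stop(struct server *srv)
+  {
+          int *lsock;
+
+          pthread_mutex_lock(&srv->lock);
+          lsock = srv->listener;
+          srv->listener = NULL;
+          pthread_mutex_unlock(&srv->lock);
+
+          /* Flush/stop the worker first ... */
+          puts("stop: worker flushed");
+          /* ... and only then release the listener it may have used. */
+          free(lsock);
+          puts("stop: listener released");
+  }
+
+  int main(void)
+  {
+          struct server srv = { .lock = PTHREAD_MUTEX_INITIALIZER };
+
+          srv.listener = malloc(sizeof(*srv.listener));
+          *srv.listener = 3;
+
+          accept_work(&srv);      /* uses the snapshot safely */
+          server_stop(&srv);      /* NULLs, flushes, then releases */
+          accept_work(&srv);      /* sees NULL and returns early */
+          return 0;
+  }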
+
+Fixes: 0ef897be12b8 ("tipc: separate topology server listener socket from subcsriber sockets")
+Reported-by: syzbot+c5ce866a8d30f4be0651@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Link: https://lore.kernel.org/r/4eee264380c409c61c6451af1059b7fb271a7e7b.1666120790.git.lucien.xin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/topsrv.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index d9e2c0fea3f2..561e709ae06a 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -450,12 +450,19 @@ static void tipc_conn_data_ready(struct sock *sk)
+ static void tipc_topsrv_accept(struct work_struct *work)
+ {
+ struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
+- struct socket *lsock = srv->listener;
+- struct socket *newsock;
++ struct socket *newsock, *lsock;
+ struct tipc_conn *con;
+ struct sock *newsk;
+ int ret;
+
++ spin_lock_bh(&srv->idr_lock);
++ if (!srv->listener) {
++ spin_unlock_bh(&srv->idr_lock);
++ return;
++ }
++ lsock = srv->listener;
++ spin_unlock_bh(&srv->idr_lock);
++
+ while (1) {
+ ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
+ if (ret < 0)
+@@ -489,7 +496,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk)
+
+ read_lock_bh(&sk->sk_callback_lock);
+ srv = sk->sk_user_data;
+- if (srv->listener)
++ if (srv)
+ queue_work(srv->rcv_wq, &srv->awork);
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+@@ -699,8 +706,9 @@ static void tipc_topsrv_stop(struct net *net)
+ __module_get(lsock->sk->sk_prot_creator->owner);
+ srv->listener = NULL;
+ spin_unlock_bh(&srv->idr_lock);
+- sock_release(lsock);
++
+ tipc_topsrv_work_stop(srv);
++ sock_release(lsock);
+ idr_destroy(&srv->conn_idr);
+ kfree(srv);
+ }
+--
+2.35.1
+
--- /dev/null
+From 9d28233d77652dea99a2017c2a5812684953e4b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Jul 2022 11:15:06 +0800
+Subject: x86/unwind/orc: Fix unreliable stack dump with gcov
+
+From: Chen Zhongjin <chenzhongjin@huawei.com>
+
+[ Upstream commit 230db82413c091bc16acee72650f48d419cebe49 ]
+
+When a console stack dump is initiated with CONFIG_GCOV_PROFILE_ALL
+enabled, show_trace_log_lvl() gets out of sync with the ORC unwinder,
+causing the stack trace to show all text addresses as unreliable:
+
+ # echo l > /proc/sysrq-trigger
+ [ 477.521031] sysrq: Show backtrace of all active CPUs
+ [ 477.523813] NMI backtrace for cpu 0
+ [ 477.524492] CPU: 0 PID: 1021 Comm: bash Not tainted 6.0.0 #65
+ [ 477.525295] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.0-1.fc36 04/01/2014
+ [ 477.526439] Call Trace:
+ [ 477.526854] <TASK>
+ [ 477.527216] ? dump_stack_lvl+0xc7/0x114
+ [ 477.527801] ? dump_stack+0x13/0x1f
+ [ 477.528331] ? nmi_cpu_backtrace.cold+0xb5/0x10d
+ [ 477.528998] ? lapic_can_unplug_cpu+0xa0/0xa0
+ [ 477.529641] ? nmi_trigger_cpumask_backtrace+0x16a/0x1f0
+ [ 477.530393] ? arch_trigger_cpumask_backtrace+0x1d/0x30
+ [ 477.531136] ? sysrq_handle_showallcpus+0x1b/0x30
+ [ 477.531818] ? __handle_sysrq.cold+0x4e/0x1ae
+ [ 477.532451] ? write_sysrq_trigger+0x63/0x80
+ [ 477.533080] ? proc_reg_write+0x92/0x110
+ [ 477.533663] ? vfs_write+0x174/0x530
+ [ 477.534265] ? handle_mm_fault+0x16f/0x500
+ [ 477.534940] ? ksys_write+0x7b/0x170
+ [ 477.535543] ? __x64_sys_write+0x1d/0x30
+ [ 477.536191] ? do_syscall_64+0x6b/0x100
+ [ 477.536809] ? entry_SYSCALL_64_after_hwframe+0x63/0xcd
+ [ 477.537609] </TASK>
+
+This happens when the compiled code for show_stack() has a single word
+on the stack, and doesn't use a tail call to show_stack_log_lvl().
+(CONFIG_GCOV_PROFILE_ALL=y is the only known case of this.) Then the
+__unwind_start() skip logic hits an off-by-one bug and fails to unwind
+all the way to the intended starting frame.
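+
+The off-by-one can be illustrated with a small standalone model of the
+skip loop (not the real unwinder; the frame addresses and first_frame
+value are made up for the example):
+
+  #include <stdio.h>
+
+  /* Each entry is the stack-pointer value at one frame, growing upward
+   * through the callers. */
+  static const unsigned long frame_sp[] = { 0x1000, 0x1008, 0x1040, 0x1080 };
+  #define NFRAMES (sizeof(frame_sp) / sizeof(frame_sp[0]))
+
+  /* Skip frames while the comparison holds, then report where the
+   * unwind would start. */
+  static void start_frame(unsigned long first_frame, int inclusive)
+  {
+          unsigned int i = 0;
+
+          while (i < NFRAMES - 1 &&
+                 (inclusive ? frame_sp[i] <= first_frame
+                            : frame_sp[i] <  first_frame))
+                  i++;    /* stand-in for unwind_next_frame() */
+
+          printf("%s: start at frame %u (sp=%#lx)\n",
+                 inclusive ? "sp <= first_frame" : "sp <  first_frame",
+                 i, frame_sp[i]);
+  }
+
+  int main(void)
+  {
+          /* When first_frame coincides exactly with a frame's sp (which,
+           * per the report above, can happen when show_stack() keeps only
+           * a single word on the stack), '<' stops skipping one frame
+           * earlier than '<=' does. */
+          unsigned long first_frame = 0x1008;
+
+          start_frame(first_frame, 0);    /* '<' : starts at sp=0x1008 */
+          start_frame(first_frame, 1);    /* '<=': starts at sp=0x1040 */
+          return 0;
+  }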
+
+Fix it by reverting the following commit:
+
+ f1d9a2abff66 ("x86/unwind/orc: Don't skip the first frame for inactive tasks")
+
+The original justification for that commit no longer exists: the issue
+it addressed was later fixed in a different way, by the following
+commit:
+
+ f2ac57a4c49d ("x86/unwind/orc: Fix inactive tasks with stack pointer in %sp on GCC 10 compiled kernels")
+
+Fixes: f1d9a2abff66 ("x86/unwind/orc: Don't skip the first frame for inactive tasks")
+Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
+[jpoimboe: rewrite commit log]
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/unwind_orc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index cc071c4c6524..d557a545f4bc 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -697,7 +697,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ /* Otherwise, skip ahead to the user-specified starting frame: */
+ while (!unwind_done(state) &&
+ (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+- state->sp < (unsigned long)first_frame))
++ state->sp <= (unsigned long)first_frame))
+ unwind_next_frame(state);
+
+ return;
+--
+2.35.1
+