--- /dev/null
+From b19c89c04e16afb5a692eec517e9702418cf146f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Mar 2024 11:25:59 -0800
+Subject: ACPI: CPPC: Use access_width over bit_width for system memory
+ accesses
+
+From: Jarred White <jarredwhite@linux.microsoft.com>
+
+[ Upstream commit 2f4a4d63a193be6fd530d180bb13c3592052904c ]
+
+To align with ACPI 6.3+, since bit_width can be any 8-bit value, it
+cannot be relied upon to always fall on a clean 8-bit boundary. This
+was uncovered on the Cobalt 100 platform.
+
+SError Interrupt on CPU26, code 0xbe000011 -- SError
+ CPU: 26 PID: 1510 Comm: systemd-udevd Not tainted 5.15.2.1-13 #1
+ Hardware name: MICROSOFT CORPORATION, BIOS MICROSOFT CORPORATION
+ pstate: 62400009 (nZCv daif +PAN -UAO +TCO -DIT -SSBS BTYPE=--)
+ pc : cppc_get_perf_caps+0xec/0x410
+ lr : cppc_get_perf_caps+0xe8/0x410
+ sp : ffff8000155ab730
+ x29: ffff8000155ab730 x28: ffff0080139d0038 x27: ffff0080139d0078
+ x26: 0000000000000000 x25: ffff0080139d0058 x24: 00000000ffffffff
+ x23: ffff0080139d0298 x22: ffff0080139d0278 x21: 0000000000000000
+ x20: ffff00802b251910 x19: ffff0080139d0000 x18: ffffffffffffffff
+ x17: 0000000000000000 x16: ffffdc7e111bad04 x15: ffff00802b251008
+ x14: ffffffffffffffff x13: ffff013f1fd63300 x12: 0000000000000006
+ x11: ffffdc7e128f4420 x10: 0000000000000000 x9 : ffffdc7e111badec
+ x8 : ffff00802b251980 x7 : 0000000000000000 x6 : ffff0080139d0028
+ x5 : 0000000000000000 x4 : ffff0080139d0018 x3 : 00000000ffffffff
+ x2 : 0000000000000008 x1 : ffff8000155ab7a0 x0 : 0000000000000000
+ Kernel panic - not syncing: Asynchronous SError Interrupt
+ CPU: 26 PID: 1510 Comm: systemd-udevd Not tainted 5.15.2.1-13 #1
+ Hardware name: MICROSOFT CORPORATION, BIOS MICROSOFT CORPORATION
+ Call trace:
+ dump_backtrace+0x0/0x1e0
+ show_stack+0x24/0x30
+ dump_stack_lvl+0x8c/0xb8
+ dump_stack+0x18/0x34
+ panic+0x16c/0x384
+ add_taint+0x0/0xc0
+ arm64_serror_panic+0x7c/0x90
+ arm64_is_fatal_ras_serror+0x34/0xa4
+ do_serror+0x50/0x6c
+ el1h_64_error_handler+0x40/0x74
+ el1h_64_error+0x7c/0x80
+ cppc_get_perf_caps+0xec/0x410
+ cppc_cpufreq_cpu_init+0x74/0x400 [cppc_cpufreq]
+ cpufreq_online+0x2dc/0xa30
+ cpufreq_add_dev+0xc0/0xd4
+ subsys_interface_register+0x134/0x14c
+ cpufreq_register_driver+0x1b0/0x354
+ cppc_cpufreq_init+0x1a8/0x1000 [cppc_cpufreq]
+ do_one_initcall+0x50/0x250
+ do_init_module+0x60/0x27c
+ load_module+0x2300/0x2570
+ __do_sys_finit_module+0xa8/0x114
+ __arm64_sys_finit_module+0x2c/0x3c
+ invoke_syscall+0x78/0x100
+ el0_svc_common.constprop.0+0x180/0x1a0
+ do_el0_svc+0x84/0xa0
+ el0_svc+0x2c/0xc0
+ el0t_64_sync_handler+0xa4/0x12c
+ el0t_64_sync+0x1a4/0x1a8
+
+Instead, use access_width to determine the access size, and use the
+offset and width to shift and mask the bits to read/write. Make sure to
+add a check for system memory, since PCC redefines the access_width
+field as the subspace ID.
+
+If access_width is not set, then fall back to using bit_width.
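+
+As a rough sketch of the intended selection and masking (the helper
+names below are illustrative, not the macros used in this patch),
+assuming the GAS access_width encoding of 1/2/3/4 for 8/16/32/64-bit
+accesses:
+
+ /* Pick the access size in bits, falling back to bit_width when unset. */
+ static unsigned int cpc_reg_access_bits(const struct cpc_reg *reg)
+ {
+ 	return reg->access_width ? 8 << (reg->access_width - 1)
+ 				 : reg->bit_width;
+ }
+
+ /* Extract the register field from a raw system-memory read. */
+ static u64 cpc_extract_field(const struct cpc_reg *reg, u64 raw)
+ {
+ 	return (raw >> reg->bit_offset) & GENMASK_ULL(reg->bit_width - 1, 0);
+ }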
+
+Signed-off-by: Jarred White <jarredwhite@linux.microsoft.com>
+Reviewed-by: Easwar Hariharan <eahariha@linux.microsoft.com>
+Cc: 5.15+ <stable@vger.kernel.org> # 5.15+
+[ rjw: Subject and changelog edits, comment adjustments ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/cppc_acpi.c | 31 ++++++++++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 093675b1a1ffb..c123fdbca693e 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -163,6 +163,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+
++/* Check for valid access_width, otherwise, fallback to using bit_width */
++#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
++
++/* Shift and apply the mask for CPC reads/writes */
++#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
++ GENMASK(((reg)->bit_width), 0)))
++
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+ {
+@@ -776,6 +783,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (gas_t->address) {
+ void __iomem *addr;
++ size_t access_width;
+
+ if (!osc_cpc_flexible_adr_space_confirmed) {
+ pr_debug("Flexible address space capability not supported\n");
+@@ -783,7 +791,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ goto out_free;
+ }
+
+- addr = ioremap(gas_t->address, gas_t->bit_width/8);
++ access_width = GET_BIT_WIDTH(gas_t) / 8;
++ addr = ioremap(gas_t->address, access_width);
+ if (!addr)
+ goto out_free;
+ cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
+@@ -979,6 +988,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ {
+ void __iomem *vaddr = NULL;
++ int size;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+
+@@ -990,7 +1000,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ *val = 0;
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+- u32 width = 8 << (reg->access_width - 1);
++ u32 width = GET_BIT_WIDTH(reg);
+ u32 val_u32;
+ acpi_status status;
+
+@@ -1014,7 +1024,9 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ return acpi_os_read_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+
+- switch (reg->bit_width) {
++ size = GET_BIT_WIDTH(reg);
++
++ switch (size) {
+ case 8:
+ *val = readb_relaxed(vaddr);
+ break;
+@@ -1033,18 +1045,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ return -EFAULT;
+ }
+
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ *val = MASK_VAL(reg, *val);
++
+ return 0;
+ }
+
+ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ {
+ int ret_val = 0;
++ int size;
+ void __iomem *vaddr = NULL;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+- u32 width = 8 << (reg->access_width - 1);
++ u32 width = GET_BIT_WIDTH(reg);
+ acpi_status status;
+
+ status = acpi_os_write_port((acpi_io_address)reg->address,
+@@ -1066,7 +1082,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ return acpi_os_write_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+
+- switch (reg->bit_width) {
++ size = GET_BIT_WIDTH(reg);
++
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ val = MASK_VAL(reg, val);
++
++ switch (size) {
+ case 8:
+ writeb_relaxed(val, vaddr);
+ break;
+--
+2.43.0
+
--- /dev/null
+From 566fec93a8a4c676fb5da9f05b5706cbf08aa274 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 22:46:50 +0100
+Subject: ahci: asm1064: asm1166: don't limit reported ports
+
+From: Conrad Kostecki <conikost@gentoo.org>
+
+[ Upstream commit 6cd8adc3e18960f6e59d797285ed34ef473cc896 ]
+
+Previously, patches were added to limit the reported count of SATA
+ports for asm1064 and asm1166 SATA controllers, as those controllers
+report more ports than they physically have.
+
+While it is allowed to report more ports than physically present in
+CAP.NP, it is not allowed to do so in the PI (Ports Implemented)
+register, which is what these HBAs do.
+(This is an AHCI spec violation.)
+
+Unfortunately, it seems that the PMP implementation in these ASMedia HBAs
+is also violating the AHCI and SATA-IO PMP specification.
+
+What these HBAs do is not report that they support PMP
+(CAP.SPM (Supports Port Multiplier) is not set).
+
+Instead, they add extra "virtual" ports in the PI register that are
+used if a port multiplier is connected to any of the physical ports of
+the HBA.
+
+Enumerating the devices behind the PMP as specified in the AHCI and
+SATA-IO specifications, by using PMP READ and PMP WRITE commands to the
+physical ports of the HBA, is not possible; you have to use the
+"virtual" ports.
+
+This is of course bad, because it gives us no way to detect the device
+and vendor ID of the PMP actually connected to the HBA, which means
+that we cannot apply the proper PMP quirks for that PMP.
+
+Limiting the port map will thus stop these controllers from working with
+SATA Port Multipliers.
+
+This patch reverts both patches for asm1064 and asm1166, so the old
+behavior is restored and SATA PMP will work again, but it will also
+reintroduce the (minutes long) extra boot time for the ASMedia
+controllers that do not have a PMP connected (either on the PCIe card
+itself, or an external PMP).
+
+However, a longer boot time for some is the lesser evil compared to
+some other users not being able to detect their drives at all.
+
+Fixes: 0077a504e1a4 ("ahci: asm1166: correct count of reported ports")
+Fixes: 9815e3961754 ("ahci: asm1064: correct count of reported ports")
+Cc: stable@vger.kernel.org
+Reported-by: Matt <cryptearth@googlemail.com>
+Signed-off-by: Conrad Kostecki <conikost@gentoo.org>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+[cassel: rewrote commit message]
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/ahci.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 9de1731b6b444..17119e8dc8c30 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -671,19 +671,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+ {
+- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+- switch (pdev->device) {
+- case 0x1166:
+- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+- hpriv->saved_port_map = 0x3f;
+- break;
+- case 0x1064:
+- dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+- hpriv->saved_port_map = 0xf;
+- break;
+- }
+- }
+-
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ hpriv->saved_port_map = 1;
+--
+2.43.0
+
--- /dev/null
+From 78c14b56a1f75ae1d2e22d98d8bafb5013bb83e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Feb 2024 17:57:57 +0100
+Subject: ahci: asm1064: correct count of reported ports
+
+From: Andrey Jr. Melnikov <temnota.am@gmail.com>
+
+[ Upstream commit 9815e39617541ef52d0dfac4be274ad378c6dc09 ]
+
+The ASM1064 SATA host controller always wrongly reports that it has
+24 ports, when in reality it only has four.
+
+before:
+ahci 0000:04:00.0: SSS flag set, parallel bus scan disabled
+ahci 0000:04:00.0: AHCI 0001.0301 32 slots 24 ports 6 Gbps 0xffff0f impl SATA mode
+ahci 0000:04:00.0: flags: 64bit ncq sntf stag pm led only pio sxs deso sadm sds apst
+
+after:
+ahci 0000:04:00.0: ASM1064 has only four ports
+ahci 0000:04:00.0: forcing port_map 0xffff0f -> 0xf
+ahci 0000:04:00.0: SSS flag set, parallel bus scan disabled
+ahci 0000:04:00.0: AHCI 0001.0301 32 slots 24 ports 6 Gbps 0xf impl SATA mode
+ahci 0000:04:00.0: flags: 64bit ncq sntf stag pm led only pio sxs deso sadm sds apst
+
+Signed-off-by: "Andrey Jr. Melnikov" <temnota.am@gmail.com>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 6cd8adc3e189 ("ahci: asm1064: asm1166: don't limit reported ports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/ahci.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 1790a2ecb9fac..9de1731b6b444 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -671,9 +671,17 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+ {
+- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+- hpriv->saved_port_map = 0x3f;
++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
++ switch (pdev->device) {
++ case 0x1166:
++ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
++ hpriv->saved_port_map = 0x3f;
++ break;
++ case 0x1064:
++ dev_info(&pdev->dev, "ASM1064 has only four ports\n");
++ hpriv->saved_port_map = 0xf;
++ break;
++ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+--
+2.43.0
+
--- /dev/null
+From a372d27a33a402d137f679772a578889ace2f1c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Jan 2024 19:39:32 +0100
+Subject: arm: dts: marvell: Fix maxium->maxim typo in brownstone dts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duje Mihanović <duje.mihanovic@skole.hr>
+
+[ Upstream commit 831e0cd4f9ee15a4f02ae10b67e7fdc10eb2b4fc ]
+
+Fix an obvious spelling error in the PMIC compatible in the MMP2
+Brownstone DTS file.
+
+Fixes: 58f1193e6210 ("mfd: max8925: Add dts")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Duje Mihanović <duje.mihanovic@skole.hr>
+Reported-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Closes: https://lore.kernel.org/linux-devicetree/1410884282-18041-1-git-send-email-k.kozlowski@samsung.com/
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20240125-brownstone-typo-fix-v2-1-45bc48a0c81c@skole.hr
+[krzysztof: Just 10 years to take a patch, not bad! Rephrased commit
+ msg]
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/mmp2-brownstone.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
+index 04f1ae1382e7a..bc64348b82185 100644
+--- a/arch/arm/boot/dts/mmp2-brownstone.dts
++++ b/arch/arm/boot/dts/mmp2-brownstone.dts
+@@ -28,7 +28,7 @@ &uart3 {
+ &twsi1 {
+ status = "okay";
+ pmic: max8925@3c {
+- compatible = "maxium,max8925";
++ compatible = "maxim,max8925";
+ reg = <0x3c>;
+ interrupts = <1>;
+ interrupt-parent = <&intcmux4>;
+--
+2.43.0
+
--- /dev/null
+From 90efd769713a2150722e1b8bb4ebf64d18c5b468 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Dec 2023 19:32:36 +0530
+Subject: arm64: dts: qcom: sc7280: Add additional MSI interrupts
+
+From: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+
+[ Upstream commit b8ba66b40da3230a8675cb5dd5c2dea5bce24d62 ]
+
+The current MSI mapping doesn't include all the vectors. This platform
+supports 8 vectors, and each vector supports 32 MSIs, so the total
+number of MSIs supported is 256.
+
+Add all the MSI groups supported for this PCIe instance on this platform.
+
+Fixes: 92e0ee9f83b3 ("arm64: dts: qcom: sc7280: Add PCIe and PHY related nodes")
+cc: stable@vger.kernel.org
+Signed-off-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+Link: https://lore.kernel.org/r/20231218-additional_msi-v1-1-de6917392684@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/sc7280.dtsi | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 04106d7254000..b5cd24d59ad9a 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2028,8 +2028,16 @@ pcie1: pci@1c08000 {
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+
+- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "msi";
++ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "msi0", "msi1", "msi2", "msi3",
++ "msi4", "msi5", "msi6", "msi7";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
+--
+2.43.0
+
--- /dev/null
+From e442ec07ba7a777fc29fbe93dbb161406812b0bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Feb 2024 22:17:23 +0900
+Subject: block: Clear zone limits for a non-zoned stacked queue
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit c8f6f88d25929ad2f290b428efcae3b526f3eab0 ]
+
+Device mapper may create a non-zoned mapped device out of a zoned device
+(e.g., the dm-zoned target). In such a case, some queue limits such as
+max_zone_append_sectors and zone_write_granularity end up being non-zero
+for a block device that is not zoned. Avoid this by clearing these
+limits in blk_stack_limits() when the stacked zoned limit is false.
+
+Fixes: 3093a479727b ("block: inherit the zoned characteristics in blk_stack_limits")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20240222131724.1803520-1-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-settings.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index bbca4ce77a2d3..c702f408bbc0a 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -680,6 +680,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ t->zone_write_granularity = max(t->zone_write_granularity,
+ b->zone_write_granularity);
+ t->zoned = max(t->zoned, b->zoned);
++ if (!t->zoned) {
++ t->zone_write_granularity = 0;
++ t->max_zone_append_sectors = 0;
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(blk_stack_limits);
+--
+2.43.0
+
--- /dev/null
+From 7031625a359d7bfe025e710b02b9bf0b097dad24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Oct 2023 15:55:49 +0100
+Subject: bounds: support non-power-of-two CONFIG_NR_CPUS
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit f2d5dcb48f7ba9e3ff249d58fc1fa963d374e66a ]
+
+ilog2() rounds down, so for example when PowerPC 85xx sets CONFIG_NR_CPUS
+to 24, we will only allocate 4 bits to store the number of CPUs instead of
+5. Use bits_per() instead, which rounds up. Found by code inspection.
+The effect of this would probably be a misaccounting when doing NUMA
+balancing, so to a user, it would only be a performance penalty. The
+effects may be more widespread; it's hard to tell.
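+
+A small user-space illustration of the rounding difference (assuming
+the usual semantics, i.e. ilog2() is floor(log2(n)) and bits_per(n) is
+the number of bits needed to store the value n):
+
+ #include <stdio.h>
+
+ static unsigned int my_ilog2(unsigned int n)	/* floor(log2(n)), n > 0 */
+ {
+ 	unsigned int b = 0;
+
+ 	while (n >>= 1)
+ 		b++;
+ 	return b;
+ }
+
+ int main(void)
+ {
+ 	unsigned int nr_cpus = 24;
+
+ 	/* 4 bits only hold 0..15, but we need to store values up to 24. */
+ 	printf("ilog2(%u) = %u, bits_per(%u) = %u\n",
+ 	       nr_cpus, my_ilog2(nr_cpus), nr_cpus, my_ilog2(nr_cpus) + 1);
+ 	return 0;
+ }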
+
+Link: https://lkml.kernel.org/r/20231010145549.1244748-1-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Fixes: 90572890d202 ("mm: numa: Change page last {nid,pid} into {cpu,pid}")
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bounds.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index b529182e8b04f..c5a9fcd2d6228 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -19,7 +19,7 @@ int main(void)
+ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ #ifdef CONFIG_SMP
+- DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
++ DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+ #endif
+ DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ #ifdef CONFIG_LRU_GEN
+--
+2.43.0
+
--- /dev/null
+From b11bff89fcfde1735c9f2db4b72f449dd71d538f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 10:37:04 +0000
+Subject: btrfs: fix off-by-one chunk length calculation at
+ contains_pending_extent()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ae6bd7f9b46a29af52ebfac25d395757e2031d0d ]
+
+At contains_pending_extent() the value of the end offset of a chunk we
+found in the device's allocation state io tree is inclusive, so when
+we calculate the length we pass to the in_range() macro, we must sum
+1 to the expression "physical_end - physical_offset".
+
+In practice the wrong calculation should be harmless, as chunk sizes
+are never 1 byte and we should never have 1 byte ranges of unallocated
+space. Nevertheless, fix the wrong calculation.
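+
+For illustration (numbers made up; this assumes in_range(b, first, len)
+tests the half-open interval [first, first + len)):
+
+ /* A chunk occupying the inclusive range [65536, 131071]. */
+ u64 physical_start = 65536, physical_end = 131071;
+
+ /* Correct length (65536): byte 131071 is reported as inside the chunk. */
+ in_range(131071, physical_start, physical_end + 1 - physical_start);
+
+ /* Old length (65535): byte 131071 is wrongly reported as outside. */
+ in_range(131071, physical_start, physical_end - physical_start);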
+
+Reported-by: Alex Lyakas <alex.lyakas@zadara.com>
+Link: https://lore.kernel.org/linux-btrfs/CAOcd+r30e-f4R-5x-S7sV22RJPe7+pgwherA6xqN2_qe7o4XTg@mail.gmail.com/
+Fixes: 1c11b63eff2a ("btrfs: replace pending/pinned chunks lists with io tree")
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/volumes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6fc2d99270c18..03cfb425ea4ea 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1444,7 +1444,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
+
+ if (in_range(physical_start, *start, len) ||
+ in_range(*start, physical_start,
+- physical_end - physical_start)) {
++ physical_end + 1 - physical_start)) {
+ *start = physical_end + 1;
+ return true;
+ }
+--
+2.43.0
+
--- /dev/null
+From 1d58c364fe8cf40cad2fb6a5f97c352a25bf78a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Feb 2024 18:13:38 +1030
+Subject: btrfs: qgroup: always free reserved space for extent records
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit d139ded8b9cdb897bb9539eb33311daf9a177fd2 ]
+
+[BUG]
+If qgroup is marked inconsistent (e.g. caused by operations needing full
+subtree rescan, like creating a snapshot and assign to a higher level
+qgroup), btrfs would immediately start leaking its data reserved space.
+
+The following script can easily reproduce it:
+
+ mkfs.btrfs -O quota -f $dev
+ mount $dev $mnt
+ btrfs subvolume create $mnt/subv1
+ btrfs qgroup create 1/0 $mnt
+
+ # This snapshot creation would mark qgroup inconsistent,
+ # as the ownership involves different higher level qgroup, thus
+ # we have to rescan both source and snapshot, which can be very
+ # time consuming, thus here btrfs just choose to mark qgroup
+ # inconsistent, and let users to determine when to do the rescan.
+ btrfs subv snapshot -i 1/0 $mnt/subv1 $mnt/snap1
+
+ # Now this write would lead to qgroup rsv leak.
+ xfs_io -f -c "pwrite 0 64k" $mnt/file1
+
+ # And at unmount time, btrfs would report 64K DATA rsv space leaked.
+ umount $mnt
+
+And we would have the following dmesg output for the unmount:
+
+ BTRFS info (device dm-1): last unmount of filesystem 14a3d84e-f47b-4f72-b053-a8a36eef74d3
+ BTRFS warning (device dm-1): qgroup 0/5 has unreleased space, type 0 rsv 65536
+
+[CAUSE]
+Since commit e15e9f43c7ca ("btrfs: introduce
+BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING to skip qgroup accounting"),
+there is a mode for btrfs qgroup to skip the time-consuming backref
+walk if the qgroup is already inconsistent.
+
+But this skip also covered the freeing of reserved data space, so the
+qgroup reserved space for each newly created data extent would not be
+freed, thus causing the leak.
+
+[FIX]
+Make the data extent reserved space freeing mandatory.
+
+The qgroup reserved space handling is way cheaper compared to the
+backref walking part, and we always have the super sensitive leak
+detector, thus it's definitely worth always freeing the qgroup
+reserved data space.
+
+Reported-by: Fabian Vogt <fvogt@suse.com>
+Fixes: e15e9f43c7ca ("btrfs: introduce BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING to skip qgroup accounting")
+CC: stable@vger.kernel.org # 6.1+
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1216196
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/qgroup.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b3472bf6b288f..c14d4f70e84bd 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2800,11 +2800,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ goto cleanup;
+ }
+
+- /* Free the reserved data space */
+- btrfs_qgroup_free_refroot(fs_info,
+- record->data_rsv_refroot,
+- record->data_rsv,
+- BTRFS_QGROUP_RSV_DATA);
+ /*
+ * Use BTRFS_SEQ_LAST as time_seq to do special search,
+ * which doesn't lock tree or delayed_refs and search
+@@ -2826,6 +2821,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ record->old_roots = NULL;
+ new_roots = NULL;
+ }
++ /* Free the reserved data space */
++ btrfs_qgroup_free_refroot(fs_info,
++ record->data_rsv_refroot,
++ record->data_rsv,
++ BTRFS_QGROUP_RSV_DATA);
+ cleanup:
+ ulist_free(record->old_roots);
+ ulist_free(new_roots);
+--
+2.43.0
+
--- /dev/null
+From 38ab4b28aa43dee5e03a808f6d3e921ab50ebdbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Mar 2024 17:53:44 +0300
+Subject: cifs: open_cached_dir(): add FILE_READ_EA to desired access
+
+From: Eugene Korenevsky <ekorenevsky@astralinux.ru>
+
+[ Upstream commit f1b8224b4e6ed59e7e6f5c548673c67410098d8d ]
+
+Since smb2_query_eas() reads EAs and uses the cached directory,
+open_cached_dir() should request FILE_READ_EA access.
+
+Otherwise listxattr() and getxattr() will fail with EACCES
+(0xc0000022 STATUS_ACCESS_DENIED SMB status).
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=218543
+Cc: stable@vger.kernel.org
+Signed-off-by: Eugene Korenevsky <ekorenevsky@astralinux.ru>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cached_dir.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fd082151c5f9b..86fe433b1d324 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -218,7 +218,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ .tcon = tcon,
+ .path = path,
+ .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+- .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
++ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
++ FILE_READ_EA,
+ .disposition = FILE_OPEN,
+ .fid = pfid,
+ };
+--
+2.43.0
+
--- /dev/null
+From ed311e200af932b542e998a694636996751de6a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 19:07:47 +0100
+Subject: clk: qcom: gcc-ipq6018: fix terminating of frequency table arrays
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit cdbc6e2d8108bc47895e5a901cfcaf799b00ca8d ]
+
+The frequency table arrays are supposed to be terminated with an
+empty element. Add such an entry to the end of the arrays where it
+is missing in order to avoid possible out-of-bounds access when
+the table is traversed by functions like qcom_find_freq() or
+qcom_find_freq_floor().
+
+Only compile tested.
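+
+A simplified sketch of the kind of lookup the terminator protects,
+modelled on qcom_find_freq() (not the exact driver code):
+
+ const struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
+ {
+ 	if (!f)
+ 		return NULL;
+
+ 	/* The loop relies on the zeroed sentinel entry "{ }" to stop. */
+ 	for (; f->freq; f++)
+ 		if (rate <= f->freq)
+ 			return f;
+
+ 	/* Walked past the last real entry: default to the fastest rate. */
+ 	return f - 1;
+ }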
+
+Fixes: d9db07f088af ("clk: qcom: Add ipq6018 Global Clock Controller support")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-2-074334f0905c@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/qcom/gcc-ipq6018.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 4c5c7a8f41d08..b9844e41cf99d 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -1557,6 +1557,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
+
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+@@ -1737,6 +1738,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+--
+2.43.0
+
--- /dev/null
+From c50081465f76f74c7b62a4d6b11beacc021df038 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 19:07:48 +0100
+Subject: clk: qcom: gcc-ipq8074: fix terminating of frequency table arrays
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit 1040ef5ed95d6fd2628bad387d78a61633e09429 ]
+
+The frequency table arrays are supposed to be terminated with an
+empty element. Add such an entry to the end of the arrays where it
+is missing in order to avoid possible out-of-bounds access when
+the table is traversed by functions like qcom_find_freq() or
+qcom_find_freq_floor().
+
+Only compile tested.
+
+Fixes: 9607f6224b39 ("clk: qcom: ipq8074: add PCIE, USB and SDCC clocks")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-3-074334f0905c@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/qcom/gcc-ipq8074.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index b2e83b38976e5..b52c923a2fbca 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -973,6 +973,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
+
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 pcie0_aux_clk_src = {
+@@ -1078,6 +1079,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+--
+2.43.0
+
--- /dev/null
+From 178f3ba8e423cb93b7f9b60891c309b69543c98d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jan 2024 11:58:14 +0530
+Subject: clk: qcom: gcc-sdm845: Add soft dependency on rpmhpd
+
+From: Amit Pundir <amit.pundir@linaro.org>
+
+[ Upstream commit 1d9054e3a4fd36e2949e616f7360bdb81bcc1921 ]
+
+With the addition of the RPMh power domain to the GCC node in the
+device tree, we noticed a significant delay in getting the UFS driver
+probed on AOSP, which further led to mount failures because Android
+does not support rootwait. So add a soft dependency on the RPMh power
+domain, which informs modprobe to load the rpmhpd module before
+gcc-sdm845.
+
+Cc: stable@vger.kernel.org # v5.4+
+Fixes: 4b6ea15c0a11 ("arm64: dts: qcom: sdm845: Add missing RPMh power domain to GCC")
+Suggested-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20240123062814.2555649-1-amit.pundir@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/qcom/gcc-sdm845.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 6af08e0ca8475..ef15e8f114027 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -4038,3 +4038,4 @@ module_exit(gcc_sdm845_exit);
+ MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:gcc-sdm845");
++MODULE_SOFTDEP("pre: rpmhpd");
+--
+2.43.0
+
--- /dev/null
+From 54ecb6a1a6be83941968d86f84907cad27cc55c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 19:07:51 +0100
+Subject: clk: qcom: mmcc-apq8084: fix terminating of frequency table arrays
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit a903cfd38d8dee7e754fb89fd1bebed99e28003d ]
+
+The frequency table arrays are supposed to be terminated with an
+empty element. Add such an entry to the end of the arrays where it
+is missing in order to avoid possible out-of-bounds access when
+the table is traversed by functions like qcom_find_freq() or
+qcom_find_freq_floor().
+
+Only compile tested.
+
+Fixes: 2b46cd23a5a2 ("clk: qcom: Add APQ8084 Multimedia Clock Controller (MMCC) support")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-6-074334f0905c@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/qcom/mmcc-apq8084.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
+index e9f9713591558..5f373c10ec6ee 100644
+--- a/drivers/clk/qcom/mmcc-apq8084.c
++++ b/drivers/clk/qcom/mmcc-apq8084.c
+@@ -334,6 +334,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F(333430000, P_MMPLL1, 3.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -358,6 +359,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+--
+2.43.0
+
--- /dev/null
+From 09f3412819c4c2828e19796cf64773c9239742db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 19:07:52 +0100
+Subject: clk: qcom: mmcc-msm8974: fix terminating of frequency table arrays
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit e2c02a85bf53ae86d79b5fccf0a75ac0b78e0c96 ]
+
+The frequency table arrays are supposed to be terminated with an
+empty element. Add such an entry to the end of the arrays where it
+is missing in order to avoid possible out-of-bounds access when
+the table is traversed by functions like qcom_find_freq() or
+qcom_find_freq_floor().
+
+Only compile tested.
+
+Fixes: d8b212014e69 ("clk: qcom: Add support for MSM8974's multimedia clock controller (MMCC)")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-7-074334f0905c@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/qcom/mmcc-msm8974.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index 17ed52046170a..eb2b0e2200d23 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -279,6 +279,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -303,6 +304,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+--
+2.43.0
+
--- /dev/null
+From 5811343e522adee76f55f46c50109b2dbf162635 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Feb 2024 16:42:26 +0100
+Subject: cpufreq: amd-pstate: Fix min_perf assignment in
+ amd_pstate_adjust_perf()
+
+From: Tor Vic <torvic9@mailbox.org>
+
+[ Upstream commit b26ffbf800ae3c8d01bdf90d9cd8a37e1606ff06 ]
+
+In the function amd_pstate_adjust_perf(), the 'min_perf' variable is set
+to 'highest_perf' instead of 'lowest_perf'.
+
+Fixes: 1d215f0319c2 ("cpufreq: amd-pstate: Add fast switch function for AMD P-State")
+Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Reviewed-by: Perry Yuan <Perry.Yuan@amd.com>
+Signed-off-by: Tor Vic <torvic9@mailbox.org>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Cc: 6.1+ <stable@vger.kernel.org> # 6.1+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/amd-pstate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index edc294ee5a5bc..90dcf26f09731 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -320,7 +320,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ if (target_perf < capacity)
+ des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
+
+- min_perf = READ_ONCE(cpudata->highest_perf);
++ min_perf = READ_ONCE(cpudata->lowest_perf);
+ if (_min_perf < capacity)
+ min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+
+--
+2.43.0
+
--- /dev/null
+From 43247556035b296039738d24cc8166dfbc379185 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Mar 2024 13:54:57 +0100
+Subject: cpufreq: dt: always allocate zeroed cpumask
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit d2399501c2c081eac703ca9597ceb83c7875a537 ]
+
+Commit 0499a78369ad ("ARM64: Dynamically allocate cpumasks and increase
+supported CPUs to 512") changed the handling of cpumasks on 64-bit ARM,
+which resulted in strange issues and warnings during cpufreq-dt
+initialization on some big.LITTLE platforms.
+
+This was caused by mixing OPPs between big and LITTLE cores, because
+the OPP-sharing information between big and LITTLE cores is computed on
+a cpumask, which in turn was not zeroed on allocation. Fix this by
+switching to zalloc_cpumask_var().
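+
+In other words (illustrative equivalence, assuming the usual cpumask
+API semantics), the fix amounts to:
+
+ if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ 	return -ENOMEM;
+ cpumask_clear(priv->cpus);	/* this is what zalloc_cpumask_var() adds */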
+
+Fixes: dc279ac6e5b4 ("cpufreq: dt: Refactor initialization to handle probe deferral properly")
+CC: stable@vger.kernel.org # v5.10+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Christoph Lameter (Ampere) <cl@linux.com>
+Reviewed-by: Dhruva Gole <d-gole@ti.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/cpufreq-dt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index 4aec4b2a52259..8f8f1949d66f6 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
+ if (!priv)
+ return -ENOMEM;
+
+- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
++ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(cpu, priv->cpus);
+--
+2.43.0
+
--- /dev/null
+From 26818c5e085e6ad986d7a6ba877e44c6e89c8738 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 14:43:51 +0530
+Subject: cpufreq: Limit resolving a frequency to policy min/max
+
+From: Shivnandan Kumar <quic_kshivnan@quicinc.com>
+
+[ Upstream commit d394abcb12bb1a6f309c1221fdb8e73594ecf1b4 ]
+
+Resolving a frequency to an efficient one should not transgress
+policy->max (which can be set for thermal reasons) or policy->min.
+
+Currently, there is a possibility that scaling_cur_freq can exceed
+scaling_max_freq when scaling_max_freq is an inefficient frequency.
+
+Add a check to ensure that resolving a frequency will respect
+policy->min/max.
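+
+The clamp_val() test used by the new helper in the hunk below is just a
+compact range check; conceptually:
+
+ unsigned int freq = policy->freq_table[idx].frequency;
+
+ /* freq == clamp_val(freq, min, max) iff min <= freq && freq <= max */
+ bool in_limits = (freq == clamp_val(freq, policy->min, policy->max));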
+
+Cc: All applicable <stable@vger.kernel.org>
+Fixes: 1f39fa0dccff ("cpufreq: Introducing CPUFREQ_RELATION_E")
+Signed-off-by: Shivnandan Kumar <quic_kshivnan@quicinc.com>
+[ rjw: Whitespace adjustment, changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/cpufreq.h | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index d5595d57f4e53..9d208648c84d5 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -1023,6 +1023,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ efficiencies);
+ }
+
++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
++{
++ unsigned int freq;
++
++ if (idx < 0)
++ return false;
++
++ freq = policy->freq_table[idx].frequency;
++
++ return freq == clamp_val(freq, policy->min, policy->max);
++}
++
+ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+@@ -1056,7 +1068,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ return 0;
+ }
+
+- if (idx < 0 && efficiencies) {
++ /* Limit frequency index to honor policy->min/max */
++ if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5413563ee1ee391d35de3c3dab0a55d23d362772 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 13:27:19 +0100
+Subject: crypto: qat - fix double free during reset
+
+From: Svyatoslav Pankratov <svyatoslav.pankratov@intel.com>
+
+[ Upstream commit 01aed663e6c421aeafc9c330bda630976b50a764 ]
+
+There is no need to free the reset_data structure if the recovery is
+unsuccessful and the reset is synchronous. The function
+adf_dev_aer_schedule_reset() handles the cleanup properly. Only
+asynchronous resets require such structure to be freed inside the reset
+worker.
+
+Fixes: d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework")
+Signed-off-by: Svyatoslav Pankratov <svyatoslav.pankratov@intel.com>
+Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 7d42e097607c ("crypto: qat - resolve race condition during AER recovery")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/adf_aer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
+index fe9bb2f3536a9..fa6b7ecd4c08d 100644
+--- a/drivers/crypto/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/qat/qat_common/adf_aer.c
+@@ -95,7 +95,8 @@ static void adf_device_reset_worker(struct work_struct *work)
+ if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ /* The device hanged and we can't restart it so stop here */
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+- kfree(reset_data);
++ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
++ kfree(reset_data);
+ WARN(1, "QAT: device restart failed. Device is unusable\n");
+ return;
+ }
+--
+2.43.0
+
--- /dev/null
+From 4cb1824072454e47ee0953248c57172591975ecc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Feb 2024 13:43:42 +0100
+Subject: crypto: qat - resolve race condition during AER recovery
+
+From: Damian Muszynski <damian.muszynski@intel.com>
+
+[ Upstream commit 7d42e097607c4d246d99225bf2b195b6167a210c ]
+
+During the PCI AER system's error recovery process, the kernel driver
+may encounter a race condition with freeing the reset_data structure's
+memory. If the device restart takes more than 10 seconds, the function
+scheduling that restart will exit due to a timeout, and the reset_data
+structure will be freed. However, this data structure is used for
+completion notification after the restart is completed, which leads
+to a UAF bug.
+
+This results in a KFENCE bug notice.
+
+ BUG: KFENCE: use-after-free read in adf_device_reset_worker+0x38/0xa0 [intel_qat]
+ Use-after-free read at 0x00000000bc56fddf (in kfence-#142):
+ adf_device_reset_worker+0x38/0xa0 [intel_qat]
+ process_one_work+0x173/0x340
+
+To resolve this race condition, the memory associated with the
+container of the work_struct is freed by the worker if the timeout
+expired, otherwise by the function that schedules the worker.
+The timeout can be detected by checking whether the caller is still
+waiting for completion, using the completion_done() function.
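+
+A condensed sketch of the resulting hand-off (simplified from the hunks
+below; the asynchronous-reset case and error handling are trimmed):
+
+ /* caller: schedules the reset and waits (sync mode) */
+ if (!wait_for_completion_timeout(&reset_data->compl, wait_jiffies))
+ 	ret = -EFAULT;		/* timed out: the worker frees reset_data */
+ else
+ 	kfree(reset_data);	/* worker completed in time: free it here */
+
+ /* reset worker */
+ if (completion_done(&reset_data->compl))
+ 	kfree(reset_data);	/* caller already timed out and went away */
+ else
+ 	complete(&reset_data->compl);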
+
+Fixes: d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Damian Muszynski <damian.muszynski@intel.com>
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/qat/qat_common/adf_aer.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
+index fa6b7ecd4c08d..4f36b5a9164a7 100644
+--- a/drivers/crypto/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/qat/qat_common/adf_aer.c
+@@ -95,7 +95,8 @@ static void adf_device_reset_worker(struct work_struct *work)
+ if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ /* The device hanged and we can't restart it so stop here */
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+- if (reset_data->mode == ADF_DEV_RESET_ASYNC)
++ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
++ completion_done(&reset_data->compl))
+ kfree(reset_data);
+ WARN(1, "QAT: device restart failed. Device is unusable\n");
+ return;
+@@ -103,11 +104,19 @@ static void adf_device_reset_worker(struct work_struct *work)
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+- /* The dev is back alive. Notify the caller if in sync mode */
+- if (reset_data->mode == ADF_DEV_RESET_SYNC)
+- complete(&reset_data->compl);
+- else
++ /*
++ * The dev is back alive. Notify the caller if in sync mode
++ *
++ * If device restart will take a more time than expected,
++ * the schedule_reset() function can timeout and exit. This can be
++ * detected by calling the completion_done() function. In this case
++ * the reset_data structure needs to be freed here.
++ */
++ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
++ completion_done(&reset_data->compl))
+ kfree(reset_data);
++ else
++ complete(&reset_data->compl);
+ }
+
+ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+@@ -140,8 +149,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ dev_err(&GET_DEV(accel_dev),
+ "Reset device timeout expired\n");
+ ret = -EFAULT;
++ } else {
++ kfree(reset_data);
+ }
+- kfree(reset_data);
+ return ret;
+ }
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 8b5d27db21256ac02299f073de624f3774bfd465 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 15:23:06 +0800
+Subject: dm-raid: fix lockdep warning in "pers->hot_add_disk"
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit 95009ae904b1e9dca8db6f649f2d7c18a6e42c75 ]
+
+The lockdep assert was added by commit a448af25becf ("md/raid10: remove
+rcu protection to access rdev from conf") in print_conf(), and I didn't
+notice that dm-raid is calling "pers->hot_add_disk" without holding
+'reconfig_mutex'.
+
+"pers->hot_add_disk" reads and writes many fields that are protected by
+'reconfig_mutex', and raid_resume() already grabs the lock in another
+context. Hence fix this problem by protecting "pers->hot_add_disk"
+with the lock.
+
+Fixes: 9092c02d9435 ("DM RAID: Add ability to restore transiently failed devices on resume")
+Fixes: a448af25becf ("md/raid10: remove rcu protection to access rdev from conf")
+Cc: stable@vger.kernel.org # v6.7+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Xiao Ni <xni@redhat.com>
+Acked-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20240305072306.2562024-10-yukuai1@huaweicloud.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-raid.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index bf833ca880bc1..99b4738e867a8 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -4046,7 +4046,9 @@ static void raid_resume(struct dm_target *ti)
+ * Take this opportunity to check whether any failed
+ * devices are reachable again.
+ */
++ mddev_lock_nointr(mddev);
+ attempt_restore_of_faulty_devices(rs);
++ mddev_unlock(mddev);
+ }
+
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+--
+2.43.0
+
--- /dev/null
+From 7917bc412a91720d1fcde6e8a51c563f68f603b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Mar 2024 18:43:11 +0100
+Subject: dm snapshot: fix lockup in dm_exception_table_exit
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6e7132ed3c07bd8a6ce3db4bb307ef2852b322dc ]
+
+A lockup was reported when exiting a snapshot with many exceptions.
+Fix this by adding "cond_resched" to the loop that frees the exceptions.
+
+Reported-by: John Pittman <jpittman@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-snap.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index b748901a4fb55..1c601508ce0b4 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -679,8 +679,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
+ for (i = 0; i < size; i++) {
+ slot = et->table + i;
+
+- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
++ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
+ kmem_cache_free(mem, ex);
++ cond_resched();
++ }
+ }
+
+ kvfree(et->table);
+--
+2.43.0
+
--- /dev/null
+From 025fdadc2ed62d1e55a6a2d6af5380d4d290a760 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jul 2023 08:35:07 -0400
+Subject: drm/amd/display: Fix noise issue on HDMI AV mute
+
+From: Leo Ma <hanghong.ma@amd.com>
+
+[ Upstream commit 69e3be6893a7e668660b05a966bead82bbddb01d ]
+
+[Why]
+When mode switching is triggered, momentary noise is visible on
+some HDMI TVs or displays.
+
+[How]
+Wait for 2 frames to make sure we have enough time to send out AV mute
+and the sink receives a full frame.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Wenjing Liu <wenjing.liu@amd.com>
+Acked-by: Wayne Lin <wayne.lin@amd.com>
+Signed-off-by: Leo Ma <hanghong.ma@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 72bec33e371f3..0225b2c96041d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -651,10 +651,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+ if (pipe_ctx == NULL)
+ return;
+
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
++ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ pipe_ctx->stream_res.stream_enc,
+ enable);
++
++ /* Wait for two frame to make sure AV mute is sent out */
++ if (enable) {
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ }
++ }
+ }
+
+ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+--
+2.43.0
+
--- /dev/null
+From f4ca7279c3647d95eb6de56c19b8cc4412532474 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Feb 2024 13:29:51 -0700
+Subject: drm/amd/display: Return the correct HDCP error code
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+[ Upstream commit e64b3f55e458ce7e2087a0051f47edabf74545e7 ]
+
+[WHY & HOW]
+If the display is null when creating an HDCP session, return a proper
+error code.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index ee67a35c2a8ed..ff930a71e496a 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+
+ if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+--
+2.43.0
+
--- /dev/null
+From 8fe074cc530cb816ac8e42f4d667e539f4804d78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 18:07:34 -0400
+Subject: drm/amdgpu: amdgpu_ttm_gart_bind set gtt bound flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Philip Yang <Philip.Yang@amd.com>
+
+[ Upstream commit 6c6064cbe58b43533e3451ad6a8ba9736c109ac3 ]
+
+Otherwise, after the GTT bo is released, the GTT and gart space is freed
+but amdgpu_ttm_backend_unbind will not clear the gart page table entry,
+leaving a valid mapping entry pointing to the stale system page. Then,
+if the GPU accesses the gart address by mistake, it will read an
+undefined value instead of taking a page fault, which is harder to
+debug and makes the real issue harder to reproduce.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 158b791883f03..dfb9d42007730 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -838,6 +838,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
+ }
++ gtt->bound = true;
+ }
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From ef858c359b64216ca001fcff3a0ebe829b869126 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Mar 2024 14:28:11 +0100
+Subject: drm/etnaviv: Restore some id values
+
+From: Christian Gmeiner <cgmeiner@igalia.com>
+
+[ Upstream commit b735ee173f84d5d0d0733c53946a83c12d770d05 ]
+
+The hwdb selection logic has a feature that allows it to mark some
+fields as 'don't care'. If we match with such a field, we memcpy(..)
+the matching etnaviv_chip_identity entry into ident.
+
+This step can overwrite some id values read from the GPU with the
+'don't care' value.
+
+Fix this issue by restoring the affected values after the memcpy(..).
+
+As it is crucial for user space to know when this feature works as
+expected, increment the minor version too.
+
+Fixes: 4078a1186dd3 ("drm/etnaviv: update hwdb selection logic")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Gmeiner <cgmeiner@igalia.com>
+Reviewed-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 +-
+ drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 9 +++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+index 1d2b4fb4bcf8b..f29952a55c05d 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+@@ -488,7 +488,7 @@ static const struct drm_driver etnaviv_drm_driver = {
+ .desc = "etnaviv DRM",
+ .date = "20151214",
+ .major = 1,
+- .minor = 3,
++ .minor = 4,
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+index f2fc645c79569..212e7050c4ba6 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+@@ -135,6 +135,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ {
+ struct etnaviv_chip_identity *ident = &gpu->identity;
++ const u32 product_id = ident->product_id;
++ const u32 customer_id = ident->customer_id;
++ const u32 eco_id = ident->eco_id;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+@@ -148,6 +151,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ etnaviv_chip_identities[i].eco_id == ~0U)) {
+ memcpy(ident, &etnaviv_chip_identities[i],
+ sizeof(*ident));
++
++ /* Restore some id values as ~0U aka 'don't care' might been used. */
++ ident->product_id = product_id;
++ ident->customer_id = customer_id;
++ ident->eco_id = eco_id;
++
+ return true;
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From 33e3c145b39f77ae8421de01344f55db2c7e1d04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 18:03:41 +0200
+Subject: drm/exynos: do not return negative values from .get_modes()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit 13d5b040363c7ec0ac29c2de9cf661a24a8aa531 ]
+
+The .get_modes() hooks aren't supposed to return negative error
+codes. Return 0 for no modes, whatever the reason.
+
+Cc: Inki Dae <inki.dae@samsung.com>
+Cc: Seung-Woo Kim <sw0312.kim@samsung.com>
+Cc: Kyungmin Park <kyungmin.park@samsung.com>
+Cc: stable@vger.kernel.org
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/d8665f620d9c252aa7d5a4811ff6b16e773903a2.1709913674.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4 ++--
+ drivers/gpu/drm/exynos/exynos_hdmi.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index f5e1adfcaa514..fb941a8c99f0f 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
+ */
+ if (!ctx->raw_edid) {
+ DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
+- return -EFAULT;
++ return 0;
+ }
+
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
+- return -ENOMEM;
++ return 0;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 1a7194a653ae5..be2d9cbaaef2e 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ int ret;
+
+ if (!hdata->ddc_adpt)
+- return -ENODEV;
++ return 0;
+
+ edid = drm_get_edid(connector, hdata->ddc_adpt);
+ if (!edid)
+- return -ENODEV;
++ return 0;
+
+ hdata->dvi_mode = !connector->display_info.is_hdmi;
+ DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+--
+2.43.0
+
--- /dev/null
+From dd1ab6fb1e592ea8734be84bab08455e211c5650 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 18:03:43 +0200
+Subject: drm/imx/ipuv3: do not return negative values from .get_modes()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit c2da9ada64962fcd2e6395ed9987b9874ea032d3 ]
+
+The .get_modes() hooks aren't supposed to return negative error
+codes. Return 0 for no modes, whatever the reason.
+
+Cc: Philipp Zabel <p.zabel@pengutronix.de>
+Cc: stable@vger.kernel.org
+Acked-by: Philipp Zabel <p.zabel@pengutronix.de>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/311f6eec96d47949b16a670529f4d89fcd97aefa.1709913674.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/parallel-display.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
+index 06723b2e9b847..64b6bc2de873e 100644
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
+ int ret;
+
+ if (!mode)
+- return -EINVAL;
++ return 0;
+
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ &imxpd->bus_flags,
+ OF_USE_NATIVE_MODE);
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
+- return ret;
++ return 0;
+ }
+
+ drm_mode_copy(mode, &imxpd->mode);
+--
+2.43.0
+
--- /dev/null
+From e6df37bcb49997f7e695e1c69919becf40525590 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 18:03:40 +0200
+Subject: drm/panel: do not return negative error codes from
+ drm_panel_get_modes()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit fc4e97726530241d96dd7db72eb65979217422c9 ]
+
+None of the callers of drm_panel_get_modes() expect it to return
+negative error codes. Either they propagate the return value in their
+struct drm_connector_helper_funcs .get_modes() hook (which is also not
+supposed to return negative codes), or they add it to other counts,
+leading to bogus values.
+
+On the other hand, many of the struct drm_panel_funcs .get_modes() hooks
+do return negative error codes, so handle them gracefully instead of
+propagating further.
+
+Return 0 for no modes, whatever the reason.
+
+Cc: Neil Armstrong <neil.armstrong@linaro.org>
+Cc: Jessica Zhang <quic_jesszhan@quicinc.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Reviewed-by: Jessica Zhang <quic_jesszhan@quicinc.com>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/79f559b72d8c493940417304e222a4b04dfa19c4.1709913674.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_panel.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
+index f634371c717a8..7fd3de89ed079 100644
+--- a/drivers/gpu/drm/drm_panel.c
++++ b/drivers/gpu/drm/drm_panel.c
+@@ -207,19 +207,24 @@ EXPORT_SYMBOL(drm_panel_disable);
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+- * Return: The number of modes available from the panel on success or a
+- * negative error code on failure.
++ * Return: The number of modes available from the panel on success, or 0 on
++ * failure (no modes).
+ */
+ int drm_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+ {
+ if (!panel)
+- return -EINVAL;
++ return 0;
+
+- if (panel->funcs && panel->funcs->get_modes)
+- return panel->funcs->get_modes(panel, connector);
++ if (panel->funcs && panel->funcs->get_modes) {
++ int num;
+
+- return -EOPNOTSUPP;
++ num = panel->funcs->get_modes(panel, connector);
++ if (num > 0)
++ return num;
++ }
++
++ return 0;
+ }
+ EXPORT_SYMBOL(drm_panel_get_modes);
+
+--
+2.43.0
+
--- /dev/null
+From d69cbc7b8c3557feb22c173c1df875744770829d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 18:03:39 +0200
+Subject: drm/probe-helper: warn about negative .get_modes()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit 7af03e688792293ba33149fb8df619a8dff90e80 ]
+
+The .get_modes() callback is supposed to return the number of modes,
+never a negative error code. If a negative value is returned, it'll just
+be interpreted as a negative count, and added to previous calculations.
+
+Document the rules, but handle the negative values gracefully with an
+error message.
+
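+As a hedged illustration of the failure mode, here is a stand-alone
+user-space sketch (the helper names are made up, not DRM code):
+
+  #include <stdio.h>
+
+  /* Toy stand-ins for two .get_modes() implementations. */
+  static int get_modes_ok(void)  { return 3; }    /* 3 modes probed */
+  static int get_modes_bad(void) { return -19; }  /* buggy: returns -ENODEV */
+
+  int main(void)
+  {
+          int count = 0;
+
+          count += get_modes_ok();
+          count += get_modes_bad();  /* negative "count" silently subtracts */
+          printf("mode count: %d\n", count);  /* prints -16 instead of 3 */
+          return 0;
+  }
+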
+Cc: stable@vger.kernel.org
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/50208c866facc33226a3c77b82bb96aeef8ef310.1709913674.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_probe_helper.c | 7 +++++++
+ include/drm/drm_modeset_helper_vtables.h | 3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 3b968ad187cf3..52dbaf74fe164 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -362,6 +362,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
+
+ count = connector_funcs->get_modes(connector);
+
++ /* The .get_modes() callback should not return negative values. */
++ if (count < 0) {
++ drm_err(connector->dev, ".get_modes() returned %pe\n",
++ ERR_PTR(count));
++ count = 0;
++ }
++
+ /*
+ * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ * override/firmware EDID.
+diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
+index fafa70ac1337f..6f19cf5c210e5 100644
+--- a/include/drm/drm_modeset_helper_vtables.h
++++ b/include/drm/drm_modeset_helper_vtables.h
+@@ -896,7 +896,8 @@ struct drm_connector_helper_funcs {
+ *
+ * RETURNS:
+ *
+- * The number of modes added by calling drm_mode_probed_add().
++ * The number of modes added by calling drm_mode_probed_add(). Return 0
++ * on failures (no modes) instead of negative error codes.
+ */
+ int (*get_modes)(struct drm_connector *connector);
+
+--
+2.43.0
+
--- /dev/null
+From cf63a08a26d77fa411f1a524d3b3f78f1cf7119a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 18:03:44 +0200
+Subject: drm/vc4: hdmi: do not return negative values from .get_modes()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit abf493988e380f25242c1023275c68bd3579c9ce ]
+
+The .get_modes() hooks aren't supposed to return negative error
+codes. Return 0 for no modes, whatever the reason.
+
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: stable@vger.kernel.org
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/dcda6d4003e2c6192987916b35c7304732800e08.1709913674.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index ea2eaf6032caa..f696818913499 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -497,7 +497,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ if (!edid)
+- return -ENODEV;
++ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+--
+2.43.0
+
--- /dev/null
+From 65ea0b8dc387326fd4f8e5ab7407bbf909831666 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jan 2024 15:03:05 -0500
+Subject: drm/vmwgfx: Fix possible null pointer dereference with invalid contexts
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+[ Upstream commit 517621b7060096e48e42f545fa6646fc00252eac ]
+
+vmw_context_cotable can return either an error or a null pointer and its
+usage sometimes went unchecked. Subsequent code would then try to access
+either a null pointer or an error value.
+
+The invalid dereferences were only possible with malformed userspace
+apps which never properly initialized the rendering contexts.
+
+Check the results of vmw_context_cotable to fix the invalid derefs.
+
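+The pattern used by the fix, shown as a hedged stand-alone sketch (the
+macros below are simplified stand-ins for the kernel's err.h helpers,
+not the vmwgfx code):
+
+  #include <errno.h>
+
+  #define MAX_ERRNO         4095
+  #define ERR_PTR(err)      ((void *)(long)(err))
+  #define PTR_ERR(ptr)      ((long)(ptr))
+  #define IS_ERR(ptr)       ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
+  #define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))
+
+  /* What a command handler does once the cotable lookup is checked. */
+  static long handle_cotable(void *res)
+  {
+          if (IS_ERR_OR_NULL(res))
+                  return res ? PTR_ERR(res) : -EINVAL;  /* never dereference */
+          /* ... safe to use res from here on ... */
+          return 0;
+  }
+
+  int main(void)
+  {
+          void *null_res = 0;                /* what a malformed context yields */
+          void *err_res = ERR_PTR(-EINVAL);  /* a failed lookup */
+
+          return (handle_cotable(null_res) == -EINVAL &&
+                  handle_cotable(err_res) == -EINVAL) ? 0 : 1;
+  }
+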
+Thanks to:
+ziming zhang (@ezrak1e) from Ant Group Light-Year Security Lab, who was
+the first person to discover it, and to
+Niels De Graef, who reported it and helped track down the PoC.
+
+Fixes: 9c079b8ce8bf ("drm/vmwgfx: Adapt execbuf to the new validation api")
+Cc: <stable@vger.kernel.org> # v4.20+
+Reported-by: Niels De Graef <ndegraef@redhat.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Cc: Martin Krastev <martin.krastev@broadcom.com>
+Cc: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Cc: Ian Forbes <ian.forbes@broadcom.com>
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240110200305.94086-1-zack.rusin@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index bc7f02e4ecebb..2f7ac91149fc0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < cotable_max; ++i) {
+ res = vmw_context_cotable(ctx, i);
+- if (IS_ERR(res))
++ if (IS_ERR_OR_NULL(res))
+ continue;
+
+ ret = vmw_execbuf_res_val_add(sw_context, res,
+@@ -1259,6 +1259,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ return -EINVAL;
+
+ cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
++ if (IS_ERR_OR_NULL(cotable_res))
++ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
+ ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
+
+ return ret;
+@@ -2477,6 +2479,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ return ret;
+
+ res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ if (unlikely(ret != 0))
+ return ret;
+@@ -2562,8 +2566,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+
+ so_type = vmw_so_cmd_to_type(header->id);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
+- if (IS_ERR(res))
+- return PTR_ERR(res);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+
+@@ -2682,6 +2686,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ return -EINVAL;
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ if (ret)
+ return ret;
+@@ -3003,6 +3009,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ }
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->body.soid);
+ if (ret)
+ return ret;
+--
+2.43.0
+
--- /dev/null
+From 8d8ecbff17320b2385249183a9e0ad0aff8832b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Feb 2024 22:18:45 +0800
+Subject: ext4: correct best extent lstart adjustment logic
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 4fbf8bc733d14bceb16dda46a3f5e19c6a9621c5 ]
+
+When yangerkun review commit 93cdf49f6eca ("ext4: Fix best extent lstart
+adjustment logic in ext4_mb_new_inode_pa()"), it was found that the best
+extent did not completely cover the original request after adjusting the
+best extent lstart in ext4_mb_new_inode_pa() as follows:
+
+ original request: 2/10(8)
+ normalized request: 0/64(64)
+ best extent: 0/9(9)
+
+When we check whether the best extent can be kept at the start of the
+goal, ac_o_ex.fe_logical (2) is less than the adjusted best extent's
+logical end (9), so we think the adjustment is done. But obviously
+0/9(9) does not cover 2/10(8), so what we should check here is whether
+the original request's logical end is less than or equal to the
+adjusted best extent's logical end.
+
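+A stand-alone sketch of that coverage check with the numbers above (the
+end() helper is illustrative, not the mballoc code):
+
+  #include <assert.h>
+
+  /* logical end of an extent, given its start and length */
+  static unsigned long end(unsigned long start, unsigned long len)
+  {
+          return start + len;
+  }
+
+  int main(void)
+  {
+          unsigned long o_start = 2, o_len = 8;  /* original request 2/10(8) */
+          unsigned long b_start = 0, b_len = 9;  /* adjusted best extent 0/9(9) */
+
+          /* old check: original start < best end, so it looked "covered" */
+          assert(o_start < end(b_start, b_len));
+          /* new check: the original end does not fit, so keep adjusting */
+          assert(!(end(o_start, o_len) <= end(b_start, b_len)));
+          return 0;
+  }
+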
+In addition, add a comment stating when adjusted best_ex will not cover
+the original request, and remove the duplicate assertion because adjusting
+lstart makes no change to b_ex.fe_len.
+
+Link: https://lore.kernel.org/r/3630fa7f-b432-7afd-5f79-781bc3b2c5ea@huawei.com
+Fixes: 93cdf49f6eca ("ext4: Fix best extent lstart adjustment logic in ext4_mb_new_inode_pa()")
+Cc: <stable@kernel.org>
+Signed-off-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240201141845.1879253-1-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 6a3e27771df73..bc0ca45a5d817 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4684,10 +4684,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ .fe_len = ac->ac_g_ex.fe_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
++ loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
+
+- /* we can't allocate as much as normalizer wants.
+- * so, found space must get proper lstart
+- * to cover original request */
++ /*
++ * We can't allocate as much as normalizer wants, so we try
++ * to get proper lstart to cover the original request, except
++ * when the goal doesn't cover the original request as below:
++ *
++ * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
++ * best_ex:0/200(200) -> adjusted: 1848/2048(200)
++ */
+ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
+ BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
+
+@@ -4699,7 +4705,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ * 1. Check if best ex can be kept at end of goal and still
+ * cover original start
+ * 2. Else, check if best ex can be kept at start of goal and
+- * still cover original start
++ * still cover original end
+ * 3. Else, keep the best ex at start of original request.
+ */
+ ex.fe_len = ac->ac_b_ex.fe_len;
+@@ -4709,7 +4715,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ goto adjust_bex;
+
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+- if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
++ if (o_ex_end <= extent_logical_end(sbi, &ex))
+ goto adjust_bex;
+
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
+@@ -4717,7 +4723,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
+
+ BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+- BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 0b1aa9dd785c92c06c3c95494be33a433d8f5284 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Feb 2024 15:50:09 +0000
+Subject: ext4: fix corruption during on-line resize
+
+From: Maximilian Heyne <mheyne@amazon.de>
+
+[ Upstream commit a6b3bfe176e8a5b05ec4447404e412c2a3fc92cc ]
+
+We observed a corruption during on-line resize of a file system that is
+larger than 16 TiB with 4k block size. With more than 2^32 blocks,
+resize_inode is turned off by default by mke2fs. The issue can be
+reproduced on a smaller file system for convenience by explicitly
+turning off resize_inode. An on-line resize across an 8 GiB boundary (the
+size of a meta block group in this setup) then leads to a corruption:
+
+ dev=/dev/<some_dev> # should be >= 16 GiB
+ mkdir -p /corruption
+ /sbin/mke2fs -t ext4 -b 4096 -O ^resize_inode $dev $((2 * 2**21 - 2**15))
+ mount -t ext4 $dev /corruption
+
+ dd if=/dev/zero bs=4096 of=/corruption/test count=$((2*2**21 - 4*2**15))
+ sha1sum /corruption/test
+ # 79d2658b39dcfd77274e435b0934028adafaab11 /corruption/test
+
+ /sbin/resize2fs $dev $((2*2**21))
+ # drop page cache to force reload the block from disk
+ echo 1 > /proc/sys/vm/drop_caches
+
+ sha1sum /corruption/test
+ # 3c2abc63cbf1a94c9e6977e0fbd72cd832c4d5c3 /corruption/test
+
+2^21 = 2^15*2^6 equals 8 GiB whereof 2^15 is the number of blocks per
+block group and 2^6 are the number of block groups that make a meta
+block group.
+
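+For reference, a stand-alone sketch of the geometry above (illustrative
+numbers only, not the resize code):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long blocks_per_group = 1UL << 15;  /* 4k block size */
+          unsigned long groups_per_meta_bg = 1UL << 6;
+
+          /* one meta block group spans 2^21 blocks, i.e. 8 GiB of 4k blocks */
+          printf("meta_bg span: %lu blocks\n",
+                 blocks_per_group * groups_per_meta_bg);
+          /* the backup descriptor corrupted in the reproducer sits here */
+          printf("corrupted block: %lu\n", 63 * blocks_per_group);
+          return 0;
+  }
+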
+The last checksum might be different depending on how the file is laid
+out across the physical blocks. The actual corruption occurs at physical
+block 63*2^15 = 2064384 which would be the location of the backup of the
+meta block group's block descriptor. During the on-line resize the file
+system will be converted to meta_bg starting at s_first_meta_bg which is
+2 in the example - meaning all block groups after 16 GiB. However, in
+ext4_flex_group_add we might add block groups that are not part of the
+first meta block group yet. In the reproducer we achieved this by
+subtracting the size of a whole block group from the point where the
+meta block group would start. This must be considered when updating the
+backup block group descriptors to follow the non-meta_bg layout. The fix
+is to add a test whether the group to add is already part of the meta
+block group or not.
+
+Fixes: 01f795f9e0d67 ("ext4: add online resizing support for meta_bg and 64-bit file systems")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Tested-by: Srivathsa Dara <srivathsa.d.dara@oracle.com>
+Reviewed-by: Srivathsa Dara <srivathsa.d.dara@oracle.com>
+Link: https://lore.kernel.org/r/20240215155009.94493-1-mheyne@amazon.de
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/resize.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index f2ed15af703a8..38ce42396758d 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1606,7 +1606,8 @@ static int ext4_flex_group_add(struct super_block *sb,
+ int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+- int meta_bg = ext4_has_feature_meta_bg(sb);
++ int meta_bg = ext4_has_feature_meta_bg(sb) &&
++ gdb_num >= le32_to_cpu(es->s_first_meta_bg);
+ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
+ ext4_group_first_block_no(sb, 0);
+ sector_t old_gdb = 0;
+--
+2.43.0
+
--- /dev/null
+From 7179904760c542ae34bf196ec2e0db79f5bb8d20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 20:26:19 +0900
+Subject: f2fs: mark inode dirty for FI_ATOMIC_COMMITTED flag
+
+From: Sunmin Jeong <s_min.jeong@samsung.com>
+
+[ Upstream commit 4bf78322346f6320313683dc9464e5423423ad5c ]
+
+In f2fs_update_inode, i_size of an atomic file isn't updated until the
+FI_ATOMIC_COMMITTED flag is set. When an atomic write is committed
+right after the writeback of the inode, i_size of the raw inode will
+not be updated. This can cause atomicity corruption due to a mismatch
+between the old file size and the new data.
+
+To prevent the problem, let's mark the inode dirty for
+FI_ATOMIC_COMMITTED:
+
+Atomic write thread Writeback thread
+ __writeback_single_inode
+ write_inode
+ f2fs_update_inode
+ - skip i_size update
+ f2fs_ioc_commit_atomic_write
+ f2fs_commit_atomic_write
+ set_inode_flag(inode, FI_ATOMIC_COMMITTED)
+ f2fs_do_sync_file
+ f2fs_fsync_node_pages
+ - skip f2fs_update_inode since the inode is clean
+
+Fixes: 3db1de0e582c ("f2fs: change the current atomic write way")
+Cc: stable@vger.kernel.org #v5.19+
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Reviewed-by: Yeongjin Gil <youngjin.gil@samsung.com>
+Signed-off-by: Sunmin Jeong <s_min.jeong@samsung.com>
+Reviewed-by: Daeho Jeong <daehojeong@google.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/f2fs.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 5ae1c4aa3ae92..b54d681c6457d 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3000,6 +3000,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
+ case FI_INLINE_DOTS:
+ case FI_PIN_FILE:
+ case FI_COMPRESS_RELEASED:
++ case FI_ATOMIC_COMMITTED:
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From b812a4f5632969bed00daf1b201d315554c4153e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 20:26:20 +0900
+Subject: f2fs: truncate page cache before clearing flags when aborting atomic
+ write
+
+From: Sunmin Jeong <s_min.jeong@samsung.com>
+
+[ Upstream commit 74b0ebcbdde4c7fe23c979e4cfc2fdbf349c39a3 ]
+
+In f2fs_do_write_data_page, the FI_ATOMIC_FILE flag selects the target
+inode between the original inode and the COW inode. When aborting an
+atomic write and writeback occur simultaneously, invalid data can be
+written to the original inode if the FI_ATOMIC_FILE flag is cleared in
+the meantime.
+
+To prevent the problem, let's truncate all pages before clearing the
+flag:
+
+Atomic write thread Writeback thread
+ f2fs_abort_atomic_write
+ clear_inode_flag(inode, FI_ATOMIC_FILE)
+ __writeback_single_inode
+ do_writepages
+ f2fs_do_write_data_page
+ - use dn of original inode
+ truncate_inode_pages_final
+
+Fixes: 3db1de0e582c ("f2fs: change the current atomic write way")
+Cc: stable@vger.kernel.org #v5.19+
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Reviewed-by: Yeongjin Gil <youngjin.gil@samsung.com>
+Signed-off-by: Sunmin Jeong <s_min.jeong@samsung.com>
+Reviewed-by: Daeho Jeong <daehojeong@google.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/segment.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index aa1ba2fdfe00d..205216c1db91f 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -192,6 +192,9 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ if (!f2fs_is_atomic_file(inode))
+ return;
+
++ if (clean)
++ truncate_inode_pages_final(inode->i_mapping);
++
+ release_atomic_write_cnt(inode);
+ clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+@@ -200,7 +203,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ F2FS_I(inode)->atomic_write_task = NULL;
+
+ if (clean) {
+- truncate_inode_pages_final(inode->i_mapping);
+ f2fs_i_size_write(inode, fi->original_i_size);
+ fi->original_i_size = 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From e7aa61bbe4ea5a342ad07830b69da42fc1b18f52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Feb 2024 13:26:26 +0100
+Subject: fat: fix uninitialized field in nostale filehandles
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit fde2497d2bc3a063d8af88b258dbadc86bd7b57c ]
+
+When fat_encode_fh_nostale() encodes a file handle without a parent, it
+stores only the first 10 bytes of the file handle. However, the length
+of the file handle must be a multiple of 4, so the file handle is
+actually 12 bytes long and the last two bytes remain uninitialized.
+This is not great as we potentially leak uninitialized information to
+userspace with the handle. Properly initialize the full handle length.
+
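+A hedged user-space sketch of the length rounding described above (the
+4-byte rounding is the point; this is not the fs/fat code):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int useful = 10;                  /* bytes actually filled */
+          unsigned int fh_len = (useful + 3) & ~3u;  /* round up to 4 -> 12 */
+
+          printf("handle length %u, uninitialized tail %u bytes\n",
+                 fh_len, fh_len - useful);
+          return 0;
+  }
+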
+Link: https://lkml.kernel.org/r/20240205122626.13701-1-jack@suse.cz
+Reported-by: syzbot+3ce5dea5b1539ff36769@syzkaller.appspotmail.com
+Fixes: ea3983ace6b7 ("fat: restructure export_operations")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Acked-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+Cc: Amir Goldstein <amir73il@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fat/nfs.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
+index af191371c3529..bab63eeaf9cbc 100644
+--- a/fs/fat/nfs.c
++++ b/fs/fat/nfs.c
+@@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
+ fid->parent_i_gen = parent->i_generation;
+ type = FILEID_FAT_WITH_PARENT;
+ *lenp = FAT_FID_SIZE_WITH_PARENT;
++ } else {
++ /*
++ * We need to initialize this field because the fh is actually
++ * 12 bytes long
++ */
++ fid->parent_i_pos_hi = 0;
+ }
+
+ return type;
+--
+2.43.0
+
--- /dev/null
+From 06c031fa057f875e21845b22ae51627b63134dc9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Feb 2024 16:50:49 +0100
+Subject: fuse: don't unhash root
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+[ Upstream commit b1fe686a765e6c0d71811d825b5a1585a202b777 ]
+
+The root inode is assumed to be always hashed. Do not unhash the root
+inode even if it is marked BAD.
+
+Fixes: 5d069dbe8aaf ("fuse: fix bad inode")
+Cc: <stable@vger.kernel.org> # v5.11
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/fuse_i.h | 1 -
+ fs/fuse/inode.c | 7 +++++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index a9681fecbd91f..253b9b78d6f13 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -923,7 +923,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation,
+
+ static inline void fuse_make_bad(struct inode *inode)
+ {
+- remove_inode_hash(inode);
+ set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
+ }
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index f81000d968875..367e3b276092f 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -445,8 +445,11 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ } else if (fuse_stale_inode(inode, generation, attr)) {
+ /* nodeid was reused, any I/O on the old inode should fail */
+ fuse_make_bad(inode);
+- iput(inode);
+- goto retry;
++ if (inode != d_inode(sb->s_root)) {
++ remove_inode_hash(inode);
++ iput(inode);
++ goto retry;
++ }
+ }
+ fi = get_fuse_inode(inode);
+ spin_lock(&fi->lock);
+--
+2.43.0
+
--- /dev/null
+From d41f6e0bc9cccf96b9e8e05f8a564208198d3009 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Feb 2024 16:50:49 +0100
+Subject: fuse: fix root lookup with nonzero generation
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+[ Upstream commit 68ca1b49e430f6534d0774a94147a823e3b8b26e ]
+
+The root inode has a fixed nodeid and generation (1, 0).
+
+Prior to commit 15db16837a35 ("fuse: fix illegal access to inode with
+reused nodeid"), the generation number on lookup was ignored. After
+this commit, a lookup with the wrong generation number resulted in the
+inode being unhashed. This is correct for non-root inodes, but
+replacing the root inode is wrong and results in weird behavior.
+
+Fix by reverting to the old behavior of ignoring the generation for the
+root inode, but issue a warning in dmesg.
+
+Reported-by: Antonio SJ Musumeci <trapexit@spawn.link>
+Closes: https://lore.kernel.org/all/CAOQ4uxhek5ytdN8Yz2tNEOg5ea4NkBb4nk0FGPjPk_9nz-VG3g@mail.gmail.com/
+Fixes: 15db16837a35 ("fuse: fix illegal access to inode with reused nodeid")
+Cc: <stable@vger.kernel.org> # v5.14
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/dir.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 5e408e7ec4c6b..936a24b646cef 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -399,6 +399,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
+ goto out_put_forget;
+ if (fuse_invalid_attr(&outarg->attr))
+ goto out_put_forget;
++ if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
++ pr_warn_once("root generation should be zero\n");
++ outarg->generation = 0;
++ }
+
+ *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
+ &outarg->attr, entry_attr_timeout(outarg),
+--
+2.43.0
+
--- /dev/null
+From 104650f066062a86da67fdd51403d93d0297b47c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 12:06:58 +0100
+Subject: hwmon: (amc6821) add of_match table
+
+From: Josua Mayer <josua@solid-run.com>
+
+[ Upstream commit 3f003fda98a7a8d5f399057d92e6ed56b468657c ]
+
+Add an of_match table for the "ti,amc6821" compatible string.
+This fixes automatic driver loading by userspace when using device tree
+and the driver is built as a module, as major Linux distributions do.
+
+While devices probe just fine with the i2c_device_id table, userspace
+can't match the "ti,amc6821" compatible string from DT with the plain
+"amc6821" device id. As a result, the kernel module cannot be loaded.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Josua Mayer <josua@solid-run.com>
+Link: https://lore.kernel.org/r/20240307-amc6821-of-match-v1-1-5f40464a3110@solid-run.com
+[groeck: Cleaned up patch description]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/amc6821.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index 3bfd12ff4b3ca..6868db4ac84f3 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = {
+
+ MODULE_DEVICE_TABLE(i2c, amc6821_id);
+
++static const struct of_device_id __maybe_unused amc6821_of_match[] = {
++ {
++ .compatible = "ti,amc6821",
++ .data = (void *)amc6821,
++ },
++ { }
++};
++
++MODULE_DEVICE_TABLE(of, amc6821_of_match);
++
+ static struct i2c_driver amc6821_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "amc6821",
++ .of_match_table = of_match_ptr(amc6821_of_match),
+ },
+ .probe_new = amc6821_probe,
+ .id_table = amc6821_id,
+--
+2.43.0
+
--- /dev/null
+From 35abf88ed16080b4ad49e9b36f2906ee4b3377e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 17:48:03 -0700
+Subject: io_uring/net: correctly handle multishot recvmsg retry setup
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit deaef31bc1ec7966698a427da8c161930830e1cf ]
+
+If we loop for multishot receive on the initial attempt, and then abort
+later on to wait for more, we miss a case where we should be copying the
+io_async_msghdr from the stack to stable storage. This leads to the next
+retry potentially failing, if the application had the msghdr on the
+stack.
+
+Cc: stable@vger.kernel.org
+Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/net.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 0d4ee3d738fbf..b1b564c04d1e7 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -876,7 +876,8 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
+ kfree(kmsg->free_iov);
+ io_netmsg_recycle(req, issue_flags);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+- }
++ } else if (ret == -EAGAIN)
++ return io_setup_async_msg(req, kmsg, issue_flags);
+
+ return ret;
+ }
+--
+2.43.0
+
--- /dev/null
+From c4c6d34cc60bbe125fbafac79b45761a3f1c6e2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Feb 2024 12:15:52 +0100
+Subject: kasan/test: avoid gcc warning for intentional overflow
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit e10aea105e9ed14b62a11844fec6aaa87c6935a3 ]
+
+The out-of-bounds test allocates an object that is three bytes too short
+in order to validate the bounds checking. Starting with gcc-14, this
+causes a compile-time warning as gcc has grown smart enough to understand
+the sizeof() logic:
+
+mm/kasan/kasan_test.c: In function 'kmalloc_oob_16':
+mm/kasan/kasan_test.c:443:14: error: allocation of insufficient size '13' for type 'struct <anonymous>' with size '16' [-Werror=alloc-size]
+ 443 | ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+ | ^
+
+Hide the actual computation behind a RELOC_HIDE() that ensures
+the compiler misses the intentional bug.
+
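+A hedged user-space analogue of the laundering idea (whether it
+silences any given compiler's warning is not guaranteed; it only shows
+the pattern of hiding the allocation from the optimizer):
+
+  #include <stdlib.h>
+
+  /* Launder a pointer through an empty asm so the compiler loses track
+   * of the allocation size it came from.
+   */
+  static void *hide(void *p)
+  {
+          __asm__ ("" : "+r"(p));
+          return p;
+  }
+
+  struct pair { long a, b; };  /* 16 bytes */
+
+  int main(void)
+  {
+          /* deliberately 3 bytes short, as in the KASAN test */
+          struct pair *p = hide(malloc(sizeof(*p) - 3));
+
+          free(p);
+          return 0;
+  }
+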
+Link: https://lkml.kernel.org/r/20240212111609.869266-1-arnd@kernel.org
+Fixes: 3f15801cdc23 ("lib: add kasan test module")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/kasan/kasan_test.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
+index 0d59098f08761..cef683a2e0d2e 100644
+--- a/mm/kasan/kasan_test.c
++++ b/mm/kasan/kasan_test.c
+@@ -415,7 +415,8 @@ static void kmalloc_oob_16(struct kunit *test)
+ /* This test is specifically crafted for the generic mode. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+
+- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
++ /* RELOC_HIDE to prevent gcc from warning about short alloc */
++ ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+ ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+--
+2.43.0
+
--- /dev/null
+From 78ef3c6e5114b88382f57bd7f326d2f387954ec7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 15:12:47 -0700
+Subject: kbuild: Move -Wenum-{compare-conditional,enum-conversion} into W=1
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit 75b5ab134bb5f657ef7979a59106dce0657e8d87 ]
+
+Clang enables -Wenum-enum-conversion and -Wenum-compare-conditional
+under -Wenum-conversion. A recent change in Clang strengthened these
+warnings and they appear frequently in common builds, primarily due to
+several instances in common headers but there are quite a few drivers
+that have individual instances as well.
+
+ include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
+ 508 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+ | ~~~~~~~~~~~~~~~~~~~~~ ^
+ 509 | item];
+ | ~~~~
+
+ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c:955:24: warning: conditional expression between different enumeration types ('enum iwl_mac_beacon_flags' and 'enum iwl_mac_beacon_flags_v1') [-Wenum-compare-conditional]
+ 955 | flags |= is_new_rate ? IWL_MAC_BEACON_CCK
+ | ^ ~~~~~~~~~~~~~~~~~~
+ 956 | : IWL_MAC_BEACON_CCK_V1;
+ | ~~~~~~~~~~~~~~~~~~~~~
+ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c:1120:21: warning: conditional expression between different enumeration types ('enum iwl_mac_beacon_flags' and 'enum iwl_mac_beacon_flags_v1') [-Wenum-compare-conditional]
+ 1120 | 0) > 10 ?
+ | ^
+ 1121 | IWL_MAC_BEACON_FILS :
+ | ~~~~~~~~~~~~~~~~~~~
+ 1122 | IWL_MAC_BEACON_FILS_V1;
+ | ~~~~~~~~~~~~~~~~~~~~~~
+
+Doing arithmetic between or returning two different types of enums could
+be a bug, so each instance of the warning needs to be evaluated.
+Unfortunately, as mentioned above, there are many instances of this
+warning in many different configurations, which can break the build when
+CONFIG_WERROR is enabled.
+
+To avoid introducing new instances of the warnings while cleaning up the
+disruption for the majority of users, disable these warnings for the
+default build while leaving them on for W=1 builds.
+
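+A minimal reproducer for the class of warning being demoted (compile
+with clang and -Wenum-conversion; the enums below are made up):
+
+  enum speed   { SLOW, FAST };
+  enum quality { LOW, HIGH };
+
+  /* -Wenum-compare-conditional: mixing enum types in the ?: operator */
+  int pick(int cond)
+  {
+          return cond ? SLOW : HIGH;
+  }
+
+  /* -Wenum-enum-conversion: arithmetic between different enum types */
+  int combine(enum speed s, enum quality q)
+  {
+          return s + q;
+  }
+
+  int main(void)
+  {
+          return pick(1) + combine(FAST, LOW);
+  }
+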
+Cc: stable@vger.kernel.org
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2002
+Link: https://github.com/llvm/llvm-project/commit/8c2ae42b3e1c6aa7c18f873edcebff7c0b45a37e
+Acked-by: Yonghong Song <yonghong.song@linux.dev>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/Makefile.extrawarn | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 6bbba36c59695..fa5ef41806882 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -65,6 +65,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
+ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+ KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
++KBUILD_CFLAGS += -Wno-enum-compare-conditional
++KBUILD_CFLAGS += -Wno-enum-enum-conversion
+ endif
+
+ endif
+--
+2.43.0
+
--- /dev/null
+From 2e82d7bd000192a2decd6feb2f442504c16f576c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Feb 2024 10:58:21 +0100
+Subject: ksmbd: retrieve number of blocks using vfs_getattr in
+ set_file_allocation_info
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+[ Upstream commit 34cd86b6632718b7df3999d96f51e63de41c5e4f ]
+
+Use vfs_getattr() to retrieve stat information, rather than making
+assumptions about how a filesystem fills inode structs.
+
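+For context, the handler compares the requested allocation against the
+file's block count in 512-byte units; a stand-alone sketch of that
+rounding (illustrative only, not the smb2pdu.c code):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long long alloc_size = 4097;  /* requested bytes */
+          unsigned long long alloc_blks = (alloc_size + 511) >> 9;
+
+          printf("%llu blocks of 512 bytes\n", alloc_blks);  /* 9 */
+          return 0;
+  }
+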
+Cc: stable@vger.kernel.org
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/smb2pdu.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 66d25d0e34d8b..39fc078284c8e 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5757,15 +5757,21 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+
+ loff_t alloc_blks;
+ struct inode *inode;
++ struct kstat stat;
+ int rc;
+
+ if (!(fp->daccess & FILE_WRITE_DATA_LE))
+ return -EACCES;
+
++ rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (rc)
++ return rc;
++
+ alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
+ inode = file_inode(fp->filp);
+
+- if (alloc_blks > inode->i_blocks) {
++ if (alloc_blks > stat.blocks) {
+ smb_break_all_levII_oplock(work, fp, 1);
+ rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+ alloc_blks * 512);
+@@ -5773,7 +5779,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+ pr_err("vfs_fallocate is failed : %d\n", rc);
+ return rc;
+ }
+- } else if (alloc_blks < inode->i_blocks) {
++ } else if (alloc_blks < stat.blocks) {
+ loff_t size;
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From d223e7075afa68c255cfbaa4a7cd9ffb8d2c1492 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jan 2024 17:15:30 -0800
+Subject: KVM: Always flush async #PF workqueue when vCPU is being destroyed
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 3d75b8aa5c29058a512db29da7cbee8052724157 ]
+
+Always flush the per-vCPU async #PF workqueue when a vCPU is clearing its
+completion queue, e.g. when a VM and all its vCPUs is being destroyed.
+KVM must ensure that none of its workqueue callbacks is running when the
+last reference to the KVM _module_ is put. Gifting a reference to the
+associated VM prevents the workqueue callback from dereferencing freed
+vCPU/VM memory, but does not prevent the KVM module from being unloaded
+before the callback completes.
+
+Drop the misguided VM refcount gifting, as calling kvm_put_kvm() from
+async_pf_execute() if kvm_put_kvm() flushes the async #PF workqueue will
+result in deadlock. async_pf_execute() can't return until kvm_put_kvm()
+finishes, and kvm_put_kvm() can't return until async_pf_execute() finishes:
+
+ WARNING: CPU: 8 PID: 251 at virt/kvm/kvm_main.c:1435 kvm_put_kvm+0x2d/0x320 [kvm]
+ Modules linked in: vhost_net vhost vhost_iotlb tap kvm_intel kvm irqbypass
+ CPU: 8 PID: 251 Comm: kworker/8:1 Tainted: G W 6.6.0-rc1-e7af8d17224a-x86/gmem-vm #119
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+ Workqueue: events async_pf_execute [kvm]
+ RIP: 0010:kvm_put_kvm+0x2d/0x320 [kvm]
+ Call Trace:
+ <TASK>
+ async_pf_execute+0x198/0x260 [kvm]
+ process_one_work+0x145/0x2d0
+ worker_thread+0x27e/0x3a0
+ kthread+0xba/0xe0
+ ret_from_fork+0x2d/0x50
+ ret_from_fork_asm+0x11/0x20
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+ INFO: task kworker/8:1:251 blocked for more than 120 seconds.
+ Tainted: G W 6.6.0-rc1-e7af8d17224a-x86/gmem-vm #119
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ task:kworker/8:1 state:D stack:0 pid:251 ppid:2 flags:0x00004000
+ Workqueue: events async_pf_execute [kvm]
+ Call Trace:
+ <TASK>
+ __schedule+0x33f/0xa40
+ schedule+0x53/0xc0
+ schedule_timeout+0x12a/0x140
+ __wait_for_common+0x8d/0x1d0
+ __flush_work.isra.0+0x19f/0x2c0
+ kvm_clear_async_pf_completion_queue+0x129/0x190 [kvm]
+ kvm_arch_destroy_vm+0x78/0x1b0 [kvm]
+ kvm_put_kvm+0x1c1/0x320 [kvm]
+ async_pf_execute+0x198/0x260 [kvm]
+ process_one_work+0x145/0x2d0
+ worker_thread+0x27e/0x3a0
+ kthread+0xba/0xe0
+ ret_from_fork+0x2d/0x50
+ ret_from_fork_asm+0x11/0x20
+ </TASK>
+
+If kvm_clear_async_pf_completion_queue() actually flushes the workqueue,
+then there's no need to gift async_pf_execute() a reference because all
+invocations of async_pf_execute() will be forced to complete before the
+vCPU and its VM are destroyed/freed. And that in turn fixes the module
+unloading bug as __fput() won't do module_put() on the last vCPU reference
+until the vCPU has been freed, e.g. if closing the vCPU file also puts the
+last reference to the KVM module.
+
+Note that kvm_check_async_pf_completion() may also take the work item off
+the completion queue and so also needs to flush the work queue, as the
+work will not be seen by kvm_clear_async_pf_completion_queue(). Waiting
+on the workqueue could theoretically delay a vCPU due to waiting for the
+work to complete, but that's a very, very small chance, and likely a very
+small delay. kvm_arch_async_page_present_queued() unconditionally makes a
+new request, i.e. will effectively delay entering the guest, so the
+remaining work is really just:
+
+ trace_kvm_async_pf_completed(addr, cr2_or_gpa);
+
+ __kvm_vcpu_wake_up(vcpu);
+
+ mmput(mm);
+
+and mmput() can't drop the last reference to the page tables if the vCPU is
+still alive, i.e. the vCPU won't get stuck tearing down page tables.
+
+Add a helper to do the flushing, specifically to deal with "wakeup all"
+work items, as they aren't actually work items, i.e. are never placed in a
+workqueue. Trying to flush a bogus workqueue entry rightly makes
+__flush_work() complain (kudos to whoever added that sanity check).
+
+Note, commit 5f6de5cbebee ("KVM: Prevent module exit until all VMs are
+freed") *tried* to fix the module refcounting issue by having VMs grab a
+reference to the module, but that only made the bug slightly harder to hit
+as it gave async_pf_execute() a bit more time to complete before the KVM
+module could be unloaded.
+
+Fixes: af585b921e5d ("KVM: Halt vcpu if page it tries to access is swapped out")
+Cc: stable@vger.kernel.org
+Cc: David Matlack <dmatlack@google.com>
+Reviewed-by: Xu Yilun <yilun.xu@intel.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Link: https://lore.kernel.org/r/20240110011533.503302-2-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/async_pf.c | 31 ++++++++++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 5 deletions(-)
+
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 9bfe1d6f6529a..adaf6f141804f 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -88,7 +88,27 @@ static void async_pf_execute(struct work_struct *work)
+ __kvm_vcpu_wake_up(vcpu);
+
+ mmput(mm);
+- kvm_put_kvm(vcpu->kvm);
++}
++
++static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
++{
++ /*
++ * The async #PF is "done", but KVM must wait for the work item itself,
++ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
++ * KVM must ensure *no* code owned by the KVM (the module) can be run
++ * after the last call to module_put(). Note, flushing the work item
++ * is always required when the item is taken off the completion queue.
++ * E.g. even if the vCPU handles the item in the "normal" path, the VM
++ * could be terminated before async_pf_execute() completes.
++ *
++ * Wake all events skip the queue and go straight done, i.e. don't
++ * need to be flushed (but sanity check that the work wasn't queued).
++ */
++ if (work->wakeup_all)
++ WARN_ON_ONCE(work->work.func);
++ else
++ flush_work(&work->work);
++ kmem_cache_free(async_pf_cache, work);
+ }
+
+ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+@@ -115,7 +135,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ #else
+ if (cancel_work_sync(&work->work)) {
+ mmput(work->mm);
+- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ kmem_cache_free(async_pf_cache, work);
+ }
+ #endif
+@@ -127,7 +146,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ list_first_entry(&vcpu->async_pf.done,
+ typeof(*work), link);
+ list_del(&work->link);
+- kmem_cache_free(async_pf_cache, work);
++
++ spin_unlock(&vcpu->async_pf.lock);
++ kvm_flush_and_free_async_pf_work(work);
++ spin_lock(&vcpu->async_pf.lock);
+ }
+ spin_unlock(&vcpu->async_pf.lock);
+
+@@ -152,7 +174,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+
+ list_del(&work->queue);
+ vcpu->async_pf.queued--;
+- kmem_cache_free(async_pf_cache, work);
++ kvm_flush_and_free_async_pf_work(work);
+ }
+ }
+
+@@ -187,7 +209,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ work->arch = *arch;
+ work->mm = current->mm;
+ mmget(work->mm);
+- kvm_get_kvm(work->vcpu->kvm);
+
+ INIT_WORK(&work->work, async_pf_execute);
+
+--
+2.43.0
+
--- /dev/null
+From d9c632ee13a296787aed5430958d435527c5ac33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 11:49:16 +0000
+Subject: KVM: x86/xen: inject vCPU upcall vector when local APIC is enabled
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+[ Upstream commit 8e62bf2bfa46367e14d0ffdcde5aada08759497c ]
+
+Linux guests since commit b1c3497e604d ("x86/xen: Add support for
+HVMOP_set_evtchn_upcall_vector") in v6.0 onwards will use the per-vCPU
+upcall vector when it's advertised in the Xen CPUID leaves.
+
+This upcall is injected through the guest's local APIC as an MSI, unlike
+the older system vector which was merely injected by the hypervisor any
+time the CPU was able to receive an interrupt and the upcall_pending
+flag is set in its vcpu_info.
+
+Effectively, that makes the per-CPU upcall edge triggered instead of
+level triggered, which results in the upcall being lost if the MSI is
+delivered when the local APIC is *disabled*.
+
+Xen checks the vcpu_info->evtchn_upcall_pending flag when the local APIC
+for a vCPU is software enabled (in fact, on any write to the SPIV
+register which doesn't disable the APIC). Do the same in KVM since KVM
+doesn't provide a way for userspace to intervene and trap accesses to
+the SPIV register of a local APIC emulated by KVM.
+
+Fixes: fde0451be8fb3 ("KVM: x86/xen: Support per-vCPU event channel upcall via local APIC")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240227115648.3104-3-dwmw2@infradead.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/lapic.c | 5 ++++-
+ arch/x86/kvm/xen.c | 2 +-
+ arch/x86/kvm/xen.h | 18 ++++++++++++++++++
+ 3 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index edcf45e312b99..bfeafe4855528 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -40,6 +40,7 @@
+ #include "ioapic.h"
+ #include "trace.h"
+ #include "x86.h"
++#include "xen.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+
+@@ -338,8 +339,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+ }
+
+ /* Check if there are APF page ready requests pending */
+- if (enabled)
++ if (enabled) {
+ kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
++ kvm_xen_sw_enable_lapic(apic->vcpu);
++ }
+ }
+
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index a58a426e6b1c0..684a39df60d9e 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -314,7 +314,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+ }
+
+-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+ {
+ struct kvm_lapic_irq irq = { };
+ int r;
+diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
+index 532a535a9e99f..500d9593a5a38 100644
+--- a/arch/x86/kvm/xen.h
++++ b/arch/x86/kvm/xen.h
+@@ -16,6 +16,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;
+
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+ void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
+ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
+@@ -33,6 +34,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue);
+
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++ /*
++ * The local APIC is being enabled. If the per-vCPU upcall vector is
++ * set and the vCPU's evtchn_upcall_pending flag is set, inject the
++ * interrupt.
++ */
++ if (static_branch_unlikely(&kvm_xen_enabled.key) &&
++ vcpu->arch.xen.vcpu_info_cache.active &&
++ vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
++ kvm_xen_inject_vcpu_vector(vcpu);
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ return static_branch_unlikely(&kvm_xen_enabled.key) &&
+@@ -98,6 +112,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
+ {
+ }
+
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ return false;
+--
+2.43.0
+
--- /dev/null
+From 81b348a0d8455ef3cd56b4c076b3657dbc759303 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 12:05:50 +0100
+Subject: landlock: Warn once if a Landlock action is requested while disabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mickaël Salaün <mic@digikod.net>
+
+[ Upstream commit 782191c74875cc33b50263e21d76080b1411884d ]
+
+Because sandboxing can be used as an opportunistic security measure,
+user space may not log that Landlock is unsupported. Let the system
+administrator know if an application tries to use Landlock but fails
+because it isn't enabled at boot time. This may be caused by boot
+loader configurations with an outdated "lsm" kernel command-line
+parameter.
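+
+As an illustration of the opportunistic pattern described above, a
+user-space caller might probe for Landlock roughly like this (a minimal
+sketch; the helper name and the handled access right are only examples,
+not taken from this patch):
+
+  #include <errno.h>
+  #include <linux/landlock.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  static int try_landlock(void)
+  {
+          struct landlock_ruleset_attr attr = {
+                  .handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE,
+          };
+          int fd = syscall(__NR_landlock_create_ruleset, &attr,
+                           sizeof(attr), 0);
+
+          /* Landlock disabled at boot: continue without a sandbox. */
+          if (fd < 0 && errno == EOPNOTSUPP)
+                  return 0;
+          return fd;
+  }
+
+With this patch, such a silent fallback still works, but the kernel now
+logs a one-time warning so the administrator can fix the "lsm" boot
+parameter.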
+
+Cc: stable@vger.kernel.org
+Fixes: 265885daf3e5 ("landlock: Add syscall implementations")
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Günther Noack <gnoack3000@gmail.com>
+Link: https://lore.kernel.org/r/20240227110550.3702236-2-mic@digikod.net
+Signed-off-by: Mickaël Salaün <mic@digikod.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/landlock/syscalls.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
+index 2ca0ccbd905ae..d0cb3d0cbf985 100644
+--- a/security/landlock/syscalls.c
++++ b/security/landlock/syscalls.c
+@@ -32,6 +32,18 @@
+ #include "ruleset.h"
+ #include "setup.h"
+
++static bool is_initialized(void)
++{
++ if (likely(landlock_initialized))
++ return true;
++
++ pr_warn_once(
++ "Disabled but requested by user space. "
++ "You should enable Landlock at boot time: "
++ "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n");
++ return false;
++}
++
+ /**
+ * copy_min_struct_from_user - Safe future-proof argument copying
+ *
+@@ -165,7 +177,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
+ /* Build-time checks. */
+ build_check_abi();
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ if (flags) {
+@@ -311,7 +323,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
+ struct landlock_ruleset *ruleset;
+ int res, err;
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ /* No flag for now. */
+@@ -402,7 +414,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
+ struct landlock_cred_security *new_llcred;
+ int err;
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From 0ff2c698dfd9e1fecd4a9d3e8aa9c9086f475641 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Mar 2024 15:50:34 +0800
+Subject: LoongArch: Change __my_cpu_offset definition to avoid
+ mis-optimization
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit c87e12e0e8c1241410e758e181ca6bf23efa5b5b ]
+
+Since GCC commit 3f13154553f8546a ("df-scan: remove ad-hoc handling of
+global regs in asms"), global registers are no longer forced into the
+def-use chain. As a result, current_thread_info(), current_stack_pointer
+and __my_cpu_offset may be hoisted out of loops because they are no
+longer treated as "volatile variables".
+
+This optimization is still correct for the current_thread_info() and
+current_stack_pointer usages because they are associated with a thread.
+However, it is wrong for __my_cpu_offset because it is associated with a
+CPU rather than a thread: if the thread migrates to a different CPU
+inside the loop, __my_cpu_offset must change accordingly.
+
+Change __my_cpu_offset definition to treat it as a "volatile variable",
+in order to avoid such a mis-optimization.
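+
+As a hypothetical illustration (not code from this patch), consider a
+loop whose body may be preempted and migrated between CPUs:
+
+  while (todo) {
+          /* raw_cpu_ptr() computes its address from __my_cpu_offset */
+          struct foo *st = raw_cpu_ptr(&foo_state);
+          st->events++;
+          cond_resched();  /* the task may move to another CPU here */
+  }
+
+Before this change, the compiler could read the global register backing
+__my_cpu_offset once before the loop and keep using the stale per-CPU
+base after a migration. The "+r" asm constraint in the new definition
+forces a fresh read at every use.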
+
+Cc: stable@vger.kernel.org
+Reported-by: Xiaotian Wu <wuxiaotian@loongson.cn>
+Reported-by: Miao Wang <shankerwangmiao@gmail.com>
+Signed-off-by: Xing Li <lixing@loongson.cn>
+Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
+Signed-off-by: Rui Wang <wangrui@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/percpu.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index 302f0e33975a2..c90c560941685 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -25,7 +25,12 @@ static inline void set_my_cpu_offset(unsigned long off)
+ __my_cpu_offset = off;
+ csr_write64(off, PERCPU_BASE_KS);
+ }
+-#define __my_cpu_offset __my_cpu_offset
++
++#define __my_cpu_offset \
++({ \
++ __asm__ __volatile__("":"+r"(__my_cpu_offset)); \
++ __my_cpu_offset; \
++})
+
+ #define PERCPU_OP(op, asm_op, c_op) \
+ static __always_inline unsigned long __percpu_##op(void *ptr, \
+--
+2.43.0
+
--- /dev/null
+From 5814ac13d976cd458a3d47053bbab5181dfdadff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Mar 2024 15:50:34 +0800
+Subject: LoongArch: Define the __io_aw() hook as mmiowb()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 9c68ece8b2a5c5ff9b2fcaea923dd73efeb174cd ]
+
+Commit fb24ea52f78e0d595852e ("drivers: Remove explicit invocations of
+mmiowb()") removed all mmiowb() calls from drivers, but it says:
+
+"NOTE: mmiowb() has only ever guaranteed ordering in conjunction with
+spin_unlock(). However, pairing each mmiowb() removal in this patch with
+the corresponding call to spin_unlock() is not at all trivial, so there
+is a small chance that this change may regress any drivers incorrectly
+relying on mmiowb() to order MMIO writes between CPUs using lock-free
+synchronisation."
+
+The MMIO in radeon_ring_commit() is protected by a mutex rather than a
+spinlock, but in the mutex fast path it behaves similarly to a spinlock.
+We could add mmiowb() calls to the radeon driver, but the maintainer
+doesn't like such a workaround, and radeon is not the only example of
+mutex-protected MMIO.
+
+Ideally the mmiowb tracking system would be extended from spinlocks to
+mutexes, and maybe other locking primitives, but that is not easy and is
+error-prone. So solve it in the architecture code instead, by simply
+defining the __io_aw() hook as mmiowb(). We then no longer need to
+override queued_spin_unlock(), so the generic definition is used.
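+
+For reference, with asm-generic/io.h an MMIO write accessor expands to
+roughly the following (sketch):
+
+  static inline void writel(u32 value, volatile void __iomem *addr)
+  {
+          __io_bw();                 /* barrier before the MMIO write */
+          __raw_writel(value, addr);
+          __io_aw();                 /* now mmiowb() on LoongArch */
+  }
+
+so the ordering that the removed driver-side mmiowb() calls used to
+provide is supplied by the accessor itself, regardless of whether the
+surrounding lock is a spinlock or a mutex.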
+
+Without this, we get errors like the following when running 'glxgears'
+on weakly ordered architectures such as LoongArch:
+
+radeon 0000:04:00.0: ring 0 stalled for more than 10324msec
+radeon 0000:04:00.0: ring 3 stalled for more than 10240msec
+radeon 0000:04:00.0: GPU lockup (current fence id 0x000000000001f412 last fence id 0x000000000001f414 on ring 3)
+radeon 0000:04:00.0: GPU lockup (current fence id 0x000000000000f940 last fence id 0x000000000000f941 on ring 0)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+radeon 0000:04:00.0: scheduling IB failed (-35).
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35)
+
+Link: https://lore.kernel.org/dri-devel/29df7e26-d7a8-4f67-b988-44353c4270ac@amd.com/T/#t
+Link: https://lore.kernel.org/linux-arch/20240301130532.3953167-1-chenhuacai@loongson.cn/T/#t
+Cc: stable@vger.kernel.org
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/io.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
+index 402a7d9e3a53e..427d147f30d7f 100644
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -72,6 +72,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
+ #define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+ #define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+
++#define __io_aw() mmiowb()
++
+ #include <asm-generic/io.h>
+
+ #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+--
+2.43.0
+
--- /dev/null
+From 6273f863039294f1631db10cc76ef82fbb10e20c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Feb 2024 19:38:39 +0300
+Subject: mac802154: fix llsec key resources release in mac802154_llsec_key_del
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit e8a1e58345cf40b7b272e08ac7b32328b2543e40 ]
+
+mac802154_llsec_key_del() can free a key's resources directly without
+following the RCU rule of waiting for a grace period to elapse. This
+may lead to a use-after-free if llsec_lookup_key() is traversing the
+list of keys in parallel with a key deletion:
+
+refcount_t: addition on 0; use-after-free.
+WARNING: CPU: 4 PID: 16000 at lib/refcount.c:25 refcount_warn_saturate+0x162/0x2a0
+Modules linked in:
+CPU: 4 PID: 16000 Comm: wpan-ping Not tainted 6.7.0 #19
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
+RIP: 0010:refcount_warn_saturate+0x162/0x2a0
+Call Trace:
+ <TASK>
+ llsec_lookup_key.isra.0+0x890/0x9e0
+ mac802154_llsec_encrypt+0x30c/0x9c0
+ ieee802154_subif_start_xmit+0x24/0x1e0
+ dev_hard_start_xmit+0x13e/0x690
+ sch_direct_xmit+0x2ae/0xbc0
+ __dev_queue_xmit+0x11dd/0x3c20
+ dgram_sendmsg+0x90b/0xd60
+ __sys_sendto+0x466/0x4c0
+ __x64_sys_sendto+0xe0/0x1c0
+ do_syscall_64+0x45/0xf0
+ entry_SYSCALL_64_after_hwframe+0x6e/0x76
+
+Also, ieee802154_llsec_key_entry structures are not freed by
+mac802154_llsec_key_del():
+
+unreferenced object 0xffff8880613b6980 (size 64):
+ comm "iwpan", pid 2176, jiffies 4294761134 (age 60.475s)
+ hex dump (first 32 bytes):
+ 78 0d 8f 18 80 88 ff ff 22 01 00 00 00 00 ad de x.......".......
+ 00 00 00 00 00 00 00 00 03 00 cd ab 00 00 00 00 ................
+ backtrace:
+ [<ffffffff81dcfa62>] __kmem_cache_alloc_node+0x1e2/0x2d0
+ [<ffffffff81c43865>] kmalloc_trace+0x25/0xc0
+ [<ffffffff88968b09>] mac802154_llsec_key_add+0xac9/0xcf0
+ [<ffffffff8896e41a>] ieee802154_add_llsec_key+0x5a/0x80
+ [<ffffffff8892adc6>] nl802154_add_llsec_key+0x426/0x5b0
+ [<ffffffff86ff293e>] genl_family_rcv_msg_doit+0x1fe/0x2f0
+ [<ffffffff86ff46d1>] genl_rcv_msg+0x531/0x7d0
+ [<ffffffff86fee7a9>] netlink_rcv_skb+0x169/0x440
+ [<ffffffff86ff1d88>] genl_rcv+0x28/0x40
+ [<ffffffff86fec15c>] netlink_unicast+0x53c/0x820
+ [<ffffffff86fecd8b>] netlink_sendmsg+0x93b/0xe60
+ [<ffffffff86b91b35>] ____sys_sendmsg+0xac5/0xca0
+ [<ffffffff86b9c3dd>] ___sys_sendmsg+0x11d/0x1c0
+ [<ffffffff86b9c65a>] __sys_sendmsg+0xfa/0x1d0
+ [<ffffffff88eadbf5>] do_syscall_64+0x45/0xf0
+ [<ffffffff890000ea>] entry_SYSCALL_64_after_hwframe+0x6e/0x76
+
+Handle the proper resource release in the RCU callback function
+mac802154_llsec_key_del_rcu().
+
+Note that if llsec_lookup_key() finds a key, it gets a refcount via
+llsec_key_get() and locally copies key id from key_entry (which is a
+list element). So it's safe to call llsec_key_put() and free the list
+entry after the RCU grace period elapses.
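+
+This follows the standard RCU deletion pattern (generic sketch, not the
+exact driver code):
+
+  /* reader side */
+  rcu_read_lock();
+  list_for_each_entry_rcu(pos, &head, list)
+          use(pos);
+  rcu_read_unlock();
+
+  /* updater side */
+  list_del_rcu(&pos->list);
+  call_rcu(&pos->rcu, free_cb);  /* free_cb runs after a grace period */
+
+so readers that still hold a pointer to the entry can finish safely
+before the memory and the key reference are released.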
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 5d637d5aabd8 ("mac802154: add llsec structures and mutators")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Acked-by: Alexander Aring <aahringo@redhat.com>
+Message-ID: <20240228163840.6667-1-pchelkin@ispras.ru>
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/cfg802154.h | 1 +
+ net/mac802154/llsec.c | 18 +++++++++++++-----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
+index d8d8719315fd8..5f7f28c9edcb6 100644
+--- a/include/net/cfg802154.h
++++ b/include/net/cfg802154.h
+@@ -267,6 +267,7 @@ struct ieee802154_llsec_key {
+
+ struct ieee802154_llsec_key_entry {
+ struct list_head list;
++ struct rcu_head rcu;
+
+ struct ieee802154_llsec_key_id id;
+ struct ieee802154_llsec_key *key;
+diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
+index 55550ead2ced8..a4cc9d077c59c 100644
+--- a/net/mac802154/llsec.c
++++ b/net/mac802154/llsec.c
+@@ -265,19 +265,27 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+ return -ENOMEM;
+ }
+
++static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
++{
++ struct ieee802154_llsec_key_entry *pos;
++ struct mac802154_llsec_key *mkey;
++
++ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
++ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
++
++ llsec_key_put(mkey);
++ kfree_sensitive(pos);
++}
++
+ int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+ const struct ieee802154_llsec_key_id *key)
+ {
+ struct ieee802154_llsec_key_entry *pos;
+
+ list_for_each_entry(pos, &sec->table.keys, list) {
+- struct mac802154_llsec_key *mkey;
+-
+- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+-
+ if (llsec_key_id_equal(&pos->id, key)) {
+ list_del_rcu(&pos->list);
+- llsec_key_put(mkey);
++ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
+ return 0;
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From 099fe23afbbc96faf0ea00b971e79d4cb6e0b705 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jan 2024 15:10:17 +0800
+Subject: md/raid5: fix atomicity violation in raid5_cache_count
+
+From: Gui-Dong Han <2045gemini@gmail.com>
+
+[ Upstream commit dfd2bf436709b2bccb78c2dda550dde93700efa7 ]
+
+In raid5_cache_count():
+ if (conf->max_nr_stripes < conf->min_nr_stripes)
+ return 0;
+ return conf->max_nr_stripes - conf->min_nr_stripes;
+The current check is ineffective, as the values could change immediately
+after being checked.
+
+In raid5_set_cache_size():
+ ...
+ conf->min_nr_stripes = size;
+ ...
+ while (size > conf->max_nr_stripes)
+ conf->min_nr_stripes = conf->max_nr_stripes;
+ ...
+
+Due to intermediate value updates in raid5_set_cache_size(), concurrent
+execution of raid5_cache_count() and raid5_set_cache_size() may lead to
+inconsistent reads of conf->max_nr_stripes and conf->min_nr_stripes.
+The current checks are ineffective as values could change immediately
+after being checked, raising the risk of conf->min_nr_stripes exceeding
+conf->max_nr_stripes and potentially causing an integer overflow.
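+
+One possible interleaving (illustrative) that the change below closes:
+
+  raid5_cache_count()                    raid5_set_cache_size(bigger size)
+  -----------------------------------    ---------------------------------
+  if (max_nr_stripes < min_nr_stripes)
+          return 0;  /* check passes */
+                                         conf->min_nr_stripes = size;
+                                         /* min > max until stripes grow */
+  return max_nr_stripes - min_nr_stripes;
+  /* negative difference, returned to the shrinker as a huge
+     unsigned long count */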
+
+This possible bug was found by an experimental static analysis tool
+developed by our team. This tool analyzes the locking APIs to extract
+function pairs that can be concurrently executed, and then analyzes the
+instructions in the paired functions to identify possible concurrency bugs
+including data races and atomicity violations. The above possible bug is
+reported when our tool analyzes the source code of Linux 6.2.
+
+To resolve this issue, it is suggested to introduce local variables
+'min_stripes' and 'max_stripes' in raid5_cache_count() to ensure the
+values remain stable throughout the check. Adding locks in
+raid5_cache_count() fails to resolve atomicity violations, as
+raid5_set_cache_size() may hold intermediate values of
+conf->min_nr_stripes while unlocked. With this patch applied, our tool no
+longer reports the bug, with the kernel configuration allyesconfig for
+x86_64. Due to the lack of associated hardware, we cannot test the patch
+in runtime testing, and just verify it according to the code logic.
+
+Fixes: edbe83ab4c27 ("md/raid5: allow the stripe_cache to grow and shrink.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <2045gemini@gmail.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20240112071017.16313-1-2045gemini@gmail.com
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/raid5.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e4564ca1f2434..8cf2317857e0a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2420,7 +2420,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
+ atomic_inc(&conf->active_stripes);
+
+ raid5_release_stripe(sh);
+- conf->max_nr_stripes++;
++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
+ return 1;
+ }
+
+@@ -2717,7 +2717,7 @@ static int drop_one_stripe(struct r5conf *conf)
+ shrink_buffers(sh);
+ free_stripe(conf->slab_cache, sh);
+ atomic_dec(&conf->active_stripes);
+- conf->max_nr_stripes--;
++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
+ return 1;
+ }
+
+@@ -6891,7 +6891,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ if (size <= 16 || size > 32768)
+ return -EINVAL;
+
+- conf->min_nr_stripes = size;
++ WRITE_ONCE(conf->min_nr_stripes, size);
+ mutex_lock(&conf->cache_size_mutex);
+ while (size < conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+@@ -6903,7 +6903,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ mutex_lock(&conf->cache_size_mutex);
+ while (size > conf->max_nr_stripes)
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
+- conf->min_nr_stripes = conf->max_nr_stripes;
++ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
+ result = -ENOMEM;
+ break;
+ }
+@@ -7468,11 +7468,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+ {
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
++ int max_stripes = READ_ONCE(conf->max_nr_stripes);
++ int min_stripes = READ_ONCE(conf->min_nr_stripes);
+
+- if (conf->max_nr_stripes < conf->min_nr_stripes)
++ if (max_stripes < min_stripes)
+ /* unlikely, but not impossible */
+ return 0;
+- return conf->max_nr_stripes - conf->min_nr_stripes;
++ return max_stripes - min_stripes;
+ }
+
+ static struct r5conf *setup_conf(struct mddev *mddev)
+--
+2.43.0
+
--- /dev/null
+From 8a72868fc33f5271fbf4805194ff2c4897c6be3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Jan 2024 15:55:40 +0200
+Subject: media: mc: Add local pad to pipeline regardless of the link state
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 78f0daa026d4c5e192d31801d1be6caf88250220 ]
+
+When building pipelines by following links, the
+media_pipeline_explore_next_link() function only traverses enabled
+links. The remote pad of a disabled link is not added to the pipeline,
+and neither is the local pad. While the former is correct as disabled
+links should not be followed, not adding the local pad breaks processing
+of the MEDIA_PAD_FL_MUST_CONNECT flag.
+
+The MEDIA_PAD_FL_MUST_CONNECT flag is checked in the
+__media_pipeline_start() function that iterates over all pads after
+populating the pipeline. If the pad is not present, the check gets
+skipped, rendering it useless.
+
+Fix this by adding the local pad of all links regardless of their state,
+only skipping the remote pad for disabled links.
+
+Cc: stable@vger.kernel.org # 6.1
+Fixes: ae219872834a ("media: mc: entity: Rewrite media_pipeline_start()")
+Reported-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Closes: https://lore.kernel.org/linux-media/7658a15a-80c5-219f-2477-2a94ba6c6ba1@kontron.de
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/mc/mc-entity.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index f268cf66053e1..20a2630455f2c 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -594,13 +594,6 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+- /* Skip links that are not enabled. */
+- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+- dev_dbg(walk->mdev->dev,
+- "media pipeline: skipping link (disabled)\n");
+- return 0;
+- }
+-
+ /* Get the local pad and remote pad. */
+ if (link->source->entity == pad->entity) {
+ local = link->source;
+@@ -622,13 +615,20 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ }
+
+ /*
+- * Add the local and remote pads of the link to the pipeline and push
+- * them to the stack, if they're not already present.
++ * Add the local pad of the link to the pipeline and push it to the
++ * stack, if not already present.
+ */
+ ret = media_pipeline_add_pad(pipe, walk, local);
+ if (ret)
+ return ret;
+
++ /* Similarly, add the remote pad, but only if the link is enabled. */
++ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
++ dev_dbg(walk->mdev->dev,
++ "media pipeline: skipping link (disabled)\n");
++ return 0;
++ }
++
+ ret = media_pipeline_add_pad(pipe, walk, remote);
+ if (ret)
+ return ret;
+--
+2.43.0
+
--- /dev/null
+From 74a5201555d3927a4bb638a85ab7b0a4cfe2b076 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 00:30:02 +0200
+Subject: media: mc: Add num_links flag to media_pad
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit baeddf94aa61879b118f2faa37ed126d772670cc ]
+
+Maintain a counter of the links connected to a pad in the media_pad
+structure. This helps check whether a pad is connected to anything, which
+will be used in the pipeline building code.
+
+Cc: stable@vger.kernel.org # 6.1
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/mc/mc-entity.c | 6 ++++++
+ include/media/media-entity.h | 2 ++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 688780c8734d4..c7cb49205b017 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -957,6 +957,9 @@ static void __media_entity_remove_link(struct media_entity *entity,
+
+ /* Remove the reverse links for a data link. */
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
++ link->source->num_links--;
++ link->sink->num_links--;
++
+ if (link->source->entity == entity)
+ remote = link->sink->entity;
+ else
+@@ -1068,6 +1071,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ sink->num_links++;
+ source->num_links++;
+
++ link->source->num_links++;
++ link->sink->num_links++;
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(media_create_pad_link);
+diff --git a/include/media/media-entity.h b/include/media/media-entity.h
+index 28c9de8a1f348..03bb0963942bd 100644
+--- a/include/media/media-entity.h
++++ b/include/media/media-entity.h
+@@ -205,6 +205,7 @@ enum media_pad_signal_type {
+ * @graph_obj: Embedded structure containing the media object common data
+ * @entity: Entity this pad belongs to
+ * @index: Pad index in the entity pads array, numbered from 0 to n
++ * @num_links: Number of links connected to this pad
+ * @sig_type: Type of the signal inside a media pad
+ * @flags: Pad flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+@@ -216,6 +217,7 @@ struct media_pad {
+ struct media_gobj graph_obj; /* must be first field in struct */
+ struct media_entity *entity;
+ u16 index;
++ u16 num_links;
+ enum media_pad_signal_type sig_type;
+ unsigned long flags;
+
+--
+2.43.0
+
--- /dev/null
+From 2e22b4e9476fee2d0d918fd577a6129a4d2efbd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 01:04:52 +0200
+Subject: media: mc: Expand MUST_CONNECT flag to always require an enabled link
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit b3decc5ce7d778224d266423b542326ad469cb5f ]
+
+The MEDIA_PAD_FL_MUST_CONNECT flag indicates that the pad requires an
+enabled link to stream, but only if it has any link at all. This makes
+little sense: if a pad is part of a pipeline, there are very few use
+cases where an enabled link should be mandatory only when links exist. A
+review of in-tree drivers confirms they all need an enabled link for
+pads marked with the MEDIA_PAD_FL_MUST_CONNECT flag.
+
+Expand the scope of the flag by rejecting pads that have no links at
+all. This requires modifying the pipeline build code to add those pads
+to the pipeline.
+
+Cc: stable@vger.kernel.org # 6.1
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/mediactl/media-types.rst | 11 ++--
+ drivers/media/mc/mc-entity.c | 53 +++++++++++++++----
+ 2 files changed, 48 insertions(+), 16 deletions(-)
+
+diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
+index 0ffeece1e0c8e..6332e8395263b 100644
+--- a/Documentation/userspace-api/media/mediactl/media-types.rst
++++ b/Documentation/userspace-api/media/mediactl/media-types.rst
+@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
+ are origins of links.
+
+ * - ``MEDIA_PAD_FL_MUST_CONNECT``
+- - If this flag is set and the pad is linked to any other pad, then
+- at least one of those links must be enabled for the entity to be
+- able to stream. There could be temporary reasons (e.g. device
+- configuration dependent) for the pad to need enabled links even
+- when this flag isn't set; the absence of the flag doesn't imply
+- there is none.
++ - If this flag is set, then for this pad to be able to stream, it must
++ be connected by at least one enabled link. There could be temporary
++ reasons (e.g. device configuration dependent) for the pad to need
++ enabled links even when this flag isn't set; the absence of the flag
++ doesn't imply there is none.
+
+
+ One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 50b68b4dde5d0..8919df09e3e8d 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -509,14 +509,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
+
+ /*
+ * Move the top entry link cursor to the next link. If all links of the entry
+- * have been visited, pop the entry itself.
++ * have been visited, pop the entry itself. Return true if the entry has been
++ * popped.
+ */
+-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
++static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ {
+ struct media_pipeline_walk_entry *entry;
+
+ if (WARN_ON(walk->stack.top < 0))
+- return;
++ return false;
+
+ entry = media_pipeline_walk_top(walk);
+
+@@ -526,7 +527,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ walk->stack.top);
+
+ walk->stack.top--;
+- return;
++ return true;
+ }
+
+ entry->links = entry->links->next;
+@@ -534,6 +535,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: moved entry %u to next link\n",
+ walk->stack.top);
++
++ return false;
+ }
+
+ /* Free all memory allocated while walking the pipeline. */
+@@ -583,11 +586,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ struct media_link *link;
+ struct media_pad *local;
+ struct media_pad *remote;
++ bool last_link;
+ int ret;
+
+ origin = entry->pad;
+ link = list_entry(entry->links, typeof(*link), list);
+- media_pipeline_walk_pop(walk);
++ last_link = media_pipeline_walk_pop(walk);
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: exploring link '%s':%u -> '%s':%u\n",
+@@ -612,7 +616,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ local->index)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (no route)\n");
+- return 0;
++ goto done;
+ }
+
+ /*
+@@ -627,13 +631,44 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (disabled)\n");
+- return 0;
++ goto done;
+ }
+
+ ret = media_pipeline_add_pad(pipe, walk, remote);
+ if (ret)
+ return ret;
+
++done:
++ /*
++ * If we're done iterating over links, iterate over pads of the entity.
++ * This is necessary to discover pads that are not connected with any
++ * link. Those are dead ends from a pipeline exploration point of view,
++ * but are still part of the pipeline and need to be added to enable
++ * proper validation.
++ */
++ if (!last_link)
++ return 0;
++
++ dev_dbg(walk->mdev->dev,
++ "media pipeline: adding unconnected pads of '%s'\n",
++ local->entity->name);
++
++ media_entity_for_each_pad(origin->entity, local) {
++ /*
++ * Skip the origin pad (already handled), pad that have links
++ * (already discovered through iterating over links) and pads
++ * not internally connected.
++ */
++ if (origin == local || !local->num_links ||
++ !media_entity_has_pad_interdep(origin->entity, origin->index,
++ local->index))
++ continue;
++
++ ret = media_pipeline_add_pad(pipe, walk, local);
++ if (ret)
++ return ret;
++ }
++
+ return 0;
+ }
+
+@@ -745,7 +780,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ struct media_pad *pad = ppad->pad;
+ struct media_entity *entity = pad->entity;
+ bool has_enabled_link = false;
+- bool has_link = false;
+ struct media_link *link;
+
+ dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
+@@ -775,7 +809,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ /* Record if the pad has links and enabled links. */
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ has_enabled_link = true;
+- has_link = true;
+
+ /*
+ * Validate the link if it's enabled and has the
+@@ -813,7 +846,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
+ * ensure that it has either no link or an enabled link.
+ */
+- if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
++ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) &&
+ !has_enabled_link) {
+ dev_dbg(mdev->dev,
+ "Pad '%s':%u must be connected by an enabled link\n",
+--
+2.43.0
+
--- /dev/null
+From 93cddf2dca9890a2a81ca28f1dbb972ca92a0ecd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 00:24:12 +0200
+Subject: media: mc: Fix flags handling when creating pad links
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 422f7af75d03d50895938d38bc9cb8be759c440f ]
+
+The media_create_pad_link() function doesn't clear or reject link type
+flags passed by the caller, nor does it set the DATA_LINK flag. It only
+works because the MEDIA_LNK_FL_DATA_LINK flag's value is 0.
+
+Fix it by returning an error if any link type flag is set. This doesn't
+introduce any regression, as nobody calls the media_create_pad_link()
+function with link type flags (easily checked by grepping for the flag
+in the source code, there are very few hits).
+
+Set the MEDIA_LNK_FL_DATA_LINK flag explicitly; this is a no-op that the
+compiler will optimize out, but it is still useful to make the code more
+explicit and easier to understand.
+
+Cc: stable@vger.kernel.org # 6.1
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/mc/mc-entity.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 20a2630455f2c..688780c8734d4 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -1017,6 +1017,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ struct media_link *link;
+ struct media_link *backlink;
+
++ if (flags & MEDIA_LNK_FL_LINK_TYPE)
++ return -EINVAL;
++
++ flags |= MEDIA_LNK_FL_DATA_LINK;
++
+ if (WARN_ON(!source || !sink) ||
+ WARN_ON(source_pad >= source->num_pads) ||
+ WARN_ON(sink_pad >= sink->num_pads))
+@@ -1032,7 +1037,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+
+ link->source = &source->pads[source_pad];
+ link->sink = &sink->pads[sink_pad];
+- link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
++ link->flags = flags;
+
+ /* Initialize graph object embedded at the new link */
+ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
+--
+2.43.0
+
--- /dev/null
+From 5412d9e977575363a897cc6097f9063afaea8e23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jan 2024 00:30:02 +0200
+Subject: media: mc: Rename pad variable to clarify intent
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 9ec9109cf9f611e3ec9ed0355afcc7aae5e73176 ]
+
+The pad local variable in the media_pipeline_explore_next_link()
+function is used to store the pad through which the entity has been
+reached. Rename it to origin to reflect that and make the code easier to
+read. This will be even more important in subsequent commits when
+expanding the function with additional logic.
+
+Cc: stable@vger.kernel.org # 6.1
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/mc/mc-entity.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index c7cb49205b017..50b68b4dde5d0 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -579,13 +579,13 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ struct media_pipeline_walk *walk)
+ {
+ struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
+- struct media_pad *pad;
++ struct media_pad *origin;
+ struct media_link *link;
+ struct media_pad *local;
+ struct media_pad *remote;
+ int ret;
+
+- pad = entry->pad;
++ origin = entry->pad;
+ link = list_entry(entry->links, typeof(*link), list);
+ media_pipeline_walk_pop(walk);
+
+@@ -595,7 +595,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ link->sink->entity->name, link->sink->index);
+
+ /* Get the local pad and remote pad. */
+- if (link->source->entity == pad->entity) {
++ if (link->source->entity == origin->entity) {
+ local = link->source;
+ remote = link->sink;
+ } else {
+@@ -607,8 +607,9 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ * Skip links that originate from a different pad than the incoming pad
+ * that is not connected internally in the entity to the incoming pad.
+ */
+- if (pad != local &&
+- !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
++ if (origin != local &&
++ !media_entity_has_pad_interdep(origin->entity, origin->index,
++ local->index)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (no route)\n");
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From a462a948d7f18612634db105e1de5231b7ae4831 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jan 2024 17:09:09 +0900
+Subject: media: staging: ipu3-imgu: Set fields before media_entity_pads_init()
+
+From: Hidenori Kobayashi <hidenorik@chromium.org>
+
+[ Upstream commit 87318b7092670d4086bfec115a0280a60c51c2dd ]
+
+The imgu driver fails to probe with the following messages because it
+does not set the pads' flags before calling media_entity_pads_init().
+
+[ 14.596315] ipu3-imgu 0000:00:05.0: failed initialize subdev media entity (-22)
+[ 14.596322] ipu3-imgu 0000:00:05.0: failed to register subdev0 ret (-22)
+[ 14.596327] ipu3-imgu 0000:00:05.0: failed to register pipes (-22)
+[ 14.596331] ipu3-imgu 0000:00:05.0: failed to create V4L2 devices (-22)
+
+Fix the initialization order so that the driver probe succeeds. The ops
+initialization is also moved together for readability.
+
+Fixes: a0ca1627b450 ("media: staging/intel-ipu3: Add v4l2 driver based on media framework")
+Cc: <stable@vger.kernel.org> # 6.7
+Cc: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Hidenori Kobayashi <hidenorik@chromium.org>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/ipu3/ipu3-v4l2.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index e530767e80a5d..55cc44a401bc4 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Initialize subdev media entity */
++ imgu_sd->subdev.entity.ops = &imgu_media_ops;
++ for (i = 0; i < IMGU_NODE_NUM; i++) {
++ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
++ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
++ }
+ r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ imgu_sd->subdev_pads);
+ if (r) {
+@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ "failed initialize subdev media entity (%d)\n", r);
+ return r;
+ }
+- imgu_sd->subdev.entity.ops = &imgu_media_ops;
+- for (i = 0; i < IMGU_NODE_NUM; i++) {
+- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+- }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
+@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ }
+
+ /* Initialize media entities */
++ node->vdev_pad.flags = node->output ?
++ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
++ vdev->entity.ops = NULL;
+ r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
+ if (r) {
+ dev_err(dev, "failed initialize media entity (%d)\n", r);
+ mutex_destroy(&node->lock);
+ return r;
+ }
+- node->vdev_pad.flags = node->output ?
+- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+- vdev->entity.ops = NULL;
+
+ /* Initialize vbq */
+ vbq->type = node->vdev_fmt.type;
+--
+2.43.0
+
--- /dev/null
+From d9f143d954a08a6cc8d71830da6bd49b2b28b597 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Dec 2023 13:50:30 +0800
+Subject: media: xc4000: Fix atomicity violation in xc4000_get_frequency
+
+From: Gui-Dong Han <2045gemini@gmail.com>
+
+[ Upstream commit 36d503ad547d1c75758a6fcdbec2806f1b6aeb41 ]
+
+In xc4000_get_frequency():
+ *freq = priv->freq_hz + priv->freq_offset;
+The code accesses priv->freq_hz and priv->freq_offset without holding any
+lock.
+
+In xc4000_set_params():
+ // Code that updates priv->freq_hz and priv->freq_offset
+ ...
+
+xc4000_get_frequency() and xc4000_set_params() may execute concurrently,
+risking inconsistent reads of priv->freq_hz and priv->freq_offset. Since
+these related values may be updated while they are being read, the result
+can be an incorrect frequency calculation, i.e. an atomicity violation.
+
+This possible bug was found by an experimental static analysis tool
+developed by our team, BassCheck[1]. This tool analyzes the locking APIs
+to extract function pairs that can be concurrently executed, and then
+analyzes the instructions in the paired functions to identify possible
+concurrency bugs including data races and atomicity violations. The above
+possible bug is reported when our tool analyzes the source code of
+Linux 6.2.
+
+To address this issue, it is proposed to add a mutex lock pair in
+xc4000_get_frequency() to ensure atomicity. With this patch applied, our
+tool no longer reports the possible bug, with the kernel configuration
+allyesconfig for x86_64. Due to the lack of associated hardware, we cannot
+test the patch in runtime testing, and just verify it according to the
+code logic.
+
+[1] https://sites.google.com/view/basscheck/
+
+Fixes: 4c07e32884ab ("[media] xc4000: Fix get_frequency()")
+Cc: stable@vger.kernel.org
+Reported-by: BassCheck <bass@buaa.edu.cn>
+Signed-off-by: Gui-Dong Han <2045gemini@gmail.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/tuners/xc4000.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 57ded9ff3f043..29bc63021c5aa 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ struct xc4000_priv *priv = fe->tuner_priv;
+
++ mutex_lock(&priv->lock);
+ *freq = priv->freq_hz + priv->freq_offset;
+
+ if (debug) {
+- mutex_lock(&priv->lock);
+ if ((priv->cur_fw.type
+ & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
+ u16 snr = 0;
+@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ return 0;
+ }
+ }
+- mutex_unlock(&priv->lock);
+ }
++ mutex_unlock(&priv->lock);
+
+ dprintk(1, "%s()\n", __func__);
+
+--
+2.43.0
+
--- /dev/null
+From 2a360a124410f0d7f2d805c3381b5e8a0cfe859e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Mar 2024 16:04:23 +0800
+Subject: memtest: use {READ,WRITE}_ONCE in memory scanning
+
+From: Qiang Zhang <qiang4.zhang@intel.com>
+
+[ Upstream commit 82634d7e24271698e50a3ec811e5f50de790a65f ]
+
+memtest failed to find bad memory when compiled with clang, so use
+{WRITE,READ}_ONCE() to access memory and avoid compiler over-optimization.
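+
+A minimal illustration (not the exact memtest code) of why plain
+accesses are not enough:
+
+  *p = pattern;
+  if (*p == pattern)  /* the compiler may fold this read into the     */
+          continue;   /* preceding store and assume it always matches */
+
+With WRITE_ONCE()/READ_ONCE() the store and the read-back are performed
+as volatile accesses, so a faulty cell that flips bits is actually read
+back and detected.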
+
+Link: https://lkml.kernel.org/r/20240312080422.691222-1-qiang4.zhang@intel.com
+Signed-off-by: Qiang Zhang <qiang4.zhang@intel.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memtest.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/memtest.c b/mm/memtest.c
+index f53ace709ccd8..d407373f225b4 100644
+--- a/mm/memtest.c
++++ b/mm/memtest.c
+@@ -46,10 +46,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
+ last_bad = 0;
+
+ for (p = start; p < end; p++)
+- *p = pattern;
++ WRITE_ONCE(*p, pattern);
+
+ for (p = start; p < end; p++, start_phys_aligned += incr) {
+- if (*p == pattern)
++ if (READ_ONCE(*p) == pattern)
+ continue;
+ if (start_phys_aligned == last_bad + incr) {
+ last_bad += incr;
+--
+2.43.0
+
--- /dev/null
+From 75539056c6465b424394f4417e4ebe34400175da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Mar 2024 14:03:56 +0000
+Subject: mm: swap: fix race between free_swap_and_cache() and swapoff()
+
+From: Ryan Roberts <ryan.roberts@arm.com>
+
+[ Upstream commit 82b1c07a0af603e3c47b906c8e991dc96f01688e ]
+
+There was previously a theoretical window where swapoff() could run and
+tear down a swap_info_struct while a call to free_swap_and_cache() was
+running in another thread. This could cause, amongst other bad
+possibilities, swap_page_trans_huge_swapped() (called by
+free_swap_and_cache()) to access the freed memory for swap_map.
+
+This is a theoretical problem and I haven't been able to provoke it from a
+test case. But there has been agreement based on code review that this is
+possible (see link below).
+
+Fix it by using get_swap_device()/put_swap_device(), which will stall
+swapoff(). There was an extra check in _swap_info_get() to confirm that
+the swap entry was not free. This isn't present in get_swap_device()
+because it doesn't make sense in general due to the race between getting
+the reference and swapoff. So I've added an equivalent check directly in
+free_swap_and_cache().
+
+Details of how to provoke one possible issue (thanks to David Hildenbrand
+for deriving this):
+
+--8<-----
+
+__swap_entry_free() might be the last user and result in
+"count == SWAP_HAS_CACHE".
+
+swapoff->try_to_unuse() will stop as soon as si->inuse_pages==0.
+
+So the question is: could someone reclaim the folio and turn
+si->inuse_pages into 0 before we have completed swap_page_trans_huge_swapped()?
+
+Imagine the following: a 2 MiB folio in the swapcache. Only 2 subpages are
+still referenced by swap entries.
+
+Process 1 still references subpage 0 via swap entry.
+Process 2 still references subpage 1 via swap entry.
+
+Process 1 quits. Calls free_swap_and_cache().
+-> count == SWAP_HAS_CACHE
+[then, preempted in the hypervisor etc.]
+
+Process 2 quits. Calls free_swap_and_cache().
+-> count == SWAP_HAS_CACHE
+
+Process 2 goes ahead, passes swap_page_trans_huge_swapped(), and calls
+__try_to_reclaim_swap().
+
+__try_to_reclaim_swap()->folio_free_swap()->delete_from_swap_cache()->
+put_swap_folio()->free_swap_slot()->swapcache_free_entries()->
+swap_entry_free()->swap_range_free()->
+...
+WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
+
+What stops swapoff from succeeding after process 2 reclaimed the swap cache
+but before process 1 finished its call to swap_page_trans_huge_swapped()?
+
+--8<-----
+
+Link: https://lkml.kernel.org/r/20240306140356.3974886-1-ryan.roberts@arm.com
+Fixes: 7c00bafee87c ("mm/swap: free swap slots in batch")
+Closes: https://lore.kernel.org/linux-mm/65a66eb9-41f8-4790-8db2-0c70ea15979f@redhat.com/
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Huang, Ying" <ying.huang@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/swapfile.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 324844f98d67c..0d6182db44a6a 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1229,6 +1229,11 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ * with get_swap_device() and put_swap_device(), unless the swap
+ * functions call get/put_swap_device() by themselves.
+ *
++ * Note that when only holding the PTL, swapoff might succeed immediately
++ * after freeing a swap entry. Therefore, immediately after
++ * __swap_entry_free(), the swap info might become stale and should not
++ * be touched without a prior get_swap_device().
++ *
+ * Check whether swap entry is valid in the swap device. If so,
+ * return pointer to swap_info_struct, and keep the swap entry valid
+ * via preventing the swap device from being swapoff, until
+@@ -1630,13 +1635,19 @@ int free_swap_and_cache(swp_entry_t entry)
+ if (non_swap_entry(entry))
+ return 1;
+
+- p = _swap_info_get(entry);
++ p = get_swap_device(entry);
+ if (p) {
++ if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) {
++ put_swap_device(p);
++ return 0;
++ }
++
+ count = __swap_entry_free(p, entry);
+ if (count == SWAP_HAS_CACHE &&
+ !swap_page_trans_huge_swapped(p, entry))
+ __try_to_reclaim_swap(p, swp_offset(entry),
+ TTRS_UNMAPPED | TTRS_FULL);
++ put_swap_device(p);
+ }
+ return p != NULL;
+ }
+--
+2.43.0
+
--- /dev/null
+From 6e96794594357cc1da9669164e69617dd2f66a1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Mar 2024 10:44:38 +0900
+Subject: mmc: core: Fix switch on gp3 partition
+
+From: Dominique Martinet <dominique.martinet@atmark-techno.com>
+
+[ Upstream commit 4af59a8df5ea930038cd3355e822f5eedf4accc1 ]
+
+Commit e7794c14fd73 ("mmc: rpmb: fixes pause retune on all RPMB
+partitions.") added a mask check for 'part_type', but the mask used was
+wrong, leading to the code intended for rpmb also being executed for GP3.
+
+On some MMCs (but not all) this would make the gp3 partition inaccessible:
+armadillo:~# head -c 1 < /dev/mmcblk2gp3
+head: standard input: I/O error
+armadillo:~# dmesg -c
+[ 422.976583] mmc2: running CQE recovery
+[ 423.058182] mmc2: running CQE recovery
+[ 423.137607] mmc2: running CQE recovery
+[ 423.137802] blk_update_request: I/O error, dev mmcblk2gp3, sector 0 op 0x0:(READ) flags 0x80700 phys_seg 4 prio class 0
+[ 423.237125] mmc2: running CQE recovery
+[ 423.318206] mmc2: running CQE recovery
+[ 423.397680] mmc2: running CQE recovery
+[ 423.397837] blk_update_request: I/O error, dev mmcblk2gp3, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 0
+[ 423.408287] Buffer I/O error on dev mmcblk2gp3, logical block 0, async page read
+
+The part_type values of interest here are defined as follows:
+main 0
+boot0 1
+boot1 2
+rpmb 3
+gp0 4
+gp1 5
+gp2 6
+gp3 7
+
+So mask with EXT_CSD_PART_CONFIG_ACC_MASK (7) to correctly identify rpmb.
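+
+Worked example: for gp3, part_type == 7, so the old check computed
+(7 & EXT_CSD_PART_CONFIG_ACC_RPMB) == (7 & 3) == 3, which matched the
+RPMB value and sent gp3 down the RPMB-only pause/retune path. With the
+full mask the check is (7 & EXT_CSD_PART_CONFIG_ACC_MASK) == 7, which is
+not equal to 3, so only the real RPMB partition (part_type == 3) takes
+that path.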
+
+Fixes: e7794c14fd73 ("mmc: rpmb: fixes pause retune on all RPMB partitions.")
+Cc: stable@vger.kernel.org
+Cc: Jorge Ramirez-Ortiz <jorge@foundries.io>
+Signed-off-by: Dominique Martinet <dominique.martinet@atmark-techno.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20240306-mmc-partswitch-v1-1-bf116985d950@codewreck.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/block.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index ea60efaecb0dd..4688a658d6a6d 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -889,10 +889,11 @@ static const struct block_device_operations mmc_bdops = {
+ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ unsigned int part_type)
+ {
+- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+- if ((part_type & mask) == mask) {
++ if ((part_type & mask) == rpmb) {
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+@@ -907,10 +908,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ static int mmc_blk_part_switch_post(struct mmc_card *card,
+ unsigned int part_type)
+ {
+- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+- if ((part_type & mask) == mask) {
++ if ((part_type & mask) == rpmb) {
+ mmc_retune_unpause(card->host);
+ if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ ret = mmc_cmdq_enable(card);
+--
+2.43.0
+
--- /dev/null
+From 610c5f40c2b97edcddc2f8d43c55fce6055201d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 11:42:56 +0100
+Subject: mmc: tmio: avoid concurrent runs of mmc_request_done()
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit e8d1b41e69d72c62865bebe8f441163ec00b3d44 ]
+
+With the to-be-fixed commit, the reset_work handler cleared 'host->mrq'
+outside of the spinlock-protected critical section. That leaves a small
+race window during execution of 'tmio_mmc_reset()' where the done_work
+handler could grab a pointer to the now-invalid 'host->mrq'. Both would
+then use it to call mmc_request_done(), causing problems (see the link
+below).
+
+However, 'host->mrq' cannot simply be cleared earlier inside the
+critical section. That would allow new mrqs to come in asynchronously
+while the actual reset of the controller still needs to be done. So, as
+in 'tmio_mmc_set_ios()', an ERR_PTR is used to prevent new mrqs from
+coming in while still avoiding concurrency between the work handlers.
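+
+Sketch of the resulting reset_work sequence (illustrative; error handling
+and DMA teardown omitted):
+
+  spin_lock_irqsave(&host->lock, flags);
+  mrq = host->mrq;
+  /* keep new requests and done_work out while the reset runs */
+  host->mrq = ERR_PTR(-EBUSY);
+  host->cmd = NULL;
+  host->data = NULL;
+  spin_unlock_irqrestore(&host->lock, flags);
+
+  tmio_mmc_reset(host);
+
+  host->mrq = NULL;  /* ready for new calls again */
+  mmc_request_done(host->mmc, mrq);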
+
+Reported-by: Dirk Behme <dirk.behme@de.bosch.com>
+Closes: https://lore.kernel.org/all/20240220061356.3001761-1-dirk.behme@de.bosch.com/
+Fixes: df3ef2d3c92c ("mmc: protect the tmio_mmc driver against a theoretical race")
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Tested-by: Dirk Behme <dirk.behme@de.bosch.com>
+Reviewed-by: Dirk Behme <dirk.behme@de.bosch.com>
+Cc: stable@vger.kernel.org # 3.0+
+Link: https://lore.kernel.org/r/20240305104423.3177-2-wsa+renesas@sang-engineering.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/tmio_mmc_core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 437048bb80273..5024cae411d3a 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
++ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
++ host->mrq = ERR_PTR(-EBUSY);
+ host->cmd = NULL;
+ host->data = NULL;
+
+--
+2.43.0
+
--- /dev/null
+From f4558a8d8da6e14b2e7acbab7456a22485936876 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Feb 2024 00:45:51 +0300
+Subject: mtd: rawnand: meson: fix scrambling mode value in command macro
+
+From: Arseniy Krasnov <avkrasnov@salutedevices.com>
+
+[ Upstream commit ef6f463599e16924cdd02ce5056ab52879dc008c ]
+
+Scrambling mode is enabled by the value (1 << 19). NFC_CMD_SCRAMBLER_ENABLE
+is already (1 << 19), so there is no need to shift it again in the
+CMDRWGEN() macro.
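+
+Worked example (assuming the command word written to the controller is
+32 bits wide): callers pass NFC_CMD_SCRAMBLER_ENABLE, i.e. (1 << 19), as
+'ran', so the old macro computed ((1 << 19) << 19), i.e. bit 38, which
+does not fit in the command word, and the scrambler-enable bit was never
+actually set. Passing the value through unshifted keeps the intended
+bit 19.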
+
+Signed-off-by: Arseniy Krasnov <avkrasnov@salutedevices.com>
+Cc: <Stable@vger.kernel.org>
+Fixes: 8fae856c5350 ("mtd: rawnand: meson: add support for Amlogic NAND flash controller")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20240210214551.441610-1-avkrasnov@salutedevices.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/meson_nand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 0aeac8ccbd0ee..05925fb694602 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -63,7 +63,7 @@
+ #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+- ((ran) << 19) | \
++ (ran) | \
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+--
+2.43.0
+
--- /dev/null
+From e2afd665d18c274e6d383ca8013f2ed29e8c6479 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 09:34:54 -0400
+Subject: net: hns3: tracing: fix hclgevf trace event strings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 3f9952e8d80cca2da3b47ecd5ad9ec16cfd1a649 ]
+
+The __string() and __assign_str() helper macros of the TRACE_EVENT() macro
+are going through some optimizations where only the source string of
+__string() will be used and the __assign_str() source will be ignored and
+later removed.
+
+To make sure that there's no issues, a new check is added between the
+__string() src argument and the __assign_str() src argument that does a
+strcmp() to make sure they are the same string.
+
+The hclgevf trace events have:
+
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+
+Which triggers the warning:
+
+hclgevf_trace.h:34:39: error: passing argument 1 of ‘strcmp’ from incompatible pointer type [-Werror=incompatible-pointer-types]
+ 34 | __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ [..]
+arch/x86/include/asm/string_64.h:75:24: note: expected ‘const char *’ but argument is of type ‘char (*)[16]’
+ 75 | int strcmp(const char *cs, const char *ct);
+ | ~~~~~~~~~~~~^~
+
+Because __assign_str() now has:
+
+ WARN_ON_ONCE(__builtin_constant_p(src) ? \
+ strcmp((src), __data_offsets.dst##_ptr_) : \
+ (src) != __data_offsets.dst##_ptr_); \
+
+The problem is the '&' on hdev->nic.kinfo.netdev->name. That's because
+that name is:
+
+ char name[IFNAMSIZ]
+
+Passing the address ('&') of a char array is not compatible with strcmp().
+
+The '&' is not necessary, remove it.
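+
+A minimal illustration of the type difference (generic C, not driver
+code):
+
+  char name[IFNAMSIZ];
+
+  strcmp(name, "eth0");   /* ok: the array decays to 'char *'       */
+  strcmp(&name, "eth0");  /* type error: '&name' is 'char (*)[16]'  */
+
+Both expressions refer to the same address, but only the first has the
+pointer type strcmp() expects, which is why dropping the '&' satisfies
+the new compile-time check.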
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240313093454.3909afe7@gandalf.local.home
+
+Cc: netdev <netdev@vger.kernel.org>
+Cc: Yisen Zhuang <yisen.zhuang@huawei.com>
+Cc: Salil Mehta <salil.mehta@huawei.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Cc: Yufeng Mo <moyufeng@huawei.com>
+Cc: Huazhong Tan <tanhuazhong@huawei.com>
+Cc: stable@vger.kernel.org
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Jijie Shao <shaojijie@huawei.com>
+Fixes: d8355240cf8fb ("net: hns3: add trace event support for PF/VF mailbox")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 8 ++++----
+ .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 8 ++++----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+index 8510b88d49820..f3cd5a376eca9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+index 5d4895bb57a17..b259e95dd53c2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->nic.kinfo.netdev->name)
++ __string(devname, hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->nic.kinfo.netdev->name)
++ __string(devname, hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+--
+2.43.0
+
--- /dev/null
+From e39c900df3f901bc530fba4aea0d5d473319c6c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Mar 2024 11:49:57 -0500
+Subject: nfs: fix UAF in direct writes
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 17f46b803d4f23c66cacce81db35fef3adb8f2af ]
+
+In production we have been hitting the following warning consistently
+
+------------[ cut here ]------------
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 17 PID: 1800359 at lib/refcount.c:28 refcount_warn_saturate+0x9c/0xe0
+Workqueue: nfsiod nfs_direct_write_schedule_work [nfs]
+RIP: 0010:refcount_warn_saturate+0x9c/0xe0
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ ? __warn+0x9f/0x130
+ ? refcount_warn_saturate+0x9c/0xe0
+ ? report_bug+0xcc/0x150
+ ? handle_bug+0x3d/0x70
+ ? exc_invalid_op+0x16/0x40
+ ? asm_exc_invalid_op+0x16/0x20
+ ? refcount_warn_saturate+0x9c/0xe0
+ nfs_direct_write_schedule_work+0x237/0x250 [nfs]
+ process_one_work+0x12f/0x4a0
+ worker_thread+0x14e/0x3b0
+ ? ZSTD_getCParams_internal+0x220/0x220
+ kthread+0xdc/0x120
+ ? __btf_name_valid+0xa0/0xa0
+ ret_from_fork+0x1f/0x30
+
+This is because we're completing the nfs_direct_request twice in a row.
+
+The source of this is that when we have commit requests to submit, we
+process them and send them off, and then in the completion path for the
+commit requests we have
+
+if (nfs_commit_end(cinfo.mds))
+ nfs_direct_write_complete(dreq);
+
+However since we're submitting asynchronous requests we sometimes have
+one that completes before we submit the next one, so we end up calling
+complete on the nfs_direct_request twice.
+
+The only other place we use nfs_generic_commit_list() is in
+__nfs_commit_inode, which wraps this call in a
+
+nfs_commit_begin();
+nfs_commit_end();
+
+Which is a common pattern for this style of completion handling, one
+that is also repeated in the direct code with get_dreq()/put_dreq()
+calls around where we process events as well as in the completion paths.
+
+Fix this by using the same pattern for the commit requests.
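+
+A minimal userspace sketch of the begin/end reference pattern described above
+(plain C atomics stand in for nfs_commit_begin()/nfs_commit_end(); the names
+and ordering are illustrative only):
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  static atomic_int rpcs_out;
+
+  static void commit_begin(void)
+  {
+          atomic_fetch_add(&rpcs_out, 1);
+  }
+
+  /* Returns 1 only for the caller that drops the last reference. */
+  static int commit_end(void)
+  {
+          return atomic_fetch_sub(&rpcs_out, 1) == 1;
+  }
+
+  int main(void)
+  {
+          commit_begin();                 /* held across submission */
+
+          commit_begin();                 /* first async commit submitted */
+          if (commit_end())               /* ...and it completes early */
+                  puts("complete");       /* not reached: submission ref held */
+
+          if (commit_end())               /* submission path drops its ref */
+                  puts("complete");       /* request completes exactly once */
+
+          return 0;
+  }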
+
+Before this change, with my 200-node rocksdb stress test running, this
+warning would pop every 10 minutes or so. With this patch, the stress test
+has been running for several hours without the warning appearing.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/direct.c | 11 +++++++++--
+ fs/nfs/write.c | 2 +-
+ include/linux/nfs_fs.h | 1 +
+ 3 files changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 8fdb65e1b14a3..b555efca01d20 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -647,10 +647,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
+ LIST_HEAD(mds_list);
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
++ nfs_commit_begin(cinfo.mds);
+ nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+ res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+- if (res < 0) /* res == -ENOMEM */
+- nfs_direct_write_reschedule(dreq);
++ if (res < 0) { /* res == -ENOMEM */
++ spin_lock(&dreq->lock);
++ if (dreq->flags == 0)
++ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++ spin_unlock(&dreq->lock);
++ }
++ if (nfs_commit_end(cinfo.mds))
++ nfs_direct_write_complete(dreq);
+ }
+
+ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 6a06066684172..8e21caae4cae2 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1656,7 +1656,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
+ !atomic_read(&cinfo->rpcs_out));
+ }
+
+-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+ {
+ atomic_inc(&cinfo->rpcs_out);
+ }
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 7931fa4725612..ac7d799d9d387 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -582,6 +582,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
+ extern int nfs_commit_inode(struct inode *, int);
+ extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+ extern void nfs_commit_free(struct nfs_commit_data *data);
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
+ bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
+
+ static inline bool nfs_have_writebacks(const struct inode *inode)
+--
+2.43.0
+
--- /dev/null
+From 9fa5508828ed29cf022dff57d732b44c5d47dd7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Feb 2024 12:28:28 -0500
+Subject: NFSD: Fix nfsd_clid_class use of __string_len() macro
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 9388a2aa453321bcf1ad2603959debea9e6ab6d4 ]
+
+I'm working on restructuring the __string* macros so that it doesn't need
+to recalculate the string twice. That is, it will save it off when
+processing __string() and the __assign_str() will not need to do the work
+again as it currently does.
+
+Currently __string_len(item, src, len) doesn't actually use "src", but my
+changes will require src to be correct as that is where the __assign_str()
+will get its value from.
+
+The event class nfsd_clid_class has:
+
+ __string_len(name, name, clp->cl_name.len)
+
+But the second "name" does not exist and causes my changes to fail to
+build. That second parameter should be: clp->cl_name.data.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240222122828.3d8d213c@gandalf.local.home
+
+Cc: Neil Brown <neilb@suse.de>
+Cc: Olga Kornievskaia <kolga@netapp.com>
+Cc: Dai Ngo <Dai.Ngo@oracle.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: stable@vger.kernel.org
+Fixes: d27b74a8675ca ("NFSD: Use new __string_len C macros for nfsd_clid_class")
+Acked-by: Chuck Lever <chuck.lever@oracle.com>
+Acked-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 4183819ea0829..84f26f281fe9f 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -842,7 +842,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned long, flavor)
+ __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+- __string_len(name, name, clp->cl_name.len)
++ __string_len(name, clp->cl_name.data, clp->cl_name.len)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+--
+2.43.0
+
--- /dev/null
+From 65404314913e86889743dad3c43de087410a8174 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 19:58:26 +0900
+Subject: nilfs2: fix failure to detect DAT corruption in btree and direct
+ mappings
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+[ Upstream commit f2f26b4a84a0ef41791bd2d70861c8eac748f4ba ]
+
+Patch series "nilfs2: fix kernel bug at submit_bh_wbc()".
+
+This resolves a kernel BUG reported by syzbot. Since there are two
+flaws involved, I've made each one a separate patch.
+
+The first patch alone resolves the syzbot-reported bug, but I think
+both fixes should be sent to stable, so I've tagged them as such.
+
+This patch (of 2):
+
+Syzbot has reported a kernel bug in submit_bh_wbc() when writing file data
+to a nilfs2 file system whose metadata is corrupted.
+
+There are two flaws involved in this issue.
+
+The first flaw is that when nilfs_get_block() locates a data block using
+btree or direct mapping, if the disk address translation routine
+nilfs_dat_translate() fails with internal code -ENOENT due to DAT metadata
+corruption, it can be passed back to nilfs_get_block(). This causes
+nilfs_get_block() to misidentify an existing block as non-existent,
+causing both data block lookup and insertion to fail inconsistently.
+
+The second flaw is that nilfs_get_block() returns a successful status in
+this inconsistent state. This causes the caller __block_write_begin_int()
+or others to request a read even though the buffer is not mapped,
+resulting in a BUG_ON check for the BH_Mapped flag in submit_bh_wbc()
+failing.
+
+This fixes the first issue by changing the return value to code -EINVAL
+when a conversion using DAT fails with code -ENOENT, avoiding the
+conflicting condition that leads to the kernel bug described above. Here,
+code -EINVAL indicates that metadata corruption was detected during the
+block lookup, which will be properly handled as a file system error and
+converted to -EIO when passing through the nilfs2 bmap layer.
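+
+A small standalone C sketch of the error-code conversion described above
+(hypothetical helper name; the real change lives in the new dat_error labels
+in the diff below):
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  /*
+   * An internal "entry not found" from the DAT translation layer means
+   * the metadata is corrupted here, so report it as -EINVAL instead of
+   * letting the caller treat the block as simply non-existent.
+   */
+  static int dat_translate_result(int ret)
+  {
+          if (ret == -ENOENT)
+                  return -EINVAL;
+          return ret;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", dat_translate_result(-ENOENT));  /* -EINVAL */
+          return 0;
+  }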
+
+Link: https://lkml.kernel.org/r/20240313105827.5296-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20240313105827.5296-2-konishi.ryusuke@gmail.com
+Fixes: c3a7abf06ce7 ("nilfs2: support contiguous lookup of blocks")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+cfed5b56649bddf80d6e@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=cfed5b56649bddf80d6e
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nilfs2/btree.c | 9 +++++++--
+ fs/nilfs2/direct.c | 9 +++++++--
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index 40ce92a332fe7..146640f0607a3 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -724,7 +724,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ dat = nilfs_bmap_get_dat(btree);
+ ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ if (ret < 0)
+- goto out;
++ goto dat_error;
+ ptr = blocknr;
+ }
+ cnt = 1;
+@@ -743,7 +743,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ if (dat) {
+ ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ if (ret < 0)
+- goto out;
++ goto dat_error;
+ ptr2 = blocknr;
+ }
+ if (ptr2 != ptr + cnt || ++cnt == maxblocks)
+@@ -781,6 +781,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ out:
+ nilfs_btree_free_path(path);
+ return ret;
++
++ dat_error:
++ if (ret == -ENOENT)
++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
++ goto out;
+ }
+
+ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
+diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
+index a35f2795b2422..8f802f7b0840b 100644
+--- a/fs/nilfs2/direct.c
++++ b/fs/nilfs2/direct.c
+@@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ dat = nilfs_bmap_get_dat(direct);
+ ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ if (ret < 0)
+- return ret;
++ goto dat_error;
+ ptr = blocknr;
+ }
+
+@@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ if (dat) {
+ ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ if (ret < 0)
+- return ret;
++ goto dat_error;
+ ptr2 = blocknr;
+ }
+ if (ptr2 != ptr + cnt)
+@@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ }
+ *ptrp = ptr;
+ return cnt;
++
++ dat_error:
++ if (ret == -ENOENT)
++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
++ return ret;
+ }
+
+ static __u64
+--
+2.43.0
+
--- /dev/null
+From 4f5c71b48bf593024d8abc045702d1503cc7e749 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 19:58:27 +0900
+Subject: nilfs2: prevent kernel bug at submit_bh_wbc()
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+[ Upstream commit 269cdf353b5bdd15f1a079671b0f889113865f20 ]
+
+Fix a bug where nilfs_get_block() returns a successful status when
+searching and inserting the specified block both fail inconsistently. If
+this inconsistent behavior is not due to a previously fixed bug, then an
+unexpected race is occurring, so return a temporary error -EAGAIN instead.
+
+This prevents callers such as __block_write_begin_int() from requesting a
+read into a buffer that is not mapped, which would cause the BUG_ON check
+for the BH_Mapped flag in submit_bh_wbc() to fail.
+
+Link: https://lkml.kernel.org/r/20240313105827.5296-3-konishi.ryusuke@gmail.com
+Fixes: 1f5abe7e7dbc ("nilfs2: replace BUG_ON and BUG calls triggerable from ioctl")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nilfs2/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index f625872321cca..8eb4288d46fe0 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
+ "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+ __func__, inode->i_ino,
+ (unsigned long long)blkoff);
+- err = 0;
++ err = -EAGAIN;
+ }
+ nilfs_transaction_abort(inode->i_sb);
+ goto out;
+--
+2.43.0
+
--- /dev/null
+From ec48d2a2320e1a6db9b7b3ba0a08f5bf648f85d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Feb 2024 11:40:23 +0000
+Subject: nvmem: meson-efuse: fix function pointer type mismatch
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+[ Upstream commit cbd38332c140829ab752ba4e727f98be5c257f18 ]
+
+clang-16 warns about casting functions to incompatible types, as is done
+here to call clk_disable_unprepare:
+
+drivers/nvmem/meson-efuse.c:78:12: error: cast from 'void (*)(struct clk *)' to 'void (*)(void *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 78 | (void(*)(void *))clk_disable_unprepare,
+
+The pattern of getting, enabling and setting a disable callback for a
+clock can be replaced with devm_clk_get_enabled(), which also fixes
+this warning.
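+
+A minimal userspace illustration of the cast being complained about (not part
+of the patch; clang's -Wcast-function-type-strict should flag it the same way):
+
+  struct clk;                             /* opaque type, as in the kernel */
+
+  static void put_clk(struct clk *clk)
+  {
+          (void)clk;
+  }
+
+  typedef void (*cleanup_fn)(void *);
+
+  int main(void)
+  {
+          /*
+           * clang -Wcast-function-type-strict warns here, just like the
+           * report above; calling through the cast pointer is also
+           * undefined behaviour and trips strict CFI.
+           */
+          cleanup_fn fn = (cleanup_fn)put_clk;
+
+          (void)fn;
+          return 0;
+  }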
+
+Fixes: 611fbca1c861 ("nvmem: meson-efuse: add peripheral clock")
+Cc: Stable@vger.kernel.org
+Reported-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Reviewed-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Justin Stitt <justinstitt@google.com>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240224114023.85535-2-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvmem/meson-efuse.c | 25 +++----------------------
+ 1 file changed, 3 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
+index d6b533497ce1a..ba2714bef8d0e 100644
+--- a/drivers/nvmem/meson-efuse.c
++++ b/drivers/nvmem/meson-efuse.c
+@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ struct nvmem_config *econfig;
+ struct clk *clk;
+ unsigned int size;
+- int ret;
+
+ sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
+ if (!sm_np) {
+@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ if (!fw)
+ return -EPROBE_DEFER;
+
+- clk = devm_clk_get(dev, NULL);
+- if (IS_ERR(clk)) {
+- ret = PTR_ERR(clk);
+- if (ret != -EPROBE_DEFER)
+- dev_err(dev, "failed to get efuse gate");
+- return ret;
+- }
+-
+- ret = clk_prepare_enable(clk);
+- if (ret) {
+- dev_err(dev, "failed to enable gate");
+- return ret;
+- }
+-
+- ret = devm_add_action_or_reset(dev,
+- (void(*)(void *))clk_disable_unprepare,
+- clk);
+- if (ret) {
+- dev_err(dev, "failed to add disable callback");
+- return ret;
+- }
++ clk = devm_clk_get_enabled(dev, NULL);
++ if (IS_ERR(clk))
++ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
+
+ if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
+ dev_err(dev, "failed to get max user");
+--
+2.43.0
+
--- /dev/null
+From e722dd24a39e27a953c9d4abfbf3307b0be2efe4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Feb 2024 16:40:51 +0100
+Subject: parisc: Avoid clobbering the C/B bits in the PSW with tophys and
+ tovirt macros
+
+From: John David Anglin <dave.anglin@bell.net>
+
+[ Upstream commit 4603fbaa76b5e703b38ac8cc718102834eb6e330 ]
+
+Use add,l to avoid clobbering the C/B bits in the PSW.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: stable@vger.kernel.org # v5.10+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/include/asm/assembly.h | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 5937d5edaba1e..000a28e1c5e8d 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -97,26 +97,28 @@
+ * version takes two arguments: a src and destination register.
+ * However, the source and destination registers can not be
+ * the same register.
++ *
++ * We use add,l to avoid clobbering the C/B bits in the PSW.
+ */
+
+ .macro tophys grvirt, grphys
+- ldil L%(__PAGE_OFFSET), \grphys
+- sub \grvirt, \grphys, \grphys
++ ldil L%(-__PAGE_OFFSET), \grphys
++ addl \grvirt, \grphys, \grphys
+ .endm
+-
++
+ .macro tovirt grphys, grvirt
+ ldil L%(__PAGE_OFFSET), \grvirt
+- add \grphys, \grvirt, \grvirt
++ addl \grphys, \grvirt, \grvirt
+ .endm
+
+ .macro tophys_r1 gr
+- ldil L%(__PAGE_OFFSET), %r1
+- sub \gr, %r1, \gr
++ ldil L%(-__PAGE_OFFSET), %r1
++ addl \gr, %r1, \gr
+ .endm
+-
++
+ .macro tovirt_r1 gr
+ ldil L%(__PAGE_OFFSET), %r1
+- add \gr, %r1, \gr
++ addl \gr, %r1, \gr
+ .endm
+
+ .macro delay value
+--
+2.43.0
+
--- /dev/null
+From e2f622889a362353b08ad4d8b462c3a95d8b9441 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Feb 2024 11:15:56 -0800
+Subject: parisc: Fix csum_ipv6_magic on 32-bit systems
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 4408ba75e4ba80c91fde7e10bccccf388f5c09be ]
+
+Calculating the IPv6 checksum on 32-bit systems missed overflows when
+adding the proto+len fields into the checksum. This results in the
+following unit test failure.
+
+ # test_csum_ipv6_magic: ASSERTION FAILED at lib/checksum_kunit.c:506
+ Expected ( u64)csum_result == ( u64)expected, but
+ ( u64)csum_result == 46722 (0xb682)
+ ( u64)expected == 46721 (0xb681)
+ not ok 5 test_csum_ipv6_magic
+
+This is probably rarely seen in the real world because proto+len are
+usually small values which will rarely result in overflows when calculating
+the checksum. However, the unit test code uses large values for the length
+field, causing the test to fail.
+
+Fix the problem by adding the missing carry into the final checksum.
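+
+A standalone C model of the missing-carry problem (illustrative only; the
+actual fix is the extra "addc 0, %0, %0" in the assembly below):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /*
+   * Fold the carry from a 32-bit addition back into the accumulator,
+   * which is what the added addc instruction does.
+   */
+  static uint32_t add_with_carry(uint32_t sum, uint32_t val)
+  {
+          uint64_t tmp = (uint64_t)sum + val;
+
+          return (uint32_t)tmp + (uint32_t)(tmp >> 32);
+  }
+
+  int main(void)
+  {
+          /* 0xffffffff + 0x2 overflows; the carry must not be dropped. */
+          printf("0x%08x\n", add_with_carry(0xffffffffu, 0x2)); /* 0x00000002 */
+          return 0;
+  }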
+
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Charlie Jenkins <charlie@rivosinc.com>
+Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/include/asm/checksum.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index f705e5dd10742..e619e67440db9 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -163,7 +163,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ " ldw,ma 4(%2), %7\n" /* 4th daddr */
+ " addc %6, %0, %0\n"
+ " addc %7, %0, %0\n"
+-" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
++" addc %3, %0, %0\n" /* fold in proto+len */
++" addc 0, %0, %0\n" /* add carry */
+
+ #endif
+ : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
+--
+2.43.0
+
--- /dev/null
+From 68ac893f5bb4e0f332ff18f97c374a5eff26f818 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Feb 2024 15:46:31 -0800
+Subject: parisc: Fix csum_ipv6_magic on 64-bit systems
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 4b75b12d70506e31fc02356bbca60f8d5ca012d0 ]
+
+hppa 64-bit systems calculate the IPv6 checksum using 64-bit add
+operations. The last add folds protocol and length fields into the 64-bit
+result. While unlikely, this operation can overflow. The overflow can be
+triggered with a code sequence such as the following.
+
+ /* try to trigger massive overflows */
+ memset(tmp_buf, 0xff, sizeof(struct in6_addr));
+ csum_result = csum_ipv6_magic((struct in6_addr *)tmp_buf,
+ (struct in6_addr *)tmp_buf,
+ 0xffff, 0xff, 0xffffffff);
+
+Fix the problem by adding any overflows from the final add operation into
+the calculated checksum. Fortunately, we can do this without additional
+cost by replacing the add operation used to fold the checksum into 32 bit
+with "add,dc" to add in the missing carry.
+
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/include/asm/checksum.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index e619e67440db9..c949aa20fa162 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -137,8 +137,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
+ " extrd,u %0, 31, 32, %4\n"/* copy upper half down */
+ " depdi 0, 31, 32, %0\n"/* clear upper half */
+-" add %4, %0, %0\n" /* fold into 32-bits */
+-" addc 0, %0, %0\n" /* add carry */
++" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */
++" addc 0, %0, %0\n" /* add final carry */
+
+ #else
+
+--
+2.43.0
+
--- /dev/null
+From d04c5ec7d9a57f15e83d9f219ed8cffd4efe07fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Feb 2024 09:55:26 -0800
+Subject: parisc: Fix ip_fast_csum
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit a2abae8f0b638c31bb9799d9dd847306e0d005bd ]
+
+IP checksum unit tests report the following error when run on hppa/hppa64.
+
+ # test_ip_fast_csum: ASSERTION FAILED at lib/checksum_kunit.c:463
+ Expected ( u64)csum_result == ( u64)expected, but
+ ( u64)csum_result == 33754 (0x83da)
+ ( u64)expected == 10946 (0x2ac2)
+ not ok 4 test_ip_fast_csum
+
+0x83da is the expected result if the IP header length is 20 bytes. 0x2ac2
+is the expected result if the IP header length is 24 bytes. The test fails
+with an IP header length of 24 bytes. It appears that ip_fast_csum()
+always returns the checksum for a 20-byte header, no matter how long
+the header actually is.
+
+Code analysis shows a suspicious assembler sequence in ip_fast_csum().
+
+ " addc %0, %3, %0\n"
+ "1: ldws,ma 4(%1), %3\n"
+ " addib,< 0, %2, 1b\n" <---
+
+While my understanding of HPPA assembler is limited, it does not seem
+to make much sense to subtract 0 from a register and to expect the result
+to ever be negative. Subtracting 1 from the length parameter makes more
+sense. On top of that, the operation should be repeated if and only if
+the result is still > 0, so change the suspicious instruction to
+ " addib,> -1, %2, 1b\n"
+
+The IP checksum unit test passes after this change.
+
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Charlie Jenkins <charlie@rivosinc.com>
+Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/include/asm/checksum.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index 3c43baca7b397..f705e5dd10742 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+ " addc %0, %5, %0\n"
+ " addc %0, %3, %0\n"
+ "1: ldws,ma 4(%1), %3\n"
+-" addib,< 0, %2, 1b\n"
++" addib,> -1, %2, 1b\n"
+ " addc %0, %3, %0\n"
+ "\n"
+ " extru %0, 31, 16, %4\n"
+--
+2.43.0
+
--- /dev/null
+From cc5ab73007b30d63c39ac4af55431f23cf0803ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 12:33:51 -0800
+Subject: parisc: Strip upper 32 bit of sum in csum_ipv6_magic for 64-bit
+ builds
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 0568b6f0d863643db2edcc7be31165740c89fa82 ]
+
+IPv6 checksum tests with unaligned addresses on 64-bit builds result
+in unexpected failures.
+
+Expected expected == csum_result, but
+ expected == 46591 (0xb5ff)
+ csum_result == 46381 (0xb52d)
+with alignment offset 1
+
+Oddly enough, the problem disappeared after adding test code into
+the beginning of csum_ipv6_magic().
+
+As it turns out, the 'sum' parameter of csum_ipv6_magic() is declared as
+__wsum, which is a 32-bit variable. However, it is treated as a 64-bit
+variable in the 64-bit assembler code. Tests showed that the upper 32 bits
+of the register used to pass the variable are _not_ cleared when entering
+the function. This can result in checksum calculation errors.
+
+Clearing the upper 32 bits of 'sum' as the first operation in the assembler
+code fixes the problem.
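+
+A standalone C model of the fix (illustrative only; "depdi 0, 31, 32" is the
+instruction the patch adds to clear the upper half of the register):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /*
+   * Mask off the stale upper half before using the 32-bit checksum
+   * argument in 64-bit register arithmetic.
+   */
+  static uint64_t sanitize_wsum(uint64_t reg)
+  {
+          return reg & 0xffffffffULL;
+  }
+
+  int main(void)
+  {
+          uint64_t reg = 0xdeadbeef00000001ULL;  /* upper 32 bits are stale */
+
+          printf("0x%016llx\n", (unsigned long long)sanitize_wsum(reg));
+          return 0;
+  }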
+
+Acked-by: Helge Deller <deller@gmx.de>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/include/asm/checksum.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index c949aa20fa162..2aceebcd695c8 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ ** Try to keep 4 registers with "live" values ahead of the ALU.
+ */
+
++" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */
+ " ldd,ma 8(%1), %4\n" /* get 1st saddr word */
+ " ldd,ma 8(%2), %5\n" /* get 1st daddr word */
+ " add %4, %0, %0\n"
+--
+2.43.0
+
--- /dev/null
+From d7557f21240e5d9fcc95e121dc6ede74e574bac0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Feb 2024 14:26:55 +0100
+Subject: parisc/unaligned: Rewrite 64-bit inline assembly of emulate_ldd()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit e5db6a74571a8baf87a116ea39aab946283362ff ]
+
+Convert to use real temp variables instead of clobbering processor
+registers. This aligns the 64-bit inline assembly code with the 32-bit
+assembly code which was rewritten with commit 427c1073a2a1
+("parisc/unaligned: Rewrite 32-bit inline assembly of emulate_ldd()").
+
+While at it, fix comment in 32-bit rewrite code. Temporary variables are
+now used for both 32-bit and 64-bit code, so move their declarations
+to the function header.
+
+No functional change intended.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Cc: stable@vger.kernel.org # v6.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/parisc/kernel/unaligned.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index 8a8e7d7224a26..782ee05e20889 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -167,6 +167,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ {
+ unsigned long saddr = regs->ior;
++ unsigned long shift, temp1;
+ __u64 val = 0;
+ ASM_EXCEPTIONTABLE_VAR(ret);
+
+@@ -178,25 +179,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+
+ #ifdef CONFIG_64BIT
+ __asm__ __volatile__ (
+-" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
+-" mtsp %4, %%sr1\n"
+-" depd %%r0,63,3,%3\n"
+-"1: ldd 0(%%sr1,%3),%0\n"
+-"2: ldd 8(%%sr1,%3),%%r20\n"
+-" subi 64,%%r19,%%r19\n"
+-" mtsar %%r19\n"
+-" shrpd %0,%%r20,%%sar,%0\n"
++" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */
++" mtsp %5, %%sr1\n"
++" depd %%r0,63,3,%2\n"
++"1: ldd 0(%%sr1,%2),%0\n"
++"2: ldd 8(%%sr1,%2),%4\n"
++" subi 64,%3,%3\n"
++" mtsar %3\n"
++" shrpd %0,%4,%%sar,%0\n"
+ "3: \n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+- : "=r" (val), "+r" (ret)
+- : "0" (val), "r" (saddr), "r" (regs->isr)
+- : "r19", "r20" );
++ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
++ : "r" (regs->isr) );
+ #else
+- {
+- unsigned long shift, temp1;
+ __asm__ __volatile__ (
+-" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */
++" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */
+ " mtsp %5, %%sr1\n"
+ " dep %%r0,31,2,%2\n"
+ "1: ldw 0(%%sr1,%2),%0\n"
+@@ -212,7 +210,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
+ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ : "r" (regs->isr) );
+- }
+ #endif
+
+ DPRINTF("val = 0x%llx\n", val);
+--
+2.43.0
+
--- /dev/null
+From 690edd506a6f0ea342dce64b40b402276c624faa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Feb 2024 13:01:35 +0100
+Subject: PCI/AER: Block runtime suspend when handling errors
+
+From: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+
+[ Upstream commit 002bf2fbc00e5c4b95fb167287e2ae7d1973281e ]
+
+PM runtime can be done simultaneously with AER error handling. Avoid that
+by using pm_runtime_get_sync() before and pm_runtime_put() after reset in
+pcie_do_recovery() for all recovering devices.
+
+pm_runtime_get_sync() will increase the dev->power.usage_count counter to
+prevent any possible future request to runtime suspend a device. It will
+also resume a device if it was previously in the D3hot state.
+
+I tested with an igc device by doing simultaneous aer_inject and runtime PM
+suspend/resume via /sys/bus/pci/devices/PCI_ID/power/control and could
+reproduce:
+
+ igc 0000:02:00.0: not ready 65535ms after bus reset; giving up
+ pcieport 0000:00:1c.2: AER: Root Port link has been reset (-25)
+ pcieport 0000:00:1c.2: AER: subordinate device reset failed
+ pcieport 0000:00:1c.2: AER: device recovery failed
+ igc 0000:02:00.0: Unable to change power state from D3hot to D0, device inaccessible
+
+The problem disappears when this patch is applied.
+
+Link: https://lore.kernel.org/r/20240212120135.146068-1-stanislaw.gruszka@linux.intel.com
+Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Acked-by: Rafael J. Wysocki <rafael@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pcie/err.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 59c90d04a609a..705893b5f7b09 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -13,6 +13,7 @@
+ #define dev_fmt(fmt) "AER: " fmt
+
+ #include <linux/pci.h>
++#include <linux/pm_runtime.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
+ return 0;
+ }
+
++static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
++{
++ pm_runtime_get_sync(&pdev->dev);
++ return 0;
++}
++
++static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
++{
++ pm_runtime_put(&pdev->dev);
++ return 0;
++}
++
+ static int report_frozen_detected(struct pci_dev *dev, void *data)
+ {
+ return report_error_detected(dev, pci_channel_io_frozen, data);
+@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ else
+ bridge = pci_upstream_bridge(dev);
+
++ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
++
+ pci_dbg(bridge, "broadcast error_detected message\n");
+ if (state == pci_channel_io_frozen) {
+ pci_walk_bridge(bridge, report_frozen_detected, &status);
+@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ pcie_clear_device_status(dev);
+ pci_aer_clear_nonfatal_status(dev);
+ }
++
++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ pci_info(bridge, "device recovery successful\n");
+ return status;
+
+ failed:
++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+
+ /* TODO: Should kernel panic here? */
+--
+2.43.0
+
--- /dev/null
+From f31b69afd6d5098a13683842e6b72dcd5e64e098 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 12:30:56 +0100
+Subject: PCI/DPC: Quirk PIO log size for Intel Raptor Lake Root Ports
+
+From: Paul Menzel <pmenzel@molgen.mpg.de>
+
+[ Upstream commit 627c6db20703b5d18d928464f411d0d4ec327508 ]
+
+Commit 5459c0b70467 ("PCI/DPC: Quirk PIO log size for certain Intel Root
+Ports") and commit 3b8803494a06 ("PCI/DPC: Quirk PIO log size for Intel Ice
+Lake Root Ports") add quirks for Ice, Tiger and Alder Lake Root Ports.
+System firmware for Raptor Lake still has the bug, so Linux logs the
+warning below on several Raptor Lake systems like Dell Precision 3581 with
+Intel Raptor Lake processor (0W18NX) system firmware/BIOS version 1.10.1.
+
+ pci 0000:00:07.0: [8086:a76e] type 01 class 0x060400
+ pci 0000:00:07.0: DPC: RP PIO log size 0 is invalid
+ pci 0000:00:07.1: [8086:a73f] type 01 class 0x060400
+ pci 0000:00:07.1: DPC: RP PIO log size 0 is invalid
+
+Apply the quirk for Raptor Lake Root Ports as well.
+
+This also enables the DPC driver to dump the RP PIO Log registers when DPC
+is triggered.
+
+Link: https://lore.kernel.org/r/20240305113057.56468-1-pmenzel@molgen.mpg.de
+Reported-by: Niels van Aert <nvaert1986@hotmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218560
+Signed-off-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: <stable@vger.kernel.org>
+Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
+Cc: Niels van Aert <nvaert1986@hotmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c175b70a984c6..289ba6902e41b 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6078,6 +6078,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+ #endif
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From bc46d89aec83aa2671383d9e3bea8c2f81a8a607 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 12:15:20 +0100
+Subject: PCI: dwc: endpoint: Fix advertised resizable BAR size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit 72e34b8593e08a0ee759b7a038e0b178418ea6f8 ]
+
+The commit message in commit fc9a77040b04 ("PCI: designware-ep: Configure
+Resizable BAR cap to advertise the smallest size") claims that it modifies
+the Resizable BAR capability to only advertise support for 1 MB size BARs.
+
+However, the commit writes all zeroes to PCI_REBAR_CAP (the register which
+contains the possible BAR sizes that a BAR can be resized to).
+
+According to the spec, it is illegal to not have a bit set in
+PCI_REBAR_CAP, and 1 MB is the smallest size allowed.
+
+Set bit 4 in PCI_REBAR_CAP, so that we actually advertise support for a
+1 MB BAR size.
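+
+For reference, the capability's size encoding (per the spec section cited in
+the new code comment below) maps bit n of PCI_REBAR_CAP to a BAR size of
+(1 MB << (n - 4)), so bit 4 is the 1 MB entry. A tiny standalone illustration:
+
+  #include <stdio.h>
+
+  /*
+   * Bit n (n >= 4) set in PCI_REBAR_CAP advertises support for a BAR of
+   * (1 MB << (n - 4)), so BIT(4) is the smallest legal entry: 1 MB.
+   */
+  int main(void)
+  {
+          for (int bit = 4; bit <= 9; bit++)
+                  printf("bit %d -> %4u MB\n", bit, 1u << (bit - 4));
+          return 0;
+  }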
+
+Before:
+ Capabilities: [2e8 v1] Physical Resizable BAR
+ BAR 0: current size: 1MB
+ BAR 1: current size: 1MB
+ BAR 2: current size: 1MB
+ BAR 3: current size: 1MB
+ BAR 4: current size: 1MB
+ BAR 5: current size: 1MB
+After:
+ Capabilities: [2e8 v1] Physical Resizable BAR
+ BAR 0: current size: 1MB, supported: 1MB
+ BAR 1: current size: 1MB, supported: 1MB
+ BAR 2: current size: 1MB, supported: 1MB
+ BAR 3: current size: 1MB, supported: 1MB
+ BAR 4: current size: 1MB, supported: 1MB
+ BAR 5: current size: 1MB, supported: 1MB
+
+Fixes: fc9a77040b04 ("PCI: designware-ep: Configure Resizable BAR cap to advertise the smallest size")
+Link: https://lore.kernel.org/linux-pci/20240307111520.3303774-1-cassel@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Cc: <stable@vger.kernel.org> # 5.2
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 4086a7818981a..506d6d061d4cd 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -669,8 +669,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
++ /*
++ * PCIe r6.0, sec 7.8.6.2 require us to support at least one
++ * size in the range from 1 MB to 512 GB. Advertise support
++ * for 1 MB BAR size only.
++ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
++ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ }
+
+ dw_pcie_setup(pci);
+--
+2.43.0
+
--- /dev/null
+From 17fd243b322e02b526d8a25f069eefb3b20a8a0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Feb 2024 12:22:40 -0800
+Subject: PCI: hv: Fix ring buffer size calculation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Kelley <mhklinux@outlook.com>
+
+[ Upstream commit b5ff74c1ef50fe08e384026875fec660fadfaedd ]
+
+For a physical PCI device that is passed through to a Hyper-V guest VM,
+current code specifies the VMBus ring buffer size as 4 pages. But this
+is an inappropriate dependency, since the amount of ring buffer space
+needed is unrelated to PAGE_SIZE. For example, on x86 the ring buffer
+size ends up as 16 Kbytes, while on ARM64 with 64 Kbyte pages, the ring
+size bloats to 256 Kbytes. The ring buffer for PCI pass-thru devices
+is used for only a few messages during device setup and removal, so any
+space above a few Kbytes is wasted.
+
+Fix this by declaring the ring buffer size to be a fixed 16 Kbytes.
+Furthermore, use the VMBUS_RING_SIZE() macro so that the ring buffer
+header is properly accounted for, and so the size is rounded up to a
+page boundary, using the page size for which the kernel is built. While
+with 64 Kbyte pages this results in a 64 Kbyte ring buffer header plus a
+64 Kbyte ring buffer, that's the smallest possible with that page size.
+It's still 128 Kbytes better than the current code.
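+
+A loose userspace model of the rounding described above (numbers are
+assumptions for illustration: a one-page ring-buffer header and a 16 KiB
+payload; the real calculation is done by the VMBUS_RING_SIZE() macro):
+
+  #include <stdio.h>
+
+  /*
+   * Round (header + payload) up to the build-time page size. The header
+   * is modelled as one page here, matching the 64K-page numbers above.
+   */
+  static unsigned long ring_bytes(unsigned long payload, unsigned long page)
+  {
+          unsigned long total = page + payload;
+
+          return (total + page - 1) & ~(page - 1);
+  }
+
+  int main(void)
+  {
+          printf("4K pages:  %lu KiB\n", ring_bytes(16 * 1024, 4096) / 1024);
+          printf("64K pages: %lu KiB\n", ring_bytes(16 * 1024, 65536) / 1024);
+          return 0;
+  }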
+
+Link: https://lore.kernel.org/linux-pci/20240216202240.251818-1-mhklinux@outlook.com
+Signed-off-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Reviewed-by: Ilpo Jarvinen <ilpo.jarvinen@linux.intel.com>
+Reviewed-by: Long Li <longli@microsoft.com>
+Cc: <stable@vger.kernel.org> # 5.15.x
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pci-hyperv.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 9693bab59bf7c..b36cbc9136ae1 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -49,6 +49,7 @@
+ #include <linux/refcount.h>
+ #include <linux/irqdomain.h>
+ #include <linux/acpi.h>
++#include <linux/sizes.h>
+ #include <asm/mshyperv.h>
+
+ /*
+@@ -465,7 +466,7 @@ struct pci_eject_response {
+ u32 status;
+ } __packed;
+
+-static int pci_ring_size = (4 * PAGE_SIZE);
++static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
+
+ /*
+ * Driver specific state.
+--
+2.43.0
+
--- /dev/null
+From e2b3f73580cfb9f772f8a6c27132af9da369ff01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 11:45:38 +0100
+Subject: PCI/PM: Drain runtime-idle callbacks before driver removal
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 9d5286d4e7f68beab450deddbb6a32edd5ecf4bf ]
+
+A race condition between the .runtime_idle() callback and the .remove()
+callback in the rtsx_pcr PCI driver leads to a kernel crash due to an
+unhandled page fault [1].
+
+The problem is that rtsx_pci_runtime_idle() is not expected to be running
+after pm_runtime_get_sync() has been called, but the latter doesn't really
+guarantee that. It only guarantees that the suspend and resume callbacks
+will not be running when it returns.
+
+However, if a .runtime_idle() callback is already running when
+pm_runtime_get_sync() is called, the latter will notice that the runtime PM
+status of the device is RPM_ACTIVE and it will return right away without
+waiting for the former to complete. In fact, it cannot wait for
+.runtime_idle() to complete because it may be called from that callback (it
+arguably does not make much sense to do that, but it is not strictly
+prohibited).
+
+Thus in general, whoever is providing a .runtime_idle() callback needs
+to protect it from running in parallel with whatever code runs after
+pm_runtime_get_sync(). [Note that .runtime_idle() will not start after
+pm_runtime_get_sync() has returned, but it may continue running then if it
+has started earlier.]
+
+One way to address that race condition is to call pm_runtime_barrier()
+after pm_runtime_get_sync() (not before it, because a nonzero value of the
+runtime PM usage counter is necessary to prevent runtime PM callbacks from
+being invoked) to wait for the .runtime_idle() callback to complete should
+it be running at that point. A suitable place for doing that is in
+pci_device_remove() which calls pm_runtime_get_sync() before removing the
+driver, so it may as well call pm_runtime_barrier() subsequently, which
+will prevent the race in question from occurring, not just in the rtsx_pcr
+driver, but in any PCI drivers providing .runtime_idle() callbacks.
+
+Link: https://lore.kernel.org/lkml/20240229062201.49500-1-kai.heng.feng@canonical.com/ # [1]
+Link: https://lore.kernel.org/r/5761426.DvuYhMxLoT@kreacher
+Reported-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Ricky Wu <ricky_wu@realtek.com>
+Acked-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pci-driver.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index f47a3b10bf504..8dda3b205dfd0 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev)
+
+ if (drv->remove) {
+ pm_runtime_get_sync(dev);
++ /*
++ * If the driver provides a .runtime_idle() callback and it has
++ * started to run already, it may continue to run in parallel
++ * with the code below, so wait until all of the runtime PM
++ * activity has completed.
++ */
++ pm_runtime_barrier(dev);
+ drv->remove(pci_dev);
+ pm_runtime_put_noidle(dev);
+ }
+--
+2.43.0
+
--- /dev/null
+From d35f50d901ddfb88ef34bb2107bced64ba786346 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 16:35:15 +0530
+Subject: PCI: qcom: Enable BDF to SID translation properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+[ Upstream commit bf79e33cdd89db498e00a6131e937259de5f2705 ]
+
+Qcom SoCs making use of ARM SMMU require BDF to SID translation table in
+the driver to properly map the SID for the PCIe devices based on their BDF
+identifier. This is currently achieved with the help of
+qcom_pcie_config_sid_1_9_0() function for SoCs supporting the 1_9_0 config.
+
+But with newer Qcom SoCs starting from SM8450, BDF to SID translation is
+set to bypass mode by default in hardware. Due to this, the translation
+table that is set in the qcom_pcie_config_sid_1_9_0() is essentially
+unused and the default SID is used for all endpoints in SoCs starting from
+SM8450.
+
+This is a security concern and also warrants swapping the DeviceID in DT
+while using the GIC ITS to handle MSIs from endpoints. The swapping is
+currently done like below in DT when using GIC ITS:
+
+ /*
+ * MSIs for BDF (1:0.0) only works with Device ID 0x5980.
+ * Hence, the IDs are swapped.
+ */
+ msi-map = <0x0 &gic_its 0x5981 0x1>,
+ <0x100 &gic_its 0x5980 0x1>;
+
+Here, swapping of the DeviceIDs ensures that the endpoint with BDF (1:0.0)
+gets the DeviceID 0x5980 which is associated with the default SID as per
+the iommu mapping in DT. So MSIs were delivered with IDs swapped so far.
+But this also means the Root Port (0:0.0) won't receive any MSIs (for PME,
+AER etc...)
+
+So let's fix these issues by clearing the BDF to SID bypass mode for all
+SoCs making use of the 1_9_0 config. This allows the PCIe devices to use
+the correct SID, thus avoiding the DeviceID swapping hack in DT and also
+achieving the isolation between devices.
+
+Fixes: 4c9398822106 ("PCI: qcom: Add support for configuring BDF to SID mapping for SM8250")
+Link: https://lore.kernel.org/linux-pci/20240307-pci-bdf-sid-fix-v1-1-9423a7e2d63c@linaro.org
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Cc: stable@vger.kernel.org # 5.11
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 9202d2395b507..0bad23ec53ee8 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -50,6 +50,7 @@
+ #define PARF_SLV_ADDR_SPACE_SIZE 0x358
+ #define PARF_DEVICE_TYPE 0x1000
+ #define PARF_BDF_TO_SID_TABLE_N 0x2000
++#define PARF_BDF_TO_SID_CFG 0x2c00
+
+ /* ELBI registers */
+ #define ELBI_SYS_CTRL 0x04
+@@ -102,6 +103,9 @@
+ /* PARF_DEVICE_TYPE register fields */
+ #define DEVICE_TYPE_RC 0x4
+
++/* PARF_BDF_TO_SID_CFG fields */
++#define BDF_TO_SID_BYPASS BIT(0)
++
+ /* ELBI_SYS_CTRL register fields */
+ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+@@ -1326,11 +1330,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
++ u32 val;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
++ /* Enable BDF to SID translation by disabling bypass mode (default) */
++ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
++ val &= ~BDF_TO_SID_BYPASS;
++ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
++
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+--
+2.43.0
+
--- /dev/null
+From bf1c66ec280ab5fc64e80d1868a8450f337722c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 13:41:12 +0530
+Subject: PCI: qcom: Rename qcom_pcie_config_sid_sm8250() to reflect IP version
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+[ Upstream commit 1f70939871b260b52e9d1941f1cad740b7295c2c ]
+
+The qcom_pcie_config_sid_sm8250() function no longer applies only to SM8250.
+So let's rename it to reflect the actual IP version and also move its
+definition to keep it sorted as per IP revisions.
+
+Link: https://lore.kernel.org/r/20230316081117.14288-15-manivannan.sadhasivam@linaro.org
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
+Stable-dep-of: bf79e33cdd89 ("PCI: qcom: Enable BDF to SID translation properly")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 143 ++++++++++++-------------
+ 1 file changed, 71 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 0ccd92faf078a..9202d2395b507 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1312,6 +1312,76 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+
++static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
++{
++ /* iommu map structure */
++ struct {
++ u32 bdf;
++ u32 phandle;
++ u32 smmu_sid;
++ u32 smmu_sid_len;
++ } *map;
++ void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
++ struct device *dev = pcie->pci->dev;
++ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
++ int i, nr_map, size = 0;
++ u32 smmu_sid_base;
++
++ of_get_property(dev->of_node, "iommu-map", &size);
++ if (!size)
++ return 0;
++
++ map = kzalloc(size, GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
++ size / sizeof(u32));
++
++ nr_map = size / (sizeof(*map));
++
++ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
++
++ /* Registers need to be zero out first */
++ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
++
++ /* Extract the SMMU SID base from the first entry of iommu-map */
++ smmu_sid_base = map[0].smmu_sid;
++
++ /* Look for an available entry to hold the mapping */
++ for (i = 0; i < nr_map; i++) {
++ __be16 bdf_be = cpu_to_be16(map[i].bdf);
++ u32 val;
++ u8 hash;
++
++ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
++
++ val = readl(bdf_to_sid_base + hash * sizeof(u32));
++
++ /* If the register is already populated, look for next available entry */
++ while (val) {
++ u8 current_hash = hash++;
++ u8 next_mask = 0xff;
++
++ /* If NEXT field is NULL then update it with next hash */
++ if (!(val & next_mask)) {
++ val |= (u32)hash;
++ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
++ }
++
++ val = readl(bdf_to_sid_base + hash * sizeof(u32));
++ }
++
++ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
++ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
++ writel(val, bdf_to_sid_base + hash * sizeof(u32));
++ }
++
++ kfree(map);
++
++ return 0;
++}
++
+ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
+ {
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+@@ -1429,77 +1499,6 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ }
+
+-static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+-{
+- /* iommu map structure */
+- struct {
+- u32 bdf;
+- u32 phandle;
+- u32 smmu_sid;
+- u32 smmu_sid_len;
+- } *map;
+- void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+- struct device *dev = pcie->pci->dev;
+- u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+- int i, nr_map, size = 0;
+- u32 smmu_sid_base;
+-
+- of_get_property(dev->of_node, "iommu-map", &size);
+- if (!size)
+- return 0;
+-
+- map = kzalloc(size, GFP_KERNEL);
+- if (!map)
+- return -ENOMEM;
+-
+- of_property_read_u32_array(dev->of_node,
+- "iommu-map", (u32 *)map, size / sizeof(u32));
+-
+- nr_map = size / (sizeof(*map));
+-
+- crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+-
+- /* Registers need to be zero out first */
+- memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+-
+- /* Extract the SMMU SID base from the first entry of iommu-map */
+- smmu_sid_base = map[0].smmu_sid;
+-
+- /* Look for an available entry to hold the mapping */
+- for (i = 0; i < nr_map; i++) {
+- __be16 bdf_be = cpu_to_be16(map[i].bdf);
+- u32 val;
+- u8 hash;
+-
+- hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+- 0);
+-
+- val = readl(bdf_to_sid_base + hash * sizeof(u32));
+-
+- /* If the register is already populated, look for next available entry */
+- while (val) {
+- u8 current_hash = hash++;
+- u8 next_mask = 0xff;
+-
+- /* If NEXT field is NULL then update it with next hash */
+- if (!(val & next_mask)) {
+- val |= (u32)hash;
+- writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+- }
+-
+- val = readl(bdf_to_sid_base + hash * sizeof(u32));
+- }
+-
+- /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+- val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+- writel(val, bdf_to_sid_base + hash * sizeof(u32));
+- }
+-
+- kfree(map);
+-
+- return 0;
+-}
+-
+ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
+ {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+@@ -1616,7 +1615,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+- .config_sid = qcom_pcie_config_sid_sm8250,
++ .config_sid = qcom_pcie_config_sid_1_9_0,
+ };
+
+ /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+--
+2.43.0
+
--- /dev/null
+From 1cdff27133f0e2153edc4a20eff364e1d6e833c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jan 2024 10:00:20 +0100
+Subject: pci_iounmap(): Fix MMIO mapping leak
+
+From: Philipp Stanner <pstanner@redhat.com>
+
+[ Upstream commit 7626913652cc786c238e2dd7d8740b17d41b2637 ]
+
+The #ifdef ARCH_HAS_GENERIC_IOPORT_MAP accidentally also guards iounmap(),
+which means MMIO mappings are leaked.
+
+Move the guard so we call iounmap() for MMIO mappings.
+
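+A rough sketch of the resulting control flow (simplified from the hunk
+below; names follow the surrounding code and are not a verbatim copy):
+
+  void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+  {
+  #ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+          uintptr_t start = (uintptr_t)PCI_IOBASE;
+          uintptr_t addr = (uintptr_t)p;
+
+          /* I/O port mappings have nothing to iounmap() */
+          if (addr >= start && addr < start + IO_SPACE_LIMIT)
+                  return;
+  #endif
+          /* now reached for MMIO mappings as well */
+          iounmap(p);
+  }
+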
+Fixes: 316e8d79a095 ("pci_iounmap'2: Electric Boogaloo: try to make sense of it all")
+Link: https://lore.kernel.org/r/20240131090023.12331-2-pstanner@redhat.com
+Reported-by: Danilo Krummrich <dakr@redhat.com>
+Suggested-by: Arnd Bergmann <arnd@kernel.org>
+Signed-off-by: Philipp Stanner <pstanner@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: <stable@vger.kernel.org> # v5.15+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/pci_iomap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
+index ce39ce9f3526e..2829ddb0e316b 100644
+--- a/lib/pci_iomap.c
++++ b/lib/pci_iomap.c
+@@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+- iounmap(p);
+ #endif
++ iounmap(p);
+ }
+ EXPORT_SYMBOL(pci_iounmap);
+
+--
+2.43.0
+
--- /dev/null
+From e8d21b4c757f51751cf49e4a498b9cf8ab35430f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 11:03:27 +0800
+Subject: phy: tegra: xusb: Add API to retrieve the port number of phy
+
+From: Wayne Chang <waynec@nvidia.com>
+
+[ Upstream commit d843f031d9e90462253015bc0bd9e3852d206bf2 ]
+
+This patch introduces a new API, tegra_xusb_padctl_get_port_number,
+to the Tegra XUSB Pad Controller driver. This API is used to identify
+the USB port that is associated with a given PHY.
+
+The function takes a PHY pointer for either a USB2 PHY or USB3 PHY as input
+and returns the corresponding port number. If the PHY pointer is invalid,
+it returns -ENODEV.
+
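+A minimal usage sketch, assuming 'phy' is a USB2 or USB3 PHY the caller
+has already obtained (the surrounding driver code is illustrative only):
+
+  int port = tegra_xusb_padctl_get_port_number(phy);
+
+  if (port < 0)
+          return port;    /* -ENODEV when the PHY pointer is invalid */
+
+  /* 'port' can now be used to index per-port data in the caller */
+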
+Cc: stable@vger.kernel.org
+Signed-off-by: Wayne Chang <waynec@nvidia.com>
+Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Link: https://lore.kernel.org/r/20240307030328.1487748-2-waynec@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/tegra/xusb.c | 13 +++++++++++++
+ include/linux/phy/tegra/xusb.h | 1 +
+ 2 files changed, 14 insertions(+)
+
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index 4d5b4071d47d5..dc22b1dd2c8ba 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -1518,6 +1518,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ }
+ EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+
++int tegra_xusb_padctl_get_port_number(struct phy *phy)
++{
++ struct tegra_xusb_lane *lane;
++
++ if (!phy)
++ return -ENODEV;
++
++ lane = phy_get_drvdata(phy);
++
++ return lane->index;
++}
++EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
++
+ MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+ MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
+index 70998e6dd6fdc..6ca51e0080ec0 100644
+--- a/include/linux/phy/tegra/xusb.h
++++ b/include/linux/phy/tegra/xusb.h
+@@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
+ int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
++int tegra_xusb_padctl_get_port_number(struct phy *phy);
+ int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+ int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+--
+2.43.0
+
--- /dev/null
+From 03366c7205d287212659f4b5e049885e2a2d1ee0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Mar 2024 17:26:57 +0800
+Subject: PM: sleep: wakeirq: fix wake irq warning in system suspend
+
+From: Qingliang Li <qingliang.li@mediatek.com>
+
+[ Upstream commit e7a7681c859643f3f2476b2a28a494877fd89442 ]
+
+When a driver uses pm_runtime_force_suspend() as the system suspend callback
+function and registers the wake irq with reverse enable ordering, the wake
+irq will be re-enabled when entering system suspend, triggering an
+'Unbalanced enable for IRQ xxx' warning. In this scenario, the call
+sequence during system suspend is as follows:
+ suspend_devices_and_enter()
+ -> dpm_suspend_start()
+ -> dpm_run_callback()
+ -> pm_runtime_force_suspend()
+ -> dev_pm_enable_wake_irq_check()
+ -> dev_pm_enable_wake_irq_complete()
+
+ -> suspend_enter()
+ -> dpm_suspend_noirq()
+ -> device_wakeup_arm_wake_irqs()
+ -> dev_pm_arm_wake_irq()
+
+To fix this issue, complete the setting of WAKE_IRQ_DEDICATED_ENABLED flag
+in dev_pm_enable_wake_irq_complete() to avoid redundant irq enablement.
+
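+The affected combination in a driver looks roughly like the sketch below
+(driver names are illustrative; the reverse-ordering registration helper
+is assumed to be dev_pm_set_dedicated_wake_irq_reverse()):
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+          int irq = platform_get_irq(pdev, 1);
+
+          /* wake irq enabled after the device, disabled before it */
+          return dev_pm_set_dedicated_wake_irq_reverse(&pdev->dev, irq);
+  }
+
+  static const struct dev_pm_ops foo_pm_ops = {
+          SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                  pm_runtime_force_resume)
+  };
+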
+Fixes: 8527beb12087 ("PM: sleep: wakeirq: fix wake irq arming")
+Reviewed-by: Dhruva Gole <d-gole@ti.com>
+Signed-off-by: Qingliang Li <qingliang.li@mediatek.com>
+Reviewed-by: Johan Hovold <johan+linaro@kernel.org>
+Cc: 5.16+ <stable@vger.kernel.org> # 5.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/power/wakeirq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index afd094dec5ca3..ca0c092ba47fb 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -362,8 +362,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
++ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
+ enable_irq(wirq->irq);
++ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++ }
+ }
+
+ /**
+--
+2.43.0
+
--- /dev/null
+From 88745b6bbfc7e4c7f1398a58e2160bc03161ed5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 12:14:59 +0530
+Subject: PM: suspend: Set mem_sleep_current during kernel command line setup
+
+From: Maulik Shah <quic_mkshah@quicinc.com>
+
+[ Upstream commit 9bc4ffd32ef8943f5c5a42c9637cfd04771d021b ]
+
+psci_init_system_suspend() invokes suspend_set_ops() very early during
+bootup, even before the kernel command line option for mem_sleep_default
+is set up. This leads to the kernel command line mem_sleep_default=s2idle
+not working, as mem_sleep_current gets changed to deep via
+suspend_set_ops() and never changes back to s2idle.
+
+Set mem_sleep_current along with mem_sleep_default during kernel command
+line setup as the default suspend mode.
+
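+For example, on a platform that supports both modes, booting with
+mem_sleep_default=s2idle should now leave /sys/power/mem_sleep reporting
+something like "[s2idle] deep" instead of silently falling back to
+"s2idle [deep]" once PSCI registers its suspend operations.
+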
+Fixes: faf7ec4a92c0 ("drivers: firmware: psci: add system suspend support")
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Maulik Shah <quic_mkshah@quicinc.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/power/suspend.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index fa3bf161d13f7..a718067deecee 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str)
+ if (mem_sleep_labels[state] &&
+ !strcmp(str, mem_sleep_labels[state])) {
+ mem_sleep_default = state;
++ mem_sleep_current = state;
+ break;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 857af9fd2a1d2048cd84473b6dd3ad087c101464 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 23:25:19 +1100
+Subject: powerpc/fsl: Fix mfpmr build errors with newer binutils
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 5f491356b7149564ab22323ccce79c8d595bfd0c ]
+
+Binutils 2.38 complains about the use of mfpmr when building
+ppc6xx_defconfig:
+
+ CC arch/powerpc/kernel/pmc.o
+ {standard input}: Assembler messages:
+ {standard input}:45: Error: unrecognized opcode: `mfpmr'
+ {standard input}:56: Error: unrecognized opcode: `mtpmr'
+
+This is because by default the kernel is built with -mcpu=powerpc, and
+the mt/mfpmr instructions are not defined.
+
+It can be avoided by enabling CONFIG_E300C3_CPU, but just adding that to
+the defconfig will leave open the possibility of randconfig failures.
+
+So add machine directives around the mt/mfpmr instructions to tell
+binutils how to assemble them.
+
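+With the directives in place, the assembler effectively sees (PMR number
+and destination register are illustrative):
+
+  .machine push
+  .machine e300
+  mfpmr   r3, 16
+  .machine pop
+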
+Cc: stable@vger.kernel.org
+Reported-by: Jan-Benedict Glaw <jbglaw@lug-owl.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240229122521.762431-3-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/reg_fsl_emb.h | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
+index a21f529c43d96..8359c06d92d9f 100644
+--- a/arch/powerpc/include/asm/reg_fsl_emb.h
++++ b/arch/powerpc/include/asm/reg_fsl_emb.h
+@@ -12,9 +12,16 @@
+ #ifndef __ASSEMBLY__
+ /* Performance Monitor Registers */
+ #define mfpmr(rn) ({unsigned int rval; \
+- asm volatile("mfpmr %0," __stringify(rn) \
++ asm volatile(".machine push; " \
++ ".machine e300; " \
++ "mfpmr %0," __stringify(rn) ";" \
++ ".machine pop; " \
+ : "=r" (rval)); rval;})
+-#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
++#define mtpmr(rn, v) asm volatile(".machine push; " \
++ ".machine e300; " \
++ "mtpmr " __stringify(rn) ",%0; " \
++ ".machine pop; " \
++ : : "r" (v))
+ #endif /* __ASSEMBLY__ */
+
+ /* Freescale Book E Performance Monitor APU Registers */
+--
+2.43.0
+
--- /dev/null
+From 22d1f0c4288dadb4edbbe13dbc37537f3d571360 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Feb 2024 00:14:04 +1100
+Subject: powerpc/smp: Adjust nr_cpu_ids to cover all threads of a core
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 5580e96dad5a439d561d9648ffcbccb739c2a120 ]
+
+If nr_cpu_ids is too low to include at least all the threads of a single
+core adjust nr_cpu_ids upwards. This avoids triggering odd bugs in code
+that assumes all threads of a core are available.
+
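+For example, on an SMT8 system booted with nr_cpus=6, nr_cpu_ids is now
+rounded up to ALIGN(6, 8) = 8, so all eight threads of the boot core
+remain usable.
+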
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20231229120107.2281153-1-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/prom.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 8537c354c560b..a64f4fb332893 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -369,6 +369,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ if (IS_ENABLED(CONFIG_PPC64))
+ boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+
++ if (nr_cpu_ids % nthreads != 0) {
++ set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
++ pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
++ nr_cpu_ids);
++ }
++
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+ * meet various levels of the architecture:
+--
+2.43.0
+
--- /dev/null
+From 7d7f2ad778d43581263660d4324e0c0791832153 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Feb 2024 00:14:04 +1100
+Subject: powerpc/smp: Increase nr_cpu_ids to include the boot CPU
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 777f81f0a9c780a6443bcf2c7785f0cc2e87c1ef ]
+
+If nr_cpu_ids is too low to include the boot CPU adjust nr_cpu_ids
+upward. Otherwise the kernel will BUG when trying to allocate a paca
+for the boot CPU and fail to boot.
+
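+For example, if the boot CPU is hardware thread 40 of an SMT8 core but the
+kernel was booted with nr_cpus=32, nr_cpu_ids is raised to
+min(CONFIG_NR_CPUS, ALIGN(40 + 1, 8)) = 48 so that a paca can be allocated
+for the boot CPU.
+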
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20231229120107.2281153-2-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/prom.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index a64f4fb332893..9531ab90feb8a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -375,6 +375,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ nr_cpu_ids);
+ }
+
++ if (boot_cpuid >= nr_cpu_ids) {
++ set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
++ pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
++ boot_cpuid, nr_cpu_ids);
++ }
++
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+ * meet various levels of the architecture:
+--
+2.43.0
+
--- /dev/null
+From a3c4db899b22a7c90a37cdc078f1d188938a20fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Jan 2024 11:07:43 -0700
+Subject: powerpc: xor_vmx: Add '-mhard-float' to CFLAGS
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit 35f20786c481d5ced9283ff42de5c69b65e5ed13 ]
+
+arch/powerpc/lib/xor_vmx.o is built with '-msoft-float' (from the main
+powerpc Makefile) and '-maltivec' (from its CFLAGS), which causes an
+error when building with clang after a recent change in main:
+
+ error: option '-msoft-float' cannot be specified with '-maltivec'
+ make[6]: *** [scripts/Makefile.build:243: arch/powerpc/lib/xor_vmx.o] Error 1
+
+Explicitly add '-mhard-float' before '-maltivec' in xor_vmx.o's CFLAGS
+to override the previous inclusion of '-msoft-float' (as the last option
+wins), which matches how other areas of the kernel use '-maltivec', such
+as AMDGPU.
+
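+The resulting compiler invocation ends up looking roughly like this
+(simplified):
+
+  $(CC) ... -msoft-float ... -mhard-float -maltivec -mabi=altivec \
+        -c xor_vmx.c
+
+where the later '-mhard-float' wins over the earlier '-msoft-float', so
+the '-msoft-float'/'-maltivec' conflict is no longer triggered.
+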
+Cc: stable@vger.kernel.org
+Closes: https://github.com/ClangBuiltLinux/linux/issues/1986
+Link: https://github.com/llvm/llvm-project/commit/4792f912b232141ecba4cbae538873be3c28556c
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240127-ppc-xor_vmx-drop-msoft-float-v1-1-f24140e81376@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/lib/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 9b394bab17eba..374b82cf13d9d 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -72,7 +72,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
+ obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
+
+ obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
+-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
++CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
+ # Enable <altivec.h>
+ CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
+
+--
+2.43.0
+
--- /dev/null
+From 7c8ac0e3d470d8fa36066cdce03f122f3e9c7a87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 13:36:59 +0800
+Subject: remoteproc: virtio: Fix wdg cannot recovery remote processor
+
+From: Joakim Zhang <joakim.zhang@cixtech.com>
+
+[ Upstream commit b327c72753d6a78de37aed6c35756f2ef62897ee ]
+
+Recovering the remote processor failed when a watchdog (wdg) irq was received:
+[ 0.842574] remoteproc remoteproc0: crash detected in cix-dsp-rproc: type watchdog
+[ 0.842750] remoteproc remoteproc0: handling crash #1 in cix-dsp-rproc
+[ 0.842824] remoteproc remoteproc0: recovering cix-dsp-rproc
+[ 0.843342] remoteproc remoteproc0: stopped remote processor cix-dsp-rproc
+[ 0.847901] rproc-virtio rproc-virtio.0.auto: Failed to associate buffer
+[ 0.847979] remoteproc remoteproc0: failed to probe subdevices for cix-dsp-rproc: -16
+
+The reason is that the DMA coherent memory is not released when
+recovering the remote processor, because rproc_virtio_remove(), where
+that memory is released, is not called in the recovery path. The next
+attempt to allocate and associate a buffer then fails.
+
+Release the reserved memory from rproc_virtio_dev_release() instead of
+rproc_virtio_remove().
+
+Fixes: 1d7b61c06dc3 ("remoteproc: virtio: Create platform device for the remoteproc_virtio")
+Signed-off-by: Joakim Zhang <joakim.zhang@cixtech.com>
+Acked-by: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20231217053659.3245745-1-joakim.zhang@cixtech.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/remoteproc/remoteproc_virtio.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
+index 0e95525c11581..ab5e4f02ab225 100644
+--- a/drivers/remoteproc/remoteproc_virtio.c
++++ b/drivers/remoteproc/remoteproc_virtio.c
+@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
+
+ kfree(vdev);
+
++ of_reserved_mem_device_release(&rvdev->pdev->dev);
++ dma_release_coherent_memory(&rvdev->pdev->dev);
++
+ put_device(&rvdev->pdev->dev);
+ }
+
+@@ -584,9 +587,6 @@ static int rproc_virtio_remove(struct platform_device *pdev)
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ rproc_remove_rvdev(rvdev);
+
+- of_reserved_mem_device_release(&pdev->dev);
+- dma_release_coherent_memory(&pdev->dev);
+-
+ put_device(&rproc->dev);
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 9e85cbcbc007a21ab4074828ba06fa5486238b06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Mar 2024 14:42:18 -0700
+Subject: Revert "block/mq-deadline: use correct way to throttling write
+ requests"
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 256aab46e31683d76d45ccbedc287b4d3f3e322b ]
+
+The code "max(1U, 3 * (1U << shift) / 4)" comes from the Kyber I/O
+scheduler. The Kyber I/O scheduler maintains one internal queue per hwq
+and hence derives its async_depth from the number of hwq tags. Using
+this approach for the mq-deadline scheduler is wrong since the
+mq-deadline scheduler maintains one internal queue for all hwqs
+combined. Hence this revert.
+
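+As a concrete example, after this revert a queue with q->nr_requests = 256
+gets dd->async_depth = max(1, 3 * 256 / 4) = 192, i.e. the async depth is
+again derived from the per-queue request count rather than from the hwq
+tag bitmap shift.
+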
+Cc: stable@vger.kernel.org
+Cc: Damien Le Moal <dlemoal@kernel.org>
+Cc: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Cc: Zhiguo Niu <Zhiguo.Niu@unisoc.com>
+Fixes: d47f9717e5cf ("block/mq-deadline: use correct way to throttling write requests")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20240313214218.1736147-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/mq-deadline.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 55e26065c2e27..f10c2a0d18d41 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -622,9 +622,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ struct request_queue *q = hctx->queue;
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct blk_mq_tags *tags = hctx->sched_tags;
+- unsigned int shift = tags->bitmap_tags.sb.shift;
+
+- dd->async_depth = max(1U, 3 * (1U << shift) / 4);
++ dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+
+ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ }
+--
+2.43.0
+
--- /dev/null
+From 6eb4b56b37d2ec4e24371856593ccdc9f2a5dadc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Mar 2024 11:56:41 -0400
+Subject: ring-buffer: Do not set shortest_full when full target is hit
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 761d9473e27f0c8782895013a3e7b52a37c8bcfc ]
+
+The rb_watermark_hit() checks if the amount of data in the ring buffer is
+above the percentage level passed in by the "full" variable. If it is, it
+returns true.
+
+But it also sets the "shortest_full" field of the cpu_buffer that informs
+writers that it needs to call the irq_work if the amount of data on the
+ring buffer is above the requested amount.
+
+But rb_watermark_hit() always sets shortest_full, even when the amount in
+the ring buffer already satisfies the waiter. Since the waiter is not
+going to wait in that case, there is no reason to set shortest_full.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240312115641.6aa8ba08@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: 42fb0a1e84ff5 ("tracing/ring-buffer: Have polling block on watermark")
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 3c4d62f499505..c934839f625df 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -974,9 +974,10 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+ ret = !pagebusy && full_hit(buffer, cpu, full);
+
+- if (!cpu_buffer->shortest_full ||
+- cpu_buffer->shortest_full > full)
+- cpu_buffer->shortest_full = full;
++ if (!ret && (!cpu_buffer->shortest_full ||
++ cpu_buffer->shortest_full > full)) {
++ cpu_buffer->shortest_full = full;
++ }
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ return ret;
+--
+2.43.0
+
--- /dev/null
+From 23b569afd8e1e30a3763f31871dfe2574e8c6d98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Mar 2024 09:19:20 -0400
+Subject: ring-buffer: Fix full_waiters_pending in poll
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 8145f1c35fa648da662078efab299c4467b85ad5 ]
+
+If a reader of the ring buffer is doing a poll, and waiting for the ring
+buffer to hit a specific watermark, there could be a case where it gets
+into an infinite ping-pong loop.
+
+The poll code has:
+
+ rbwork->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+
+The writer will see full_waiters_pending and check if the ring buffer is
+filled over the percentage of the shortest_full value. If it is, it calls
+an irq_work to wake up all the waiters.
+
+But the code could get into a circular loop:
+
+ CPU 0 CPU 1
+ ----- -----
+ [ Poll ]
+ [ shortest_full = 0 ]
+ rbwork->full_waiters_pending = true;
+ if (rbwork->full_waiters_pending &&
+ [ buffer percent ] > shortest_full) {
+ rbwork->wakeup_full = true;
+ [ queue_irqwork ]
+
+ cpu_buffer->shortest_full = full;
+
+ [ IRQ work ]
+ if (rbwork->wakeup_full) {
+ cpu_buffer->shortest_full = 0;
+ wakeup poll waiters;
+ [woken]
+ if ([ buffer percent ] > full)
+ break;
+ rbwork->full_waiters_pending = true;
+ if (rbwork->full_waiters_pending &&
+ [ buffer percent ] > shortest_full) {
+ rbwork->wakeup_full = true;
+ [ queue_irqwork ]
+
+ cpu_buffer->shortest_full = full;
+
+ [ IRQ work ]
+ if (rbwork->wakeup_full) {
+ cpu_buffer->shortest_full = 0;
+ wakeup poll waiters;
+ [woken]
+
+ [ Wash, rinse, repeat! ]
+
+In the poll code, shortest_full needs to be set before
+full_waiters_pending, as once that flag is set, the writer will compare
+against the current shortest_full (which would still be stale) to decide
+whether to call the irq_work, which will reset shortest_full (expecting
+the readers to update it).
+
+Also move the setting of full_waiters_pending after the check if the ring
+buffer has the required percentage filled. There's no reason to tell the
+writer to wake up waiters if there are no waiters.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240312131952.630922155@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: 42fb0a1e84ff5 ("tracing/ring-buffer: Have polling block on watermark")
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 3b50e17e2c9ab..e07f45d1890d3 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1116,16 +1116,32 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ poll_wait(filp, &rbwork->full_waiters, poll_table);
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+- rbwork->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+- } else {
+- poll_wait(filp, &rbwork->waiters, poll_table);
+- rbwork->waiters_pending = true;
++ if (full_hit(buffer, cpu, full))
++ return EPOLLIN | EPOLLRDNORM;
++ /*
++ * Only allow full_waiters_pending update to be seen after
++ * the shortest_full is set. If the writer sees the
++ * full_waiters_pending flag set, it will compare the
++ * amount in the ring buffer to shortest_full. If the amount
++ * in the ring buffer is greater than the shortest_full
++ * percent, it will call the irq_work handler to wake up
++ * this list. The irq_handler will reset shortest_full
++ * back to zero. That's done under the reader_lock, but
++ * the below smp_mb() makes sure that the update to
++ * full_waiters_pending doesn't leak up into the above.
++ */
++ smp_mb();
++ rbwork->full_waiters_pending = true;
++ return 0;
+ }
+
++ poll_wait(filp, &rbwork->waiters, poll_table);
++ rbwork->waiters_pending = true;
++
+ /*
+ * There's a tight race between setting the waiters_pending and
+ * checking if the ring buffer is empty. Once the waiters_pending bit
+@@ -1141,9 +1157,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ */
+ smp_mb();
+
+- if (full)
+- return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+-
+ if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+ return EPOLLIN | EPOLLRDNORM;
+--
+2.43.0
+
--- /dev/null
+From 9e117b47712d9e51b86e22639eb04d28fa25eece Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:24:04 -0500
+Subject: ring-buffer: Fix resetting of shortest_full
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 68282dd930ea38b068ce2c109d12405f40df3f93 ]
+
+The "shortest_full" variable is used to keep track of the waiter that is
+waiting for the smallest amount on the ring buffer before being woken up.
+When a tasks waits on the ring buffer, it passes in a "full" value that is
+a percentage. 0 means wake up on any data. 1-100 means wake up from 1% to
+100% full buffer.
+
+As all waiters are on the same wait queue, the wake up happens for the
+waiter with the smallest percentage.
+
+The problem is that the shortest_full field on the cpu_buffer that stores
+the smallest requested amount doesn't get reset when all the waiters are
+woken up. It only gets reset when the ring buffer itself is reset
+(echo > /sys/kernel/tracing/trace).
+
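+For example, if one waiter asked to be woken at 10% and another at 50%,
+shortest_full stays at 10 after both have been woken, so a later waiter
+asking for 50% can be woken (spuriously) as soon as the buffer crosses 10%.
+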
+This means that tasks may be woken up more often than they want to be.
+Instead, have the shortest_full field get reset just before waking up all
+the tasks. If the tasks wait again, they will update the shortest_full
+before sleeping.
+
+Also add locking around setting of shortest_full in the poll logic, and
+change "work" to "rbwork" to match the variable name for rb_irq_work
+structures that are used in other places.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240308202431.948914369@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: linke li <lilinke99@qq.com>
+Cc: Rabin Vincent <rabin@rab.in>
+Fixes: 2c2b0a78b3739 ("ring-buffer: Add percentage of ring buffer full to wake up reader")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 8145f1c35fa6 ("ring-buffer: Fix full_waiters_pending in poll")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 30 +++++++++++++++++++++++-------
+ 1 file changed, 23 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c934839f625df..3b50e17e2c9ab 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -907,8 +907,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
+
+ wake_up_all(&rbwork->waiters);
+ if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
++ /* Only cpu_buffer sets the above flags */
++ struct ring_buffer_per_cpu *cpu_buffer =
++ container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
++
++ /* Called from interrupt context */
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ rbwork->wakeup_full = false;
+ rbwork->full_waiters_pending = false;
++
++ /* Waking up all waiters, they will reset the shortest full */
++ cpu_buffer->shortest_full = 0;
++ raw_spin_unlock(&cpu_buffer->reader_lock);
++
+ wake_up_all(&rbwork->full_waiters);
+ }
+ }
+@@ -1086,28 +1097,33 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+- struct rb_irq_work *work;
++ struct rb_irq_work *rbwork;
+
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+- work = &buffer->irq_work;
++ rbwork = &buffer->irq_work;
+ full = 0;
+ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return EPOLLERR;
+
+ cpu_buffer = buffer->buffers[cpu];
+- work = &cpu_buffer->irq_work;
++ rbwork = &cpu_buffer->irq_work;
+ }
+
+ if (full) {
+- poll_wait(filp, &work->full_waiters, poll_table);
+- work->full_waiters_pending = true;
++ unsigned long flags;
++
++ poll_wait(filp, &rbwork->full_waiters, poll_table);
++
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ rbwork->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ } else {
+- poll_wait(filp, &work->waiters, poll_table);
+- work->waiters_pending = true;
++ poll_wait(filp, &rbwork->waiters, poll_table);
++ rbwork->waiters_pending = true;
+ }
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From 46c16e019ab79a54f4c5482511276afa36667f60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:24:03 -0500
+Subject: ring-buffer: Fix waking up ring buffer readers
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit b3594573681b53316ec0365332681a30463edfd6 ]
+
+A task can wait on a ring buffer for when it fills up to a specific
+watermark. The writer will check the minimum watermark that waiters are
+waiting for and if the ring buffer is past that, it will wake up all the
+waiters.
+
+The waiters are in a wait loop, and will first check if a signal is
+pending and then check if the ring buffer is at the desired level where it
+should break out of the loop.
+
+If a file that uses a ring buffer closes, and there are threads waiting on
+the ring buffer, it needs to wake up those threads. To do this, a
+"wait_index" was used.
+
+Before entering the wait loop, the waiter will read the wait_index. On
+wakeup, it will check if the wait_index is different than when it entered
+the loop, and will exit the loop if it is. The waker will only need to
+update the wait_index before waking up the waiters.
+
+This had a couple of bugs: one trivial, and one broken by design.
+
+The trivial bug was that the waiter checked the wait_index after the
+schedule() call. It had to be checked between the prepare_to_wait() and
+the schedule(), which it was not.
+
+The main bug is that the first check to set the default wait_index will
+always be outside the prepare_to_wait() and the schedule(). That's because
+the ring_buffer_wait() doesn't have enough context to know if it should
+break out of the loop.
+
+The loop itself is not needed, because all the callers of
+ring_buffer_wait() also have their own loops, and the callers have a
+better sense of the context for deciding whether to break out of the
+loop or not.
+
+Just have the ring_buffer_wait() block once, and if it gets woken up, exit
+the function and let the callers decide what to do next.
+
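+A caller therefore ends up with a pattern roughly like this sketch
+(simplified; caller_condition_met() stands in for whatever exit condition
+the real caller checks):
+
+  while (!caller_condition_met()) {
+          int ret = ring_buffer_wait(buffer, cpu, full);
+
+          if (ret < 0)            /* e.g. -EINTR on a pending signal */
+                  return ret;
+          /* woken up: re-check the condition and decide whether to loop */
+  }
+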
+Link: https://lore.kernel.org/all/CAHk-=whs5MdtNjzFkTyaUy=vHi=qwWgPi0JgTe6OYUYMNSRZfg@mail.gmail.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20240308202431.792933613@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: linke li <lilinke99@qq.com>
+Cc: Rabin Vincent <rabin@rab.in>
+Fixes: e30f53aad2202 ("tracing: Do not busy wait in buffer splice")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 761d9473e27f ("ring-buffer: Do not set shortest_full when full target is hit")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 139 ++++++++++++++++++-------------------
+ 1 file changed, 68 insertions(+), 71 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index e019a9278794f..3c4d62f499505 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -414,7 +414,6 @@ struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
+ wait_queue_head_t full_waiters;
+- long wait_index;
+ bool waiters_pending;
+ bool full_waiters_pending;
+ bool wakeup_full;
+@@ -949,14 +948,40 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ rbwork = &cpu_buffer->irq_work;
+ }
+
+- rbwork->wait_index++;
+- /* make sure the waiters see the new index */
+- smp_wmb();
+-
+ /* This can be called in any context */
+ irq_work_queue(&rbwork->work);
+ }
+
++static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
++{
++ struct ring_buffer_per_cpu *cpu_buffer;
++ bool ret = false;
++
++ /* Reads of all CPUs always waits for any data */
++ if (cpu == RING_BUFFER_ALL_CPUS)
++ return !ring_buffer_empty(buffer);
++
++ cpu_buffer = buffer->buffers[cpu];
++
++ if (!ring_buffer_empty_cpu(buffer, cpu)) {
++ unsigned long flags;
++ bool pagebusy;
++
++ if (!full)
++ return true;
++
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
++ ret = !pagebusy && full_hit(buffer, cpu, full);
++
++ if (!cpu_buffer->shortest_full ||
++ cpu_buffer->shortest_full > full)
++ cpu_buffer->shortest_full = full;
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ }
++ return ret;
++}
++
+ /**
+ * ring_buffer_wait - wait for input to the ring buffer
+ * @buffer: buffer to wait on
+@@ -972,7 +997,6 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+ struct ring_buffer_per_cpu *cpu_buffer;
+ DEFINE_WAIT(wait);
+ struct rb_irq_work *work;
+- long wait_index;
+ int ret = 0;
+
+ /*
+@@ -991,81 +1015,54 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+ work = &cpu_buffer->irq_work;
+ }
+
+- wait_index = READ_ONCE(work->wait_index);
+-
+- while (true) {
+- if (full)
+- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+- else
+- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+-
+- /*
+- * The events can happen in critical sections where
+- * checking a work queue can cause deadlocks.
+- * After adding a task to the queue, this flag is set
+- * only to notify events to try to wake up the queue
+- * using irq_work.
+- *
+- * We don't clear it even if the buffer is no longer
+- * empty. The flag only causes the next event to run
+- * irq_work to do the work queue wake up. The worse
+- * that can happen if we race with !trace_empty() is that
+- * an event will cause an irq_work to try to wake up
+- * an empty queue.
+- *
+- * There's no reason to protect this flag either, as
+- * the work queue and irq_work logic will do the necessary
+- * synchronization for the wake ups. The only thing
+- * that is necessary is that the wake up happens after
+- * a task has been queued. It's OK for spurious wake ups.
+- */
+- if (full)
+- work->full_waiters_pending = true;
+- else
+- work->waiters_pending = true;
+-
+- if (signal_pending(current)) {
+- ret = -EINTR;
+- break;
+- }
+-
+- if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+- break;
+-
+- if (cpu != RING_BUFFER_ALL_CPUS &&
+- !ring_buffer_empty_cpu(buffer, cpu)) {
+- unsigned long flags;
+- bool pagebusy;
+- bool done;
+-
+- if (!full)
+- break;
+-
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+- pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+- done = !pagebusy && full_hit(buffer, cpu, full);
++ if (full)
++ prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
++ else
++ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+
+- if (!cpu_buffer->shortest_full ||
+- cpu_buffer->shortest_full > full)
+- cpu_buffer->shortest_full = full;
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+- if (done)
+- break;
+- }
++ /*
++ * The events can happen in critical sections where
++ * checking a work queue can cause deadlocks.
++ * After adding a task to the queue, this flag is set
++ * only to notify events to try to wake up the queue
++ * using irq_work.
++ *
++ * We don't clear it even if the buffer is no longer
++ * empty. The flag only causes the next event to run
++ * irq_work to do the work queue wake up. The worse
++ * that can happen if we race with !trace_empty() is that
++ * an event will cause an irq_work to try to wake up
++ * an empty queue.
++ *
++ * There's no reason to protect this flag either, as
++ * the work queue and irq_work logic will do the necessary
++ * synchronization for the wake ups. The only thing
++ * that is necessary is that the wake up happens after
++ * a task has been queued. It's OK for spurious wake ups.
++ */
++ if (full)
++ work->full_waiters_pending = true;
++ else
++ work->waiters_pending = true;
+
+- schedule();
++ if (rb_watermark_hit(buffer, cpu, full))
++ goto out;
+
+- /* Make sure to see the new wait index */
+- smp_rmb();
+- if (wait_index != work->wait_index)
+- break;
++ if (signal_pending(current)) {
++ ret = -EINTR;
++ goto out;
+ }
+
++ schedule();
++ out:
+ if (full)
+ finish_wait(&work->full_waiters, &wait);
+ else
+ finish_wait(&work->waiters, &wait);
+
++ if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
++ ret = -EINTR;
++
+ return ret;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 376f52996c123d9371b1d8cc7c33be2f48e1a3fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Mar 2024 08:15:07 -0400
+Subject: ring-buffer: Use wait_event_interruptible() in ring_buffer_wait()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit 7af9ded0c2caac0a95f33df5cb04706b0f502588 ]
+
+Convert ring_buffer_wait() over to wait_event_interruptible(). The default
+condition is to execute the wait loop inside __wait_event() just once.
+
+This does not change the ring_buffer_wait() prototype yet, but
+restructures the code so that it can take a "cond" and "data" parameter
+and will call wait_event_interruptible() with a helper function as the
+condition.
+
+The helper function (rb_wait_cond) takes the cond function and data
+parameters. It will first check if the buffer hit the watermark defined by
+the "full" parameter and then call the passed in condition parameter. If
+either are true, it returns true.
+
+If rb_wait_cond() does not return true, it will set the appropriate
+"waiters_pending" flag and returns false.
+
+Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wgsNgewHFxZAJiAQznwPMqEtQmi1waeS2O1v6L4c_Um5A@mail.gmail.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20240312121703.399598519@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: linke li <lilinke99@qq.com>
+Cc: Rabin Vincent <rabin@rab.in>
+Fixes: f3ddb74ad0790 ("tracing: Wake up ring buffer waiters on closing of the file")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ring_buffer.h | 1 +
+ kernel/trace/ring_buffer.c | 116 +++++++++++++++++++++---------------
+ 2 files changed, 69 insertions(+), 48 deletions(-)
+
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 3c7d295746f67..3e7bfc0f65aee 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
+ __ring_buffer_alloc((size), (flags), &__key); \
+ })
+
++typedef bool (*ring_buffer_cond_fn)(void *data);
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index e07f45d1890d3..431a922e5c89e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -994,43 +994,15 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+ return ret;
+ }
+
+-/**
+- * ring_buffer_wait - wait for input to the ring buffer
+- * @buffer: buffer to wait on
+- * @cpu: the cpu buffer to wait on
+- * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
+- *
+- * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+- * as data is added to any of the @buffer's cpu buffers. Otherwise
+- * it will wait for data to be added to a specific cpu buffer.
+- */
+-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
++static inline bool
++rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
++ int cpu, int full, ring_buffer_cond_fn cond, void *data)
+ {
+- struct ring_buffer_per_cpu *cpu_buffer;
+- DEFINE_WAIT(wait);
+- struct rb_irq_work *work;
+- int ret = 0;
+-
+- /*
+- * Depending on what the caller is waiting for, either any
+- * data in any cpu buffer, or a specific buffer, put the
+- * caller on the appropriate wait queue.
+- */
+- if (cpu == RING_BUFFER_ALL_CPUS) {
+- work = &buffer->irq_work;
+- /* Full only makes sense on per cpu reads */
+- full = 0;
+- } else {
+- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+- return -ENODEV;
+- cpu_buffer = buffer->buffers[cpu];
+- work = &cpu_buffer->irq_work;
+- }
++ if (rb_watermark_hit(buffer, cpu, full))
++ return true;
+
+- if (full)
+- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+- else
+- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
++ if (cond(data))
++ return true;
+
+ /*
+ * The events can happen in critical sections where
+@@ -1053,27 +1025,75 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+ * a task has been queued. It's OK for spurious wake ups.
+ */
+ if (full)
+- work->full_waiters_pending = true;
++ rbwork->full_waiters_pending = true;
+ else
+- work->waiters_pending = true;
++ rbwork->waiters_pending = true;
+
+- if (rb_watermark_hit(buffer, cpu, full))
+- goto out;
++ return false;
++}
+
+- if (signal_pending(current)) {
+- ret = -EINTR;
+- goto out;
++/*
++ * The default wait condition for ring_buffer_wait() is to just to exit the
++ * wait loop the first time it is woken up.
++ */
++static bool rb_wait_once(void *data)
++{
++ long *once = data;
++
++ /* wait_event() actually calls this twice before scheduling*/
++ if (*once > 1)
++ return true;
++
++ (*once)++;
++ return false;
++}
++
++/**
++ * ring_buffer_wait - wait for input to the ring buffer
++ * @buffer: buffer to wait on
++ * @cpu: the cpu buffer to wait on
++ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
++ *
++ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
++ * as data is added to any of the @buffer's cpu buffers. Otherwise
++ * it will wait for data to be added to a specific cpu buffer.
++ */
++int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
++{
++ struct ring_buffer_per_cpu *cpu_buffer;
++ struct wait_queue_head *waitq;
++ ring_buffer_cond_fn cond;
++ struct rb_irq_work *rbwork;
++ void *data;
++ long once = 0;
++ int ret = 0;
++
++ cond = rb_wait_once;
++ data = &once;
++
++ /*
++ * Depending on what the caller is waiting for, either any
++ * data in any cpu buffer, or a specific buffer, put the
++ * caller on the appropriate wait queue.
++ */
++ if (cpu == RING_BUFFER_ALL_CPUS) {
++ rbwork = &buffer->irq_work;
++ /* Full only makes sense on per cpu reads */
++ full = 0;
++ } else {
++ if (!cpumask_test_cpu(cpu, buffer->cpumask))
++ return -ENODEV;
++ cpu_buffer = buffer->buffers[cpu];
++ rbwork = &cpu_buffer->irq_work;
+ }
+
+- schedule();
+- out:
+ if (full)
+- finish_wait(&work->full_waiters, &wait);
++ waitq = &rbwork->full_waiters;
+ else
+- finish_wait(&work->waiters, &wait);
++ waitq = &rbwork->waiters;
+
+- if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
+- ret = -EINTR;
++ ret = wait_event_interruptible((*waitq),
++ rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
+
+ return ret;
+ }
+--
+2.43.0
+
--- /dev/null
+From 9baea8f96d51ed55ac72f05c440f900d6e9dfc82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 15:20:09 +0100
+Subject: s390/zcrypt: fix reference counting on zcrypt card objects
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+[ Upstream commit 50ed48c80fecbe17218afed4f8bed005c802976c ]
+
+Tests with hot-plugging crypto cards on KVM guests with a debug
+kernel build revealed a use-after-free on the load field of
+struct zcrypt_card. The reason was incorrect reference
+handling of the zcrypt card object, which could lead to the
+zcrypt card object being freed while it was still in use.
+
+This is an example of the slab message:
+
+ kernel: 0x00000000885a7512-0x00000000885a7513 @offset=1298. First byte 0x68 instead of 0x6b
+ kernel: Allocated in zcrypt_card_alloc+0x36/0x70 [zcrypt] age=18046 cpu=3 pid=43
+ kernel: kmalloc_trace+0x3f2/0x470
+ kernel: zcrypt_card_alloc+0x36/0x70 [zcrypt]
+ kernel: zcrypt_cex4_card_probe+0x26/0x380 [zcrypt_cex4]
+ kernel: ap_device_probe+0x15c/0x290
+ kernel: really_probe+0xd2/0x468
+ kernel: driver_probe_device+0x40/0xf0
+ kernel: __device_attach_driver+0xc0/0x140
+ kernel: bus_for_each_drv+0x8c/0xd0
+ kernel: __device_attach+0x114/0x198
+ kernel: bus_probe_device+0xb4/0xc8
+ kernel: device_add+0x4d2/0x6e0
+ kernel: ap_scan_adapter+0x3d0/0x7c0
+ kernel: ap_scan_bus+0x5a/0x3b0
+ kernel: ap_scan_bus_wq_callback+0x40/0x60
+ kernel: process_one_work+0x26e/0x620
+ kernel: worker_thread+0x21c/0x440
+ kernel: Freed in zcrypt_card_put+0x54/0x80 [zcrypt] age=9024 cpu=3 pid=43
+ kernel: kfree+0x37e/0x418
+ kernel: zcrypt_card_put+0x54/0x80 [zcrypt]
+ kernel: ap_device_remove+0x4c/0xe0
+ kernel: device_release_driver_internal+0x1c4/0x270
+ kernel: bus_remove_device+0x100/0x188
+ kernel: device_del+0x164/0x3c0
+ kernel: device_unregister+0x30/0x90
+ kernel: ap_scan_adapter+0xc8/0x7c0
+ kernel: ap_scan_bus+0x5a/0x3b0
+ kernel: ap_scan_bus_wq_callback+0x40/0x60
+ kernel: process_one_work+0x26e/0x620
+ kernel: worker_thread+0x21c/0x440
+ kernel: kthread+0x150/0x168
+ kernel: __ret_from_fork+0x3c/0x58
+ kernel: ret_from_fork+0xa/0x30
+ kernel: Slab 0x00000372022169c0 objects=20 used=18 fp=0x00000000885a7c88 flags=0x3ffff00000000a00(workingset|slab|node=0|zone=1|lastcpupid=0x1ffff)
+ kernel: Object 0x00000000885a74b8 @offset=1208 fp=0x00000000885a7c88
+ kernel: Redzone 00000000885a74b0: bb bb bb bb bb bb bb bb ........
+ kernel: Object 00000000885a74b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ kernel: Object 00000000885a74c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ kernel: Object 00000000885a74d8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ kernel: Object 00000000885a74e8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ kernel: Object 00000000885a74f8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ kernel: Object 00000000885a7508: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 68 4b 6b 6b 6b a5 kkkkkkkkkkhKkkk.
+ kernel: Redzone 00000000885a7518: bb bb bb bb bb bb bb bb ........
+ kernel: Padding 00000000885a756c: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZ
+ kernel: CPU: 0 PID: 387 Comm: systemd-udevd Not tainted 6.8.0-HF #2
+ kernel: Hardware name: IBM 3931 A01 704 (KVM/Linux)
+ kernel: Call Trace:
+ kernel: [<00000000ca5ab5b8>] dump_stack_lvl+0x90/0x120
+ kernel: [<00000000c99d78bc>] check_bytes_and_report+0x114/0x140
+ kernel: [<00000000c99d53cc>] check_object+0x334/0x3f8
+ kernel: [<00000000c99d820c>] alloc_debug_processing+0xc4/0x1f8
+ kernel: [<00000000c99d852e>] get_partial_node.part.0+0x1ee/0x3e0
+ kernel: [<00000000c99d94ec>] ___slab_alloc+0xaf4/0x13c8
+ kernel: [<00000000c99d9e38>] __slab_alloc.constprop.0+0x78/0xb8
+ kernel: [<00000000c99dc8dc>] __kmalloc+0x434/0x590
+ kernel: [<00000000c9b4c0ce>] ext4_htree_store_dirent+0x4e/0x1c0
+ kernel: [<00000000c9b908a2>] htree_dirblock_to_tree+0x17a/0x3f0
+ kernel: [<00000000c9b919dc>] ext4_htree_fill_tree+0x134/0x400
+ kernel: [<00000000c9b4b3d0>] ext4_dx_readdir+0x160/0x2f0
+ kernel: [<00000000c9b4bedc>] ext4_readdir+0x5f4/0x760
+ kernel: [<00000000c9a7efc4>] iterate_dir+0xb4/0x280
+ kernel: [<00000000c9a7f1ea>] __do_sys_getdents64+0x5a/0x120
+ kernel: [<00000000ca5d6946>] __do_syscall+0x256/0x310
+ kernel: [<00000000ca5eea10>] system_call+0x70/0x98
+ kernel: INFO: lockdep is turned off.
+ kernel: FIX kmalloc-96: Restoring Poison 0x00000000885a7512-0x00000000885a7513=0x6b
+ kernel: FIX kmalloc-96: Marking all objects used
+
+The fix is simple: before using the queue, not only the queue object
+but also the card object needs its reference count increased
+with a call to zcrypt_card_get(). Similarly, after use of the queue,
+not only the queue's but also the card object's reference count is
+decreased with zcrypt_card_put().
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Holger Dengler <dengler@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/crypto/zcrypt_api.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 28e34d155334b..6f44963d34bbf 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -617,6 +617,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ {
+ if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
+ return NULL;
++ zcrypt_card_get(zc);
+ zcrypt_queue_get(zq);
+ get_device(&zq->queue->ap_dev.device);
+ atomic_add(weight, &zc->load);
+@@ -636,6 +637,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ atomic_sub(weight, &zq->load);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
++ zcrypt_card_put(zc);
+ module_put(mod);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 0589609a5243ba6b8cc1a0bfffa1a200f6edcf23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Feb 2024 16:08:02 -0800
+Subject: selftests/mqueue: Set timeout to 180 seconds
+
+From: SeongJae Park <sj@kernel.org>
+
+[ Upstream commit 85506aca2eb4ea41223c91c5fe25125953c19b13 ]
+
+While mq_perf_tests runs with the default kselftest timeout limit, which
+is 45 seconds, the test takes about 60 seconds to complete on i3.metal
+AWS instances. Hence, the test always times out. Increase the timeout
+to 180 seconds.
+
+Fixes: 852c8cbf34d3 ("selftests/kselftest/runner.sh: Add 45 second timeout per test")
+Cc: <stable@vger.kernel.org> # 5.4.x
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mqueue/setting | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 tools/testing/selftests/mqueue/setting
+
+diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting
+new file mode 100644
+index 0000000000000..a953c96aa16e1
+--- /dev/null
++++ b/tools/testing/selftests/mqueue/setting
+@@ -0,0 +1 @@
++timeout=180
+--
+2.43.0
+
--- /dev/null
+From b7fc9e30906c0ef18ffff9851ba603963fc6939f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Mar 2024 13:43:49 -0800
+Subject: serial: Lock console when calling into driver before registration
+
+From: Peter Collingbourne <pcc@google.com>
+
+[ Upstream commit 801410b26a0e8b8a16f7915b2b55c9528b69ca87 ]
+
+During the handoff from earlycon to the real console driver, we have
+two separate drivers operating on the same device concurrently. In the
+case of the 8250 driver these concurrent accesses cause problems due
+to the driver's use of banked registers, controlled by LCR.DLAB. It is
+possible for the setup(), config_port(), pm() and set_mctrl() callbacks
+to set DLAB, which can cause the earlycon code that intends to access
+TX to instead access DLL, leading to missed output and corruption on
+the serial line due to unintended modifications to the baud rate.
+
+In particular, for setup() we have:
+
+univ8250_console_setup()
+-> serial8250_console_setup()
+-> uart_set_options()
+-> serial8250_set_termios()
+-> serial8250_do_set_termios()
+-> serial8250_do_set_divisor()
+
+For config_port() we have:
+
+serial8250_config_port()
+-> autoconfig()
+
+For pm() we have:
+
+serial8250_pm()
+-> serial8250_do_pm()
+-> serial8250_set_sleep()
+
+For set_mctrl() we have (for some devices):
+
+serial8250_set_mctrl()
+-> omap8250_set_mctrl()
+-> __omap8250_set_mctrl()
+
+To avoid such problems, let's make it so that the console is locked
+during pre-registration calls to these callbacks, which will prevent
+the earlycon driver from running concurrently.
+
+Remove the partial solution to this problem in the 8250 driver
+that locked the console only during autoconfig_irq(), as this would
+result in a deadlock with the new approach. The console continues
+to be locked during autoconfig_irq() because it can only be called
+through uart_configure_port().
+
+Although this patch introduces more locking than strictly necessary
+(and in particular it also locks during the call to rs485_config()
+which is not affected by this issue as far as I can tell), it follows
+the principle that it is the responsibility of the generic console
+code to manage the earlycon handoff by ensuring that earlycon and real
+console driver code cannot run concurrently, and not the individual
+drivers.
+
+Signed-off-by: Peter Collingbourne <pcc@google.com>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Link: https://linux-review.googlesource.com/id/I7cf8124dcebf8618e6b2ee543fa5b25532de55d8
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240304214350.501253-1-pcc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_port.c | 6 ------
+ drivers/tty/serial/serial_core.c | 12 ++++++++++++
+ kernel/printk/printk.c | 21 ++++++++++++++++++---
+ 3 files changed, 30 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 8efe31448df3c..c744feabd7cdd 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1377,9 +1377,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ inb_p(ICP);
+ }
+
+- if (uart_console(port))
+- console_lock();
+-
+ /* forget possible initially masked and pending IRQ */
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial8250_in_MCR(up);
+@@ -1410,9 +1407,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ if (port->flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+
+- if (uart_console(port))
+- console_unlock();
+-
+ port->irq = (irq > 0) ? irq : 0;
+ }
+
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index f0ed30d0a697c..fe3f1d655dfe2 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -2561,7 +2561,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ port->type = PORT_UNKNOWN;
+ flags |= UART_CONFIG_TYPE;
+ }
++ /* Synchronize with possible boot console. */
++ if (uart_console(port))
++ console_lock();
+ port->ops->config_port(port, flags);
++ if (uart_console(port))
++ console_unlock();
+ }
+
+ if (port->type != PORT_UNKNOWN) {
+@@ -2569,6 +2574,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+
+ uart_report_port(drv, port);
+
++ /* Synchronize with possible boot console. */
++ if (uart_console(port))
++ console_lock();
++
+ /* Power up port for set_mctrl() */
+ uart_change_pm(state, UART_PM_STATE_ON);
+
+@@ -2585,6 +2594,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+
+ uart_rs485_config(port);
+
++ if (uart_console(port))
++ console_unlock();
++
+ /*
+ * If this driver supports console, and it hasn't been
+ * successfully registered yet, try to re-register it.
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 981cdb00b8722..c55ee859dbd08 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3045,6 +3045,21 @@ static int __init keep_bootcon_setup(char *str)
+
+ early_param("keep_bootcon", keep_bootcon_setup);
+
++static int console_call_setup(struct console *newcon, char *options)
++{
++ int err;
++
++ if (!newcon->setup)
++ return 0;
++
++ /* Synchronize with possible boot console. */
++ console_lock();
++ err = newcon->setup(newcon, options);
++ console_unlock();
++
++ return err;
++}
++
+ /*
+ * This is called by register_console() to try to match
+ * the newly registered console with any of the ones selected
+@@ -3080,8 +3095,8 @@ static int try_enable_preferred_console(struct console *newcon,
+ if (_braille_register_console(newcon, c))
+ return 0;
+
+- if (newcon->setup &&
+- (err = newcon->setup(newcon, c->options)) != 0)
++ err = console_call_setup(newcon, c->options);
++ if (err)
+ return err;
+ }
+ newcon->flags |= CON_ENABLED;
+@@ -3107,7 +3122,7 @@ static void try_enable_default_console(struct console *newcon)
+ if (newcon->index < 0)
+ newcon->index = 0;
+
+- if (newcon->setup && newcon->setup(newcon, NULL) != 0)
++ if (console_call_setup(newcon, NULL) != 0)
+ return;
+
+ newcon->flags |= CON_ENABLED;
+--
+2.43.0
+
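For reference, a minimal illustration of the DLAB banking hazard the changelog above describes. The register offsets come from the standard 8250 layout in <linux/serial_reg.h>, but the helper and the racy sequence are hypothetical, not code from the driver:

/*
 * Illustration only (hypothetical helper): with LCR.DLAB set, offsets 0/1
 * select the divisor latch (DLL/DLM) instead of RX/TX/IER, so a boot
 * console that writes a character "to TX" while another driver has DLAB
 * set actually rewrites the baud-rate divisor.
 */
#include <linux/io.h>
#include <linux/serial_reg.h>	/* UART_LCR, UART_LCR_DLAB, UART_DLL, UART_DLM */

static void set_divisor_unlocked(void __iomem *base, u8 dll, u8 dlm)
{
	u8 lcr = readb(base + UART_LCR);

	writeb(lcr | UART_LCR_DLAB, base + UART_LCR);	/* switch register bank */
	writeb(dll, base + UART_DLL);			/* offset 0, aliases TX */
	writeb(dlm, base + UART_DLM);			/* offset 1, aliases IER */
	/* an earlycon write landing here corrupts the divisor just written */
	writeb(lcr, base + UART_LCR);			/* restore the bank */
}

Taking the console lock around the pre-registration callbacks, as the patch does, keeps the boot console from printing in the middle of such a banked sequence.
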
--- /dev/null
+From 7c56985a0ab755f2c793801abb488c460e2bfac3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jan 2024 10:21:57 -0500
+Subject: serial: max310x: fix NULL pointer dereference in I2C instantiation
+
+From: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+
+[ Upstream commit 0d27056c24efd3d63a03f3edfbcfc4827086b110 ]
+
+When trying to instantiate a max14830 device from userspace:
+
+ echo max14830 0x60 > /sys/bus/i2c/devices/i2c-2/new_device
+
+we get the following error:
+
+ Unable to handle kernel NULL pointer dereference at virtual address...
+ ...
+ Call trace:
+ max310x_i2c_probe+0x48/0x170 [max310x]
+ i2c_device_probe+0x150/0x2a0
+ ...
+
+Add a check for the validity of devtype to prevent the error, and abort
+the probe with a meaningful error message.
+
+Fixes: 2e1f2d9a9bdb ("serial: max310x: implement I2C support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+Link: https://lore.kernel.org/r/20240118152213.2644269-2-hugo@hugovil.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/max310x.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 444f89eb2d4b7..d409ef3887212 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1633,13 +1633,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+
+ static int max310x_i2c_probe(struct i2c_client *client)
+ {
+- const struct max310x_devtype *devtype =
+- device_get_match_data(&client->dev);
++ const struct max310x_devtype *devtype;
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
++ devtype = device_get_match_data(&client->dev);
++ if (!devtype)
++ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
++
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+--
+2.43.0
+
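The same defensive pattern applies to any I2C driver that can be instantiated from sysfs, where no OF/ACPI match entry exists to resolve. A minimal sketch, using hypothetical my_chip_cfg/my_probe names and only the generic driver-core calls seen in the patch:

#include <linux/i2c.h>
#include <linux/property.h>

struct my_chip_cfg {			/* hypothetical per-variant data */
	unsigned int nr_uarts;
};

static int my_probe(struct i2c_client *client)
{
	const struct my_chip_cfg *cfg;

	/* NULL when instantiated via "new_device" with no OF/ACPI match */
	cfg = device_get_match_data(&client->dev);
	if (!cfg)
		return dev_err_probe(&client->dev, -ENODEV,
				     "Failed to match device\n");

	dev_info(&client->dev, "variant with %u uarts\n", cfg->nr_uarts);
	return 0;
}
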
kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch
kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch
kvm-x86-use-a-switch-statement-and-macros-in-__feature_translate.patch
+timers-update-kernel-doc-for-various-functions.patch
+timers-use-del_timer_sync-even-on-up.patch
+timers-rename-del_timer_sync-to-timer_delete_sync.patch
+wifi-brcmfmac-fix-use-after-free-bug-in-brcmf_cfg802.patch
+media-staging-ipu3-imgu-set-fields-before-media_enti.patch
+arm64-dts-qcom-sc7280-add-additional-msi-interrupts.patch
+remoteproc-virtio-fix-wdg-cannot-recovery-remote-pro.patch
+clk-qcom-gcc-sdm845-add-soft-dependency-on-rpmhpd.patch
+smack-set-smack64transmute-only-for-dirs-in-smack_in.patch
+smack-handle-smack64transmute-in-smack_inode_setsecu.patch
+arm-dts-marvell-fix-maxium-maxim-typo-in-brownstone-.patch
+drm-vmwgfx-fix-possible-null-pointer-derefence-with-.patch
+serial-max310x-fix-null-pointer-dereference-in-i2c-i.patch
+pci_iounmap-fix-mmio-mapping-leak.patch
+media-xc4000-fix-atomicity-violation-in-xc4000_get_f.patch
+media-mc-add-local-pad-to-pipeline-regardless-of-the.patch
+media-mc-fix-flags-handling-when-creating-pad-links.patch
+media-mc-add-num_links-flag-to-media_pad.patch
+media-mc-rename-pad-variable-to-clarify-intent.patch
+media-mc-expand-must_connect-flag-to-always-require-.patch
+kvm-always-flush-async-pf-workqueue-when-vcpu-is-bei.patch
+cpufreq-amd-pstate-fix-min_perf-assignment-in-amd_ps.patch
+powerpc-smp-adjust-nr_cpu_ids-to-cover-all-threads-o.patch
+powerpc-smp-increase-nr_cpu_ids-to-include-the-boot-.patch
+sparc64-nmi-watchdog-fix-return-value-of-__setup-han.patch
+sparc-vdso-fix-return-value-of-__setup-handler.patch
+crypto-qat-fix-double-free-during-reset.patch
+crypto-qat-resolve-race-condition-during-aer-recover.patch
+selftests-mqueue-set-timeout-to-180-seconds.patch
+ext4-correct-best-extent-lstart-adjustment-logic.patch
+block-clear-zone-limits-for-a-non-zoned-stacked-queu.patch
+kasan-test-avoid-gcc-warning-for-intentional-overflo.patch
+bounds-support-non-power-of-two-config_nr_cpus.patch
+fat-fix-uninitialized-field-in-nostale-filehandles.patch
+ubifs-set-page-uptodate-in-the-correct-place.patch
+ubi-check-for-too-small-leb-size-in-vtbl-code.patch
+ubi-correct-the-calculation-of-fastmap-size.patch
+mtd-rawnand-meson-fix-scrambling-mode-value-in-comma.patch
+parisc-unaligned-rewrite-64-bit-inline-assembly-of-e.patch
+parisc-avoid-clobbering-the-c-b-bits-in-the-psw-with.patch
+parisc-fix-ip_fast_csum.patch
+parisc-fix-csum_ipv6_magic-on-32-bit-systems.patch
+parisc-fix-csum_ipv6_magic-on-64-bit-systems.patch
+parisc-strip-upper-32-bit-of-sum-in-csum_ipv6_magic-.patch
+md-raid5-fix-atomicity-violation-in-raid5_cache_coun.patch
+cpufreq-limit-resolving-a-frequency-to-policy-min-ma.patch
+pm-suspend-set-mem_sleep_current-during-kernel-comma.patch
+clk-qcom-gcc-ipq6018-fix-terminating-of-frequency-ta.patch
+clk-qcom-gcc-ipq8074-fix-terminating-of-frequency-ta.patch
+clk-qcom-mmcc-apq8084-fix-terminating-of-frequency-t.patch
+clk-qcom-mmcc-msm8974-fix-terminating-of-frequency-t.patch
+usb-xhci-add-error-handling-in-xhci_map_urb_for_dma.patch
+powerpc-fsl-fix-mfpmr-build-errors-with-newer-binuti.patch
+usb-serial-ftdi_sio-add-support-for-gmc-z216c-adapte.patch
+usb-serial-add-device-id-for-verifone-adapter.patch
+usb-serial-cp210x-add-id-for-mgp-instruments-pds100.patch
+usb-serial-option-add-meig-smart-slm320-product.patch
+kvm-x86-xen-inject-vcpu-upcall-vector-when-local-api.patch
+usb-serial-cp210x-add-pid-vid-for-tdk-nc0110013m-and.patch
+pm-sleep-wakeirq-fix-wake-irq-warning-in-system-susp.patch
+mmc-tmio-avoid-concurrent-runs-of-mmc_request_done.patch
+fuse-fix-root-lookup-with-nonzero-generation.patch
+fuse-don-t-unhash-root.patch
+usb-typec-ucsi-clean-up-ucsi_cable_prop-macros.patch
+serial-lock-console-when-calling-into-driver-before-.patch
+btrfs-qgroup-always-free-reserved-space-for-extent-r.patch
+btrfs-fix-off-by-one-chunk-length-calculation-at-con.patch
+pci-pm-drain-runtime-idle-callbacks-before-driver-re.patch
+pci-dpc-quirk-pio-log-size-for-intel-raptor-lake-roo.patch
+acpi-cppc-use-access_width-over-bit_width-for-system.patch
+dm-raid-fix-lockdep-waring-in-pers-hot_add_disk.patch
+powerpc-xor_vmx-add-mhard-float-to-cflags.patch
+mac802154-fix-llsec-key-resources-release-in-mac8021.patch
+swap-comments-get_swap_device-with-usage-rule.patch
+mm-swap-fix-race-between-free_swap_and_cache-and-swa.patch
+mmc-core-fix-switch-on-gp3-partition.patch
+drm-etnaviv-restore-some-id-values.patch
+landlock-warn-once-if-a-landlock-action-is-requested.patch
+hwmon-amc6821-add-of_match-table.patch
+ext4-fix-corruption-during-on-line-resize.patch
+nvmem-meson-efuse-fix-function-pointer-type-mismatch.patch
+slimbus-core-remove-usage-of-the-deprecated-ida_simp.patch
+phy-tegra-xusb-add-api-to-retrieve-the-port-number-o.patch
+usb-gadget-tegra-xudc-fix-usb3-phy-retrieval-logic.patch
+speakup-fix-8bit-characters-from-direct-synth.patch
+pci-aer-block-runtime-suspend-when-handling-errors.patch
+io_uring-net-correctly-handle-multishot-recvmsg-retr.patch
+sparc-explicitly-include-correct-dt-includes.patch
+sparc32-fix-parport-build-with-sparc32.patch
+nfs-fix-uaf-in-direct-writes.patch
+kbuild-move-wenum-compare-conditional-enum-conversio.patch
+pci-qcom-rename-qcom_pcie_config_sid_sm8250-to-refle.patch
+pci-qcom-enable-bdf-to-sid-translation-properly.patch
+pci-dwc-endpoint-fix-advertised-resizable-bar-size.patch
+pci-hv-fix-ring-buffer-size-calculation.patch
+vfio-use-gfp_kernel_account-for-userspace-persistent.patch
+vfio-pci-consolidate-irq-cleanup-on-msi-msi-x-disabl.patch
+vfio-pci-remove-negative-check-on-unsigned-vector.patch
+vfio-pci-prepare-for-dynamic-interrupt-context-stora.patch
+vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch
+vfio-pci-lock-external-intx-masking-ops.patch
+vfio-platform-disable-virqfds-on-cleanup.patch
+ksmbd-retrieve-number-of-blocks-using-vfs_getattr-in.patch
+ring-buffer-fix-waking-up-ring-buffer-readers.patch
+ring-buffer-do-not-set-shortest_full-when-full-targe.patch
+ring-buffer-fix-resetting-of-shortest_full.patch
+ring-buffer-fix-full_waiters_pending-in-poll.patch
+ring-buffer-use-wait_event_interruptible-in-ring_buf.patch
+soc-fsl-qbman-always-disable-interrupts-when-taking-.patch
+soc-fsl-qbman-use-raw-spinlock-for-cgr_lock.patch
+s390-zcrypt-fix-reference-counting-on-zcrypt-card-ob.patch
+drm-probe-helper-warn-about-negative-.get_modes.patch
+drm-panel-do-not-return-negative-error-codes-from-dr.patch
+drm-exynos-do-not-return-negative-values-from-.get_m.patch
+drm-imx-ipuv3-do-not-return-negative-values-from-.ge.patch
+drm-vc4-hdmi-do-not-return-negative-values-from-.get.patch
+memtest-use-read-write-_once-in-memory-scanning.patch
+revert-block-mq-deadline-use-correct-way-to-throttli.patch
+f2fs-mark-inode-dirty-for-fi_atomic_committed-flag.patch
+f2fs-truncate-page-cache-before-clearing-flags-when-.patch
+nilfs2-fix-failure-to-detect-dat-corruption-in-btree.patch
+nilfs2-prevent-kernel-bug-at-submit_bh_wbc.patch
+cifs-open_cached_dir-add-file_read_ea-to-desired-acc.patch
+cpufreq-dt-always-allocate-zeroed-cpumask.patch
+x86-cpu-amd-update-the-zenbleed-microcode-revisions.patch
+nfsd-fix-nfsd_clid_class-use-of-__string_len-macro.patch
+net-hns3-tracing-fix-hclgevf-trace-event-strings.patch
+loongarch-change-__my_cpu_offset-definition-to-avoid.patch
+loongarch-define-the-__io_aw-hook-as-mmiowb.patch
+wireguard-netlink-check-for-dangling-peer-via-is_dea.patch
+wireguard-netlink-access-device-through-ctx-instead-.patch
+ahci-asm1064-correct-count-of-reported-ports.patch
+ahci-asm1064-asm1166-don-t-limit-reported-ports.patch
+drm-amdgpu-amdgpu_ttm_gart_bind-set-gtt-bound-flag.patch
+drm-amd-display-return-the-correct-hdcp-error-code.patch
+drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch
+dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch
+x86-pm-work-around-false-positive-kmemleak-report-in.patch
--- /dev/null
+From 8c3bf1aca8738a8c9848023574644cc18590ede4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Feb 2024 11:41:37 +0000
+Subject: slimbus: core: Remove usage of the deprecated ida_simple_xx() API
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 89ffa4cccec54467446f141a79b9e36893079fb8 ]
+
+ida_alloc() and ida_free() should be preferred to the deprecated
+ida_simple_get() and ida_simple_remove().
+
+Note that the upper limit of ida_simple_get() is exclusive, but the one of
+ida_alloc_range() is inclusive, so this change allows one more device.
+Previously, address 0xFE was never used.
+
+Fixes: 46a2bb5a7f7e ("slimbus: core: Add slim controllers support")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20240224114137.85781-2-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/slimbus/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
+index 219483b79c09c..37fd655994ef3 100644
+--- a/drivers/slimbus/core.c
++++ b/drivers/slimbus/core.c
+@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
+ if (ret < 0)
+ goto err;
+ } else if (report_present) {
+- ret = ida_simple_get(&ctrl->laddr_ida,
+- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
++ ret = ida_alloc_max(&ctrl->laddr_ida,
++ SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+--
+2.43.0
+
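For reference, a minimal sketch of the bound semantics the changelog relies on; the ida, the LIMIT value and both callers are hypothetical, only the inclusive/exclusive difference matches the kernel API:

#include <linux/gfp.h>
#include <linux/idr.h>

#define LIMIT 0xFF			/* hypothetical upper bound */

static DEFINE_IDA(example_ida);

static int old_style_alloc(void)
{
	/* deprecated: "end" is exclusive, so IDs come from [0, LIMIT - 2] */
	return ida_simple_get(&example_ida, 0, LIMIT - 1, GFP_KERNEL);
}

static int new_style_alloc(void)
{
	/* preferred: "max" is inclusive, so IDs come from [0, LIMIT - 1] */
	return ida_alloc_max(&example_ida, LIMIT - 1, GFP_KERNEL);
}

Keeping the same LIMIT - 1 argument while switching helpers is what makes one extra address usable.
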
--- /dev/null
+From 7fe278b0bb8e49dbb2a6111abe0b1ae8ee1ec292 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Nov 2023 10:01:22 +0100
+Subject: smack: Handle SMACK64TRANSMUTE in smack_inode_setsecurity()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+[ Upstream commit ac02f007d64eb2769d0bde742aac4d7a5fc6e8a5 ]
+
+If the SMACK64TRANSMUTE xattr is provided, and the inode is a directory,
+update the in-memory inode flags by setting SMK_INODE_TRANSMUTE.
+
+Cc: stable@vger.kernel.org
+Fixes: 5c6d1125f8db ("Smack: Transmute labels on specified directories") # v2.6.38.x
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/smack/smack_lsm.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 07f7351148ecf..feba69549d086 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -2783,6 +2783,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
+ if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ return -EINVAL;
+
++ if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
++ if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE ||
++ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
++ return -EINVAL;
++
++ nsp->smk_flags |= SMK_INODE_TRANSMUTE;
++ return 0;
++ }
++
+ skp = smk_import_entry(value, size);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+--
+2.43.0
+
--- /dev/null
+From 7066b7f1936c569128b79902bbb7b689147da2dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Nov 2023 10:01:21 +0100
+Subject: smack: Set SMACK64TRANSMUTE only for dirs in smack_inode_setxattr()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+[ Upstream commit 9c82169208dde516510aaba6bbd8b13976690c5d ]
+
+Since the SMACK64TRANSMUTE xattr makes sense only for directories, enforce
+this restriction in smack_inode_setxattr().
+
+Cc: stable@vger.kernel.org
+Fixes: 5c6d1125f8db ("Smack: Transmute labels on specified directories") # v2.6.38.x
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/smack/smack_lsm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index fbadc61feedd1..07f7351148ecf 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1309,7 +1309,8 @@ static int smack_inode_setxattr(struct user_namespace *mnt_userns,
+ check_star = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ check_priv = 1;
+- if (size != TRANS_TRUE_SIZE ||
++ if (!S_ISDIR(d_backing_inode(dentry)->i_mode) ||
++ size != TRANS_TRUE_SIZE ||
+ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
+ } else
+--
+2.43.0
+
--- /dev/null
+From 951c21d1ca27c9ab965111d8e72b8875af46d20c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 12:38:29 -0400
+Subject: soc: fsl: qbman: Always disable interrupts when taking cgr_lock
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit 584c2a9184a33a40fceee838f856de3cffa19be3 ]
+
+smp_call_function_single disables IRQs when executing the callback. To
+prevent deadlocks, we must disable IRQs when taking cgr_lock elsewhere.
+This is already done by qman_update_cgr and qman_delete_cgr; fix the
+other lockers.
+
+Fixes: 96f413f47677 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()")
+CC: stable@vger.kernel.org
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/fsl/qbman/qman.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 739e4eee6b75c..1bf1f1ea67f00 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -1456,11 +1456,11 @@ static void qm_congestion_task(struct work_struct *work)
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+- spin_lock(&p->cgr_lock);
++ spin_lock_irq(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+- spin_unlock(&p->cgr_lock);
++ spin_unlock_irq(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+@@ -1476,7 +1476,7 @@ static void qm_congestion_task(struct work_struct *work)
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+- spin_unlock(&p->cgr_lock);
++ spin_unlock_irq(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ }
+
+@@ -2440,7 +2440,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+- spin_lock(&p->cgr_lock);
++ spin_lock_irq(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+@@ -2477,7 +2477,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+ out:
+- spin_unlock(&p->cgr_lock);
++ spin_unlock_irq(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+ }
+--
+2.43.0
+
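For reference, a minimal sketch of the locking rule the patch enforces; the lock, the callback and both update paths are hypothetical, only smp_call_function_single() and the spin_lock_irq() discipline come from the changelog:

#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(portal_lock);	/* hypothetical stand-in for cgr_lock */

/* invoked via smp_call_function_single(cpu, remote_update, NULL, 1) */
static void remote_update(void *arg)
{
	/* runs in hard IRQ context on the target CPU */
	spin_lock(&portal_lock);
	/* ... update portal state ... */
	spin_unlock(&portal_lock);
}

static void local_update(void)
{
	/*
	 * Process context: IRQs must stay disabled while holding the lock,
	 * otherwise the IPI handler can interrupt this holder on the same
	 * CPU and then spin on portal_lock forever.
	 */
	spin_lock_irq(&portal_lock);
	/* ... update portal state ... */
	spin_unlock_irq(&portal_lock);
}
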
--- /dev/null
+From 25e1a5ab644095f55879d833877199adba1b7576 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 12:38:30 -0400
+Subject: soc: fsl: qbman: Use raw spinlock for cgr_lock
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit fbec4e7fed89b579f2483041fabf9650fb0dd6bc ]
+
+smp_call_function always runs its callback in hard IRQ context, even on
+PREEMPT_RT, where spinlocks can sleep. So we need to use a raw spinlock
+for cgr_lock to ensure we aren't waiting on a sleeping task.
+
+Although this bug has existed for a while, it was not apparent until
+commit ef2a8d5478b9 ("net: dpaa: Adjust queue depth on rate change")
+which invokes smp_call_function_single via qman_update_cgr_safe every
+time a link goes up or down.
+
+Fixes: 96f413f47677 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()")
+CC: stable@vger.kernel.org
+Reported-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Closes: https://lore.kernel.org/all/20230323153935.nofnjucqjqnz34ej@skbuf/
+Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
+Closes: https://lore.kernel.org/linux-arm-kernel/87wmsyvclu.fsf@pengutronix.de/
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/fsl/qbman/qman.c | 25 ++++++++++++++-----------
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 1bf1f1ea67f00..7e9074519ad22 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -991,7 +991,7 @@ struct qman_portal {
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+- spinlock_t cgr_lock;
++ raw_spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+- spin_lock_init(&portal->cgr_lock);
++ raw_spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+- spin_lock_irq(&p->cgr_lock);
++ /*
++ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
++ */
++ raw_spin_lock_irq(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+- spin_unlock_irq(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+- spin_unlock_irq(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ }
+
+@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+- spin_lock_irq(&p->cgr_lock);
++ raw_spin_lock_irq(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+ out:
+- spin_unlock_irq(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+ }
+@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ return -EINVAL;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+- spin_lock_irqsave(&p->cgr_lock, irqflags);
++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ release_lock:
+- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+ }
+@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+ if (!p)
+ return -EINVAL;
+
+- spin_lock_irqsave(&p->cgr_lock, irqflags);
++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+ }
+--
+2.43.0
+
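For reference, a minimal sketch of the PREEMPT_RT distinction behind the patch; the lock name and the callback are hypothetical:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(portal_raw_lock);	/* hypothetical */

static void hardirq_callback(void *arg)
{
	unsigned long flags;

	/*
	 * On PREEMPT_RT a plain spinlock_t becomes a sleeping lock and may
	 * not be taken here; a raw_spinlock_t always busy-waits, so it is
	 * safe from an smp_call_function() callback.
	 */
	raw_spin_lock_irqsave(&portal_raw_lock, flags);
	/* ... short, bounded critical section ... */
	raw_spin_unlock_irqrestore(&portal_raw_lock, flags);
}

The trade-off, as the FIXME added by the patch notes, is that raw critical sections must stay short, since they cannot be preempted even on RT.
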
--- /dev/null
+From ea3c8f49c89933b7300d4b64ec04159cb77018fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jul 2023 14:45:20 -0600
+Subject: sparc: Explicitly include correct DT includes
+
+From: Rob Herring <robh@kernel.org>
+
+[ Upstream commit 263291fa44ff0909b5b7c43ff40babc1c43362f2 ]
+
+The DT of_device.h and of_platform.h date back to the separate
+of_platform_bus_type before it was merged into the regular platform bus.
+As part of that merge prepping Arm DT support 13 years ago, they
+"temporarily" include each other. They also include platform_device.h
+and of.h. As a result, there's a pretty much random mix of those include
+files used throughout the tree. In order to detangle these headers and
+replace the implicit includes with struct declarations, users need to
+explicitly include the correct headers.
+
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://lore.kernel.org/all/20230718143211.1066810-1-robh@kernel.org/
+Signed-off-by: Rob Herring <robh@kernel.org>
+Stable-dep-of: 91d3ff922c34 ("sparc32: Fix parport build with sparc32")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/sparc/crypto/crop_devid.c | 2 +-
+ arch/sparc/include/asm/floppy_32.h | 2 +-
+ arch/sparc/include/asm/floppy_64.h | 2 +-
+ arch/sparc/include/asm/parport.h | 3 ++-
+ arch/sparc/kernel/apc.c | 2 +-
+ arch/sparc/kernel/auxio_32.c | 1 -
+ arch/sparc/kernel/auxio_64.c | 3 ++-
+ arch/sparc/kernel/central.c | 2 +-
+ arch/sparc/kernel/chmc.c | 3 ++-
+ arch/sparc/kernel/ioport.c | 2 +-
+ arch/sparc/kernel/leon_kernel.c | 2 --
+ arch/sparc/kernel/leon_pci.c | 3 ++-
+ arch/sparc/kernel/leon_pci_grpci1.c | 3 ++-
+ arch/sparc/kernel/leon_pci_grpci2.c | 4 +++-
+ arch/sparc/kernel/of_device_32.c | 2 +-
+ arch/sparc/kernel/of_device_64.c | 4 ++--
+ arch/sparc/kernel/of_device_common.c | 4 ++--
+ arch/sparc/kernel/pci.c | 3 ++-
+ arch/sparc/kernel/pci_common.c | 3 ++-
+ arch/sparc/kernel/pci_fire.c | 3 ++-
+ arch/sparc/kernel/pci_impl.h | 1 -
+ arch/sparc/kernel/pci_msi.c | 2 ++
+ arch/sparc/kernel/pci_psycho.c | 4 +++-
+ arch/sparc/kernel/pci_sun4v.c | 3 ++-
+ arch/sparc/kernel/pmc.c | 2 +-
+ arch/sparc/kernel/power.c | 3 ++-
+ arch/sparc/kernel/prom_irqtrans.c | 1 +
+ arch/sparc/kernel/psycho_common.c | 1 +
+ arch/sparc/kernel/sbus.c | 3 ++-
+ arch/sparc/kernel/time_32.c | 1 -
+ arch/sparc/mm/io-unit.c | 3 ++-
+ arch/sparc/mm/iommu.c | 5 +++--
+ 32 files changed, 49 insertions(+), 33 deletions(-)
+
+diff --git a/arch/sparc/crypto/crop_devid.c b/arch/sparc/crypto/crop_devid.c
+index 83fc4536dcd57..93f4e0fdd38c1 100644
+--- a/arch/sparc/crypto/crop_devid.c
++++ b/arch/sparc/crypto/crop_devid.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+-#include <linux/of_device.h>
+
+ /* This is a dummy device table linked into all of the crypto
+ * opcode drivers. It serves to trigger the module autoloading
+diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
+index e10ab9ad3097d..836f6575aa1d7 100644
+--- a/arch/sparc/include/asm/floppy_32.h
++++ b/arch/sparc/include/asm/floppy_32.h
+@@ -8,7 +8,7 @@
+ #define __ASM_SPARC_FLOPPY_H
+
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/pgtable.h>
+
+ #include <asm/idprom.h>
+diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
+index 070c8c1f5c8fd..6efeb24b0a92c 100644
+--- a/arch/sparc/include/asm/floppy_64.h
++++ b/arch/sparc/include/asm/floppy_64.h
+@@ -11,7 +11,7 @@
+ #define __ASM_SPARC64_FLOPPY_H
+
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/dma-mapping.h>
+
+ #include <asm/auxio.h>
+diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
+index 03b27090c0c8c..0a7ffcfd59cda 100644
+--- a/arch/sparc/include/asm/parport.h
++++ b/arch/sparc/include/asm/parport.h
+@@ -7,7 +7,8 @@
+ #ifndef _ASM_SPARC64_PARPORT_H
+ #define _ASM_SPARC64_PARPORT_H 1
+
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+
+ #include <asm/ebus_dma.h>
+ #include <asm/ns87303.h>
+diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
+index ecd05bc0a1045..d44725d37e30f 100644
+--- a/arch/sparc/kernel/apc.c
++++ b/arch/sparc/kernel/apc.c
+@@ -13,7 +13,7 @@
+ #include <linux/miscdevice.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/platform_device.h>
+ #include <linux/module.h>
+
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
+index a32d588174f2f..989860e890c4f 100644
+--- a/arch/sparc/kernel/auxio_32.c
++++ b/arch/sparc/kernel/auxio_32.c
+@@ -8,7 +8,6 @@
+ #include <linux/init.h>
+ #include <linux/spinlock.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/export.h>
+
+ #include <asm/oplib.h>
+diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
+index 774a82b0c649f..2a2800d213256 100644
+--- a/arch/sparc/kernel/auxio_64.c
++++ b/arch/sparc/kernel/auxio_64.c
+@@ -10,7 +10,8 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+
+ #include <asm/prom.h>
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
+index 23f8838dd96e3..a1a6485c91831 100644
+--- a/arch/sparc/kernel/central.c
++++ b/arch/sparc/kernel/central.c
+@@ -10,7 +10,7 @@
+ #include <linux/export.h>
+ #include <linux/string.h>
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
+
+ #include <asm/fhc.h>
+diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
+index 6ff43df740e08..d5fad5fb04c1d 100644
+--- a/arch/sparc/kernel/chmc.c
++++ b/arch/sparc/kernel/chmc.c
+@@ -15,7 +15,8 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <asm/spitfire.h>
+ #include <asm/chmctrl.h>
+ #include <asm/cpudata.h>
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 4e4f3d3263e46..e5a327799e574 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -39,7 +39,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/scatterlist.h>
+ #include <linux/dma-map-ops.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+
+ #include <asm/io.h>
+ #include <asm/vaddrs.h>
+diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
+index 39229940d725d..4c61da491fee1 100644
+--- a/arch/sparc/kernel/leon_kernel.c
++++ b/arch/sparc/kernel/leon_kernel.c
+@@ -8,9 +8,7 @@
+ #include <linux/errno.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+-#include <linux/of_platform.h>
+ #include <linux/interrupt.h>
+-#include <linux/of_device.h>
+ #include <linux/clocksource.h>
+ #include <linux/clockchips.h>
+
+diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
+index e5e5ff6b9a5c5..3a73bc466f95d 100644
+--- a/arch/sparc/kernel/leon_pci.c
++++ b/arch/sparc/kernel/leon_pci.c
+@@ -7,7 +7,8 @@
+ * Code is partially derived from pcic.c
+ */
+
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/export.h>
+diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
+index c32590bdd3120..b2b639bee0684 100644
+--- a/arch/sparc/kernel/leon_pci_grpci1.c
++++ b/arch/sparc/kernel/leon_pci_grpci1.c
+@@ -13,10 +13,11 @@
+ * Contributors: Daniel Hellstrom <daniel@gaisler.com>
+ */
+
+-#include <linux/of_device.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
++#include <linux/of.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ #include <linux/delay.h>
+ #include <linux/pci.h>
+
+diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
+index dd06abc61657f..ac2acd62a24ec 100644
+--- a/arch/sparc/kernel/leon_pci_grpci2.c
++++ b/arch/sparc/kernel/leon_pci_grpci2.c
+@@ -6,12 +6,14 @@
+ *
+ */
+
+-#include <linux/of_device.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++
+ #include <asm/io.h>
+ #include <asm/leon.h>
+ #include <asm/vaddrs.h>
+diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
+index 4ebf51e6e78ec..9ac6853b34c1b 100644
+--- a/arch/sparc/kernel/of_device_32.c
++++ b/arch/sparc/kernel/of_device_32.c
+@@ -7,8 +7,8 @@
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <asm/leon.h>
+ #include <asm/leon_amba.h>
+diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
+index 5a9f86b1d4e7e..a8ccd7260fe7f 100644
+--- a/arch/sparc/kernel/of_device_64.c
++++ b/arch/sparc/kernel/of_device_64.c
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/init.h>
+ #include <linux/export.h>
+@@ -9,8 +8,9 @@
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <asm/spitfire.h>
+
+ #include "of_device_common.h"
+diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
+index e717a56efc5d3..a09724381bd40 100644
+--- a/arch/sparc/kernel/of_device_common.c
++++ b/arch/sparc/kernel/of_device_common.c
+@@ -1,15 +1,15 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <linux/export.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
+-#include <linux/of_device.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+
+ #include "of_device_common.h"
+
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index cb1ef25116e94..5637b37ba9114 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -20,8 +20,9 @@
+ #include <linux/irq.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/pgtable.h>
++#include <linux/platform_device.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/irq.h>
+diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
+index 4759ccd542fe6..5eeec9ad68457 100644
+--- a/arch/sparc/kernel/pci_common.c
++++ b/arch/sparc/kernel/pci_common.c
+@@ -8,7 +8,8 @@
+ #include <linux/slab.h>
+ #include <linux/pci.h>
+ #include <linux/device.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+
+ #include <asm/prom.h>
+ #include <asm/oplib.h>
+diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
+index 0ca08d455e805..0b91bde80fdc5 100644
+--- a/arch/sparc/kernel/pci_fire.c
++++ b/arch/sparc/kernel/pci_fire.c
+@@ -10,7 +10,8 @@
+ #include <linux/msi.h>
+ #include <linux/export.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/numa.h>
+
+ #include <asm/prom.h>
+diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
+index 4e3d15189fa95..f31761f517575 100644
+--- a/arch/sparc/kernel/pci_impl.h
++++ b/arch/sparc/kernel/pci_impl.h
+@@ -11,7 +11,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/pci.h>
+ #include <linux/msi.h>
+-#include <linux/of_device.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/iommu.h>
+diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
+index 9ed11985768e1..fc7402948b7bc 100644
+--- a/arch/sparc/kernel/pci_msi.c
++++ b/arch/sparc/kernel/pci_msi.c
+@@ -5,6 +5,8 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/irq.h>
+
+diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
+index f413371da3871..1efc98305ec76 100644
+--- a/arch/sparc/kernel/pci_psycho.c
++++ b/arch/sparc/kernel/pci_psycho.c
+@@ -13,7 +13,9 @@
+ #include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+
+ #include <asm/iommu.h>
+ #include <asm/irq.h>
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 3844809718052..0ddef827e0f99 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -15,7 +15,8 @@
+ #include <linux/msi.h>
+ #include <linux/export.h>
+ #include <linux/log2.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/dma-map-ops.h>
+ #include <asm/iommu-common.h>
+
+diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
+index b5c1eb33b9518..69a0206e56f01 100644
+--- a/arch/sparc/kernel/pmc.c
++++ b/arch/sparc/kernel/pmc.c
+@@ -11,7 +11,7 @@
+ #include <linux/init.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/platform_device.h>
+ #include <linux/module.h>
+
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
+index d941875dd7186..2f6c909e1755d 100644
+--- a/arch/sparc/kernel/power.c
++++ b/arch/sparc/kernel/power.c
+@@ -9,7 +9,8 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/reboot.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+
+ #include <asm/prom.h>
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
+index 28aff1c524b58..426bd08cb2ab1 100644
+--- a/arch/sparc/kernel/prom_irqtrans.c
++++ b/arch/sparc/kernel/prom_irqtrans.c
+@@ -4,6 +4,7 @@
+ #include <linux/init.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+
+ #include <asm/oplib.h>
+ #include <asm/prom.h>
+diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
+index e90bcb6bad7fc..5ee74b4c0cf40 100644
+--- a/arch/sparc/kernel/psycho_common.c
++++ b/arch/sparc/kernel/psycho_common.c
+@@ -6,6 +6,7 @@
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/numa.h>
++#include <linux/platform_device.h>
+
+ #include <asm/upa.h>
+
+diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
+index 32141e1006c4a..0bababf6f2bcd 100644
+--- a/arch/sparc/kernel/sbus.c
++++ b/arch/sparc/kernel/sbus.c
+@@ -14,7 +14,8 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/numa.h>
+
+ #include <asm/page.h>
+diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
+index 8a08830e4a653..79934beba03a6 100644
+--- a/arch/sparc/kernel/time_32.c
++++ b/arch/sparc/kernel/time_32.c
+@@ -33,7 +33,6 @@
+ #include <linux/ioport.h>
+ #include <linux/profile.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+
+ #include <asm/mc146818rtc.h>
+diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
+index bf3e6d2fe5d94..3afbbe5fba46b 100644
+--- a/arch/sparc/mm/io-unit.c
++++ b/arch/sparc/mm/io-unit.c
+@@ -13,7 +13,8 @@
+ #include <linux/bitops.h>
+ #include <linux/dma-map-ops.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+
+ #include <asm/io.h>
+ #include <asm/io-unit.h>
+diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
+index 9e3f6933ca13f..14e178bfe33ab 100644
+--- a/arch/sparc/mm/iommu.c
++++ b/arch/sparc/mm/iommu.c
+@@ -7,14 +7,15 @@
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+-
++
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
+ #include <linux/dma-map-ops.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+
+ #include <asm/io.h>
+ #include <asm/mxcc.h>
+--
+2.43.0
+
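For reference, a minimal sketch of the include style this series moves the tree toward: a platform driver pulls in <linux/of.h> and <linux/platform_device.h> directly instead of relying on <linux/of_device.h> to provide them. The driver, compatible string and names below are hypothetical:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/* ... driver-specific setup ... */
	return 0;
}

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,my-device" },
	{ }
};
MODULE_DEVICE_TABLE(of, my_match);

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-device",
		.of_match_table = my_match,
	},
	.probe = my_probe,
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");
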
--- /dev/null
+From ed3ca3c256b9c955850b5fc1b498f719efcf790e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Feb 2024 21:28:08 -0800
+Subject: sparc: vDSO: fix return value of __setup handler
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 5378f00c935bebb846b1fdb0e79cb76c137c56b5 ]
+
+__setup() handlers should return 1 to obsolete_checksetup() in
+init/main.c to indicate that the boot option has been handled.
+A return of 0 causes the boot option/value to be listed as an Unknown
+kernel parameter and added to init's (limited) argument or environment
+strings. Also, error return codes don't mean anything to
+obsolete_checksetup() -- only non-zero (usually 1) or zero.
+So return 1 from vdso_setup().
+
+Fixes: 9a08862a5d2e ("vDSO for sparc")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: Igor Zhbanov <izh1979@gmail.com>
+Link: lore.kernel.org/r/64644a2f-4a20-bab3-1e15-3b2cdd0defe3@omprussia.ru
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: sparclinux@vger.kernel.org
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Nick Alcock <nick.alcock@oracle.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Andreas Larsson <andreas@gaisler.com>
+Signed-off-by: Andreas Larsson <andreas@gaisler.com>
+Link: https://lore.kernel.org/r/20240211052808.22635-1-rdunlap@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/sparc/vdso/vma.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
+index ae9a86cb6f3d9..2b97df0850aa7 100644
+--- a/arch/sparc/vdso/vma.c
++++ b/arch/sparc/vdso/vma.c
+@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
+ unsigned long val;
+
+ err = kstrtoul(s, 10, &val);
+- if (err)
+- return err;
+- vdso_enabled = val;
+- return 0;
++ if (!err)
++ vdso_enabled = val;
++ return 1;
+ }
+ __setup("vdso=", vdso_setup);
+--
+2.43.0
+
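For reference, a minimal sketch of the __setup() convention described above; the "myopt=" parameter and the variable are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long myopt_value;	/* hypothetical tunable */

static int __init myopt_setup(char *s)
{
	unsigned long val;

	/* keep the old value on a parse error, but still claim the option */
	if (!kstrtoul(s, 10, &val))
		myopt_value = val;

	return 1;	/* 1 == handled; 0 or -Exx would leak it to init */
}
__setup("myopt=", myopt_setup);
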
--- /dev/null
+From aa3557c371f06debd275a3a412450ab2514a3004 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Feb 2024 18:42:27 +0100
+Subject: sparc32: Fix parport build with sparc32
+
+From: Sam Ravnborg <sam@ravnborg.org>
+
+[ Upstream commit 91d3ff922c346d6d8cb8de5ff8d504fe0ca9e17e ]
+
+include/asm/parport.h is sparc64 specific.
+Rename it to parport_64.h and use the generic version for sparc32.
+
+This fixes all{mod,yes}config build errors like:
+
+parport_pc.c:(.text):undefined-reference-to-ebus_dma_enable
+parport_pc.c:(.text):undefined-reference-to-ebus_dma_irq_enable
+parport_pc.c:(.text):undefined-reference-to-ebus_dma_register
+
+The errors occur as the sparc32 build references sparc64 symbols.
+
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Andreas Larsson <andreas@gaisler.com>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Maciej W. Rozycki <macro@orcam.me.uk>
+Closes: https://lore.kernel.org/r/20230406160548.25721-1-rdunlap@infradead.org/
+Fixes: 66bcd06099bb ("parport_pc: Also enable driver for PCI systems")
+Cc: stable@vger.kernel.org # v5.18+
+Tested-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
+Reviewed-by: Andreas Larsson <andreas@gaisler.com>
+Signed-off-by: Andreas Larsson <andreas@gaisler.com>
+Link: https://lore.kernel.org/r/20240224-sam-fix-sparc32-all-builds-v2-6-1f186603c5c4@ravnborg.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/sparc/include/asm/parport.h | 259 +---------------------------
+ arch/sparc/include/asm/parport_64.h | 256 +++++++++++++++++++++++++++
+ 2 files changed, 263 insertions(+), 252 deletions(-)
+ create mode 100644 arch/sparc/include/asm/parport_64.h
+
+diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
+index 0a7ffcfd59cda..e2eed8f97665f 100644
+--- a/arch/sparc/include/asm/parport.h
++++ b/arch/sparc/include/asm/parport.h
+@@ -1,256 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* parport.h: sparc64 specific parport initialization and dma.
+- *
+- * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
+- */
++#ifndef ___ASM_SPARC_PARPORT_H
++#define ___ASM_SPARC_PARPORT_H
+
+-#ifndef _ASM_SPARC64_PARPORT_H
+-#define _ASM_SPARC64_PARPORT_H 1
+-
+-#include <linux/of.h>
+-#include <linux/platform_device.h>
+-
+-#include <asm/ebus_dma.h>
+-#include <asm/ns87303.h>
+-#include <asm/prom.h>
+-
+-#define PARPORT_PC_MAX_PORTS PARPORT_MAX
+-
+-/*
+- * While sparc64 doesn't have an ISA DMA API, we provide something that looks
+- * close enough to make parport_pc happy
+- */
+-#define HAS_DMA
+-
+-#ifdef CONFIG_PARPORT_PC_FIFO
+-static DEFINE_SPINLOCK(dma_spin_lock);
+-
+-#define claim_dma_lock() \
+-({ unsigned long flags; \
+- spin_lock_irqsave(&dma_spin_lock, flags); \
+- flags; \
+-})
+-
+-#define release_dma_lock(__flags) \
+- spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#if defined(__sparc__) && defined(__arch64__)
++#include <asm/parport_64.h>
++#else
++#include <asm-generic/parport.h>
++#endif
+ #endif
+
+-static struct sparc_ebus_info {
+- struct ebus_dma_info info;
+- unsigned int addr;
+- unsigned int count;
+- int lock;
+-
+- struct parport *port;
+-} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
+-
+-static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
+-
+-static inline int request_dma(unsigned int dmanr, const char *device_id)
+-{
+- if (dmanr >= PARPORT_PC_MAX_PORTS)
+- return -EINVAL;
+- if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
+- return -EBUSY;
+- return 0;
+-}
+-
+-static inline void free_dma(unsigned int dmanr)
+-{
+- if (dmanr >= PARPORT_PC_MAX_PORTS) {
+- printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
+- return;
+- }
+- if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
+- printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
+- return;
+- }
+-}
+-
+-static inline void enable_dma(unsigned int dmanr)
+-{
+- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
+-
+- if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
+- sparc_ebus_dmas[dmanr].addr,
+- sparc_ebus_dmas[dmanr].count))
+- BUG();
+-}
+-
+-static inline void disable_dma(unsigned int dmanr)
+-{
+- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
+-}
+-
+-static inline void clear_dma_ff(unsigned int dmanr)
+-{
+- /* nothing */
+-}
+-
+-static inline void set_dma_mode(unsigned int dmanr, char mode)
+-{
+- ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
+-}
+-
+-static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
+-{
+- sparc_ebus_dmas[dmanr].addr = addr;
+-}
+-
+-static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+-{
+- sparc_ebus_dmas[dmanr].count = count;
+-}
+-
+-static inline unsigned int get_dma_residue(unsigned int dmanr)
+-{
+- return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
+-}
+-
+-static int ecpp_probe(struct platform_device *op)
+-{
+- unsigned long base = op->resource[0].start;
+- unsigned long config = op->resource[1].start;
+- unsigned long d_base = op->resource[2].start;
+- unsigned long d_len;
+- struct device_node *parent;
+- struct parport *p;
+- int slot, err;
+-
+- parent = op->dev.of_node->parent;
+- if (of_node_name_eq(parent, "dma")) {
+- p = parport_pc_probe_port(base, base + 0x400,
+- op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
+- op->dev.parent->parent, 0);
+- if (!p)
+- return -ENOMEM;
+- dev_set_drvdata(&op->dev, p);
+- return 0;
+- }
+-
+- for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
+- if (!test_and_set_bit(slot, dma_slot_map))
+- break;
+- }
+- err = -ENODEV;
+- if (slot >= PARPORT_PC_MAX_PORTS)
+- goto out_err;
+-
+- spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
+-
+- d_len = (op->resource[2].end - d_base) + 1UL;
+- sparc_ebus_dmas[slot].info.regs =
+- of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
+-
+- if (!sparc_ebus_dmas[slot].info.regs)
+- goto out_clear_map;
+-
+- sparc_ebus_dmas[slot].info.flags = 0;
+- sparc_ebus_dmas[slot].info.callback = NULL;
+- sparc_ebus_dmas[slot].info.client_cookie = NULL;
+- sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
+- strcpy(sparc_ebus_dmas[slot].info.name, "parport");
+- if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
+- goto out_unmap_regs;
+-
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
+-
+- /* Configure IRQ to Push Pull, Level Low */
+- /* Enable ECP, set bit 2 of the CTR first */
+- outb(0x04, base + 0x02);
+- ns87303_modify(config, PCR,
+- PCR_EPP_ENABLE |
+- PCR_IRQ_ODRAIN,
+- PCR_ECP_ENABLE |
+- PCR_ECP_CLK_ENA |
+- PCR_IRQ_POLAR);
+-
+- /* CTR bit 5 controls direction of port */
+- ns87303_modify(config, PTR,
+- 0, PTR_LPT_REG_DIR);
+-
+- p = parport_pc_probe_port(base, base + 0x400,
+- op->archdata.irqs[0],
+- slot,
+- op->dev.parent,
+- 0);
+- err = -ENOMEM;
+- if (!p)
+- goto out_disable_irq;
+-
+- dev_set_drvdata(&op->dev, p);
+-
+- return 0;
+-
+-out_disable_irq:
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+-
+-out_unmap_regs:
+- of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
+-
+-out_clear_map:
+- clear_bit(slot, dma_slot_map);
+-
+-out_err:
+- return err;
+-}
+-
+-static int ecpp_remove(struct platform_device *op)
+-{
+- struct parport *p = dev_get_drvdata(&op->dev);
+- int slot = p->dma;
+-
+- parport_pc_unregister_port(p);
+-
+- if (slot != PARPORT_DMA_NOFIFO) {
+- unsigned long d_base = op->resource[2].start;
+- unsigned long d_len;
+-
+- d_len = (op->resource[2].end - d_base) + 1UL;
+-
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+- of_iounmap(&op->resource[2],
+- sparc_ebus_dmas[slot].info.regs,
+- d_len);
+- clear_bit(slot, dma_slot_map);
+- }
+-
+- return 0;
+-}
+-
+-static const struct of_device_id ecpp_match[] = {
+- {
+- .name = "ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "ns87317-ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "pnpALI,1533,3",
+- },
+- {},
+-};
+-
+-static struct platform_driver ecpp_driver = {
+- .driver = {
+- .name = "ecpp",
+- .of_match_table = ecpp_match,
+- },
+- .probe = ecpp_probe,
+- .remove = ecpp_remove,
+-};
+-
+-static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+-{
+- return platform_driver_register(&ecpp_driver);
+-}
+-
+-#endif /* !(_ASM_SPARC64_PARPORT_H */
+diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h
+new file mode 100644
+index 0000000000000..0a7ffcfd59cda
+--- /dev/null
++++ b/arch/sparc/include/asm/parport_64.h
+@@ -0,0 +1,256 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* parport.h: sparc64 specific parport initialization and dma.
++ *
++ * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
++ */
++
++#ifndef _ASM_SPARC64_PARPORT_H
++#define _ASM_SPARC64_PARPORT_H 1
++
++#include <linux/of.h>
++#include <linux/platform_device.h>
++
++#include <asm/ebus_dma.h>
++#include <asm/ns87303.h>
++#include <asm/prom.h>
++
++#define PARPORT_PC_MAX_PORTS PARPORT_MAX
++
++/*
++ * While sparc64 doesn't have an ISA DMA API, we provide something that looks
++ * close enough to make parport_pc happy
++ */
++#define HAS_DMA
++
++#ifdef CONFIG_PARPORT_PC_FIFO
++static DEFINE_SPINLOCK(dma_spin_lock);
++
++#define claim_dma_lock() \
++({ unsigned long flags; \
++ spin_lock_irqsave(&dma_spin_lock, flags); \
++ flags; \
++})
++
++#define release_dma_lock(__flags) \
++ spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#endif
++
++static struct sparc_ebus_info {
++ struct ebus_dma_info info;
++ unsigned int addr;
++ unsigned int count;
++ int lock;
++
++ struct parport *port;
++} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
++
++static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
++
++static inline int request_dma(unsigned int dmanr, const char *device_id)
++{
++ if (dmanr >= PARPORT_PC_MAX_PORTS)
++ return -EINVAL;
++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
++ return -EBUSY;
++ return 0;
++}
++
++static inline void free_dma(unsigned int dmanr)
++{
++ if (dmanr >= PARPORT_PC_MAX_PORTS) {
++ printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
++ return;
++ }
++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
++ printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
++ return;
++ }
++}
++
++static inline void enable_dma(unsigned int dmanr)
++{
++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
++
++ if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
++ sparc_ebus_dmas[dmanr].addr,
++ sparc_ebus_dmas[dmanr].count))
++ BUG();
++}
++
++static inline void disable_dma(unsigned int dmanr)
++{
++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
++}
++
++static inline void clear_dma_ff(unsigned int dmanr)
++{
++ /* nothing */
++}
++
++static inline void set_dma_mode(unsigned int dmanr, char mode)
++{
++ ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
++}
++
++static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
++{
++ sparc_ebus_dmas[dmanr].addr = addr;
++}
++
++static inline void set_dma_count(unsigned int dmanr, unsigned int count)
++{
++ sparc_ebus_dmas[dmanr].count = count;
++}
++
++static inline unsigned int get_dma_residue(unsigned int dmanr)
++{
++ return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
++}
++
++static int ecpp_probe(struct platform_device *op)
++{
++ unsigned long base = op->resource[0].start;
++ unsigned long config = op->resource[1].start;
++ unsigned long d_base = op->resource[2].start;
++ unsigned long d_len;
++ struct device_node *parent;
++ struct parport *p;
++ int slot, err;
++
++ parent = op->dev.of_node->parent;
++ if (of_node_name_eq(parent, "dma")) {
++ p = parport_pc_probe_port(base, base + 0x400,
++ op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
++ op->dev.parent->parent, 0);
++ if (!p)
++ return -ENOMEM;
++ dev_set_drvdata(&op->dev, p);
++ return 0;
++ }
++
++ for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
++ if (!test_and_set_bit(slot, dma_slot_map))
++ break;
++ }
++ err = -ENODEV;
++ if (slot >= PARPORT_PC_MAX_PORTS)
++ goto out_err;
++
++ spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
++
++ d_len = (op->resource[2].end - d_base) + 1UL;
++ sparc_ebus_dmas[slot].info.regs =
++ of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
++
++ if (!sparc_ebus_dmas[slot].info.regs)
++ goto out_clear_map;
++
++ sparc_ebus_dmas[slot].info.flags = 0;
++ sparc_ebus_dmas[slot].info.callback = NULL;
++ sparc_ebus_dmas[slot].info.client_cookie = NULL;
++ sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
++ strcpy(sparc_ebus_dmas[slot].info.name, "parport");
++ if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
++ goto out_unmap_regs;
++
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
++
++ /* Configure IRQ to Push Pull, Level Low */
++ /* Enable ECP, set bit 2 of the CTR first */
++ outb(0x04, base + 0x02);
++ ns87303_modify(config, PCR,
++ PCR_EPP_ENABLE |
++ PCR_IRQ_ODRAIN,
++ PCR_ECP_ENABLE |
++ PCR_ECP_CLK_ENA |
++ PCR_IRQ_POLAR);
++
++ /* CTR bit 5 controls direction of port */
++ ns87303_modify(config, PTR,
++ 0, PTR_LPT_REG_DIR);
++
++ p = parport_pc_probe_port(base, base + 0x400,
++ op->archdata.irqs[0],
++ slot,
++ op->dev.parent,
++ 0);
++ err = -ENOMEM;
++ if (!p)
++ goto out_disable_irq;
++
++ dev_set_drvdata(&op->dev, p);
++
++ return 0;
++
++out_disable_irq:
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++
++out_unmap_regs:
++ of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
++
++out_clear_map:
++ clear_bit(slot, dma_slot_map);
++
++out_err:
++ return err;
++}
++
++static int ecpp_remove(struct platform_device *op)
++{
++ struct parport *p = dev_get_drvdata(&op->dev);
++ int slot = p->dma;
++
++ parport_pc_unregister_port(p);
++
++ if (slot != PARPORT_DMA_NOFIFO) {
++ unsigned long d_base = op->resource[2].start;
++ unsigned long d_len;
++
++ d_len = (op->resource[2].end - d_base) + 1UL;
++
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++ of_iounmap(&op->resource[2],
++ sparc_ebus_dmas[slot].info.regs,
++ d_len);
++ clear_bit(slot, dma_slot_map);
++ }
++
++ return 0;
++}
++
++static const struct of_device_id ecpp_match[] = {
++ {
++ .name = "ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "ns87317-ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "pnpALI,1533,3",
++ },
++ {},
++};
++
++static struct platform_driver ecpp_driver = {
++ .driver = {
++ .name = "ecpp",
++ .of_match_table = ecpp_match,
++ },
++ .probe = ecpp_probe,
++ .remove = ecpp_remove,
++};
++
++static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
++{
++ return platform_driver_register(&ecpp_driver);
++}
++
++#endif /* !(_ASM_SPARC64_PARPORT_H */
+--
+2.43.0
+
--- /dev/null
+From 09ba13fd750c4c7a33912f15ad0c806913f4fa4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Feb 2024 21:28:02 -0800
+Subject: sparc64: NMI watchdog: fix return value of __setup handler
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 3ed7c61e49d65dacb96db798c0ab6fcd55a1f20f ]
+
+__setup() handlers should return 1 to obsolete_checksetup() in
+init/main.c to indicate that the boot option has been handled.
+A return of 0 causes the boot option/value to be listed as an Unknown
+kernel parameter and added to init's (limited) argument or environment
+strings. Also, error return codes don't mean anything to
+obsolete_checksetup() -- only non-zero (usually 1) or zero.
+So return 1 from setup_nmi_watchdog().
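+
+For illustration only, a handler following this convention might look
+like the sketch below (the option name and the variable are
+hypothetical, not part of this patch):
+
+	static bool example_panic;
+
+	static int __init example_setup(char *str)
+	{
+		if (str && !strcmp(str, "panic"))
+			example_panic = true;
+		return 1;	/* option consumed, not an unknown parameter */
+	}
+	__setup("example=", example_setup);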
+
+Fixes: e5553a6d0442 ("sparc64: Implement NMI watchdog on capable cpus.")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: Igor Zhbanov <izh1979@gmail.com>
+Link: lore.kernel.org/r/64644a2f-4a20-bab3-1e15-3b2cdd0defe3@omprussia.ru
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: sparclinux@vger.kernel.org
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Andreas Larsson <andreas@gaisler.com>
+Signed-off-by: Andreas Larsson <andreas@gaisler.com>
+Link: https://lore.kernel.org/r/20240211052802.22612-1-rdunlap@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/sparc/kernel/nmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 060fff95a305c..fbf25e926f67c 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -274,7 +274,7 @@ static int __init setup_nmi_watchdog(char *str)
+ if (!strncmp(str, "panic", 5))
+ panic_on_timeout = 1;
+
+- return 0;
++ return 1;
+ }
+ __setup("nmi_watchdog=", setup_nmi_watchdog);
+
+--
+2.43.0
+
--- /dev/null
+From dab358c28a4436289de8a05ff4771cb8f3b2ca1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 Feb 2024 16:57:36 +0100
+Subject: speakup: Fix 8bit characters from direct synth
+
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+
+[ Upstream commit b6c8dafc9d86eb77e502bb018ec4105e8d2fbf78 ]
+
+When userland echoes 8bit characters to /dev/synth with e.g.
+
+echo -e '\xe9' > /dev/synth
+
+synth_write() would get characters beyond 0x7f, and thus negative values
+when char is signed. When given to synth_buffer_add(), which takes a u16,
+this would sign-extend and produce a U+ffxy character rather than U+xy.
+Users thus get garbled text instead of accents in their output.
+
+Let's fix this by making sure that we read unsigned characters.
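+
+As a standalone illustration of the sign extension (example values,
+not the driver code itself):
+
+	char c = '\xe9';		/* 0xe9, negative if char is signed */
+	u16 wide = c;			/* sign-extends to 0xffe9 (U+FFE9)  */
+	u16 ok = (unsigned char)c;	/* 0x00e9 (U+00E9), as intended     */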
+
+Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Fixes: 89fc2ae80bb1 ("speakup: extend synth buffer to 16bit unicode characters")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240204155736.2oh4ot7tiaa2wpbh@begin
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accessibility/speakup/synth.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
+index eea2a2fa4f015..45f9061031338 100644
+--- a/drivers/accessibility/speakup/synth.c
++++ b/drivers/accessibility/speakup/synth.c
+@@ -208,8 +208,10 @@ void spk_do_flush(void)
+ wake_up_process(speakup_task);
+ }
+
+-void synth_write(const char *buf, size_t count)
++void synth_write(const char *_buf, size_t count)
+ {
++ const unsigned char *buf = (const unsigned char *) _buf;
++
+ while (count--)
+ synth_buffer_add(*buf++);
+ synth_start();
+--
+2.43.0
+
--- /dev/null
+From 6b3e1bc31762ef05aabd52295eeb0dbc9866a643 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 May 2023 14:13:55 +0800
+Subject: swap: comments get_swap_device() with usage rule
+
+From: Huang Ying <ying.huang@intel.com>
+
+[ Upstream commit a95722a047724ef75567381976a36f0e44230bd9 ]
+
+The general rule for using a swap entry is as follows.
+
+When we get a swap entry, unless something else already prevents
+swapoff (for example, the folio in the swap cache is locked, or the
+page table lock is held), the swap entry may become invalid because of
+swapoff.  Therefore, we need to enclose all swap related function calls
+with get_swap_device() and put_swap_device(), unless the swap functions
+call get/put_swap_device() by themselves.
+
+Add the rule as comments of get_swap_device().
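+
+A minimal usage sketch of the rule (the surrounding code and error
+handling are illustrative only):
+
+	struct swap_info_struct *si;
+
+	si = get_swap_device(entry);
+	if (!si)
+		return;			/* entry was invalidated by swapoff */
+	/* ... call swap related functions that take 'entry' ... */
+	put_swap_device(si);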
+
+Link: https://lkml.kernel.org/r/20230529061355.125791-6-ying.huang@intel.com
+Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
+Reviewed-by: Chris Li (Google) <chrisl@kernel.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 82b1c07a0af6 ("mm: swap: fix race between free_swap_and_cache() and swapoff()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/swapfile.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index cca9fda9d036f..324844f98d67c 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1222,6 +1222,13 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ }
+
+ /*
++ * When we get a swap entry, if there aren't some other ways to
++ * prevent swapoff, such as the folio in swap cache is locked, page
++ * table lock is held, etc., the swap entry may become invalid because
++ * of swapoff. Then, we need to enclose all swap related functions
++ * with get_swap_device() and put_swap_device(), unless the swap
++ * functions call get/put_swap_device() by themselves.
++ *
+ * Check whether swap entry is valid in the swap device. If so,
+ * return pointer to swap_info_struct, and keep the swap entry valid
+ * via preventing the swap device from being swapoff, until
+@@ -1230,9 +1237,8 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ * Notice that swapoff or swapoff+swapon can still happen before the
+ * percpu_ref_tryget_live() in get_swap_device() or after the
+ * percpu_ref_put() in put_swap_device() if there isn't any other way
+- * to prevent swapoff, such as page lock, page table lock, etc. The
+- * caller must be prepared for that. For example, the following
+- * situation is possible.
++ * to prevent swapoff. The caller must be prepared for that. For
++ * example, the following situation is possible.
+ *
+ * CPU1 CPU2
+ * do_swap_page()
+--
+2.43.0
+
--- /dev/null
+From 74bdb5f0257e0db031de8b3282510b374359f164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Nov 2022 21:18:44 +0100
+Subject: timers: Rename del_timer_sync() to timer_delete_sync()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 9b13df3fb64ee95e2397585404e442afee2c7d4f ]
+
+The timer related functions do not have a strict timer_ prefixed namespace
+which is really annoying.
+
+Rename del_timer_sync() to timer_delete_sync() and provide del_timer_sync()
+as a wrapper. Document that del_timer_sync() is not for new code.
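+
+For callers the change is transparent; new code should simply spell out
+the new name (sketch, 'mydev' and 'poll_timer' are placeholders):
+
+	/* old name, still works through the wrapper */
+	del_timer_sync(&mydev->poll_timer);
+
+	/* preferred in new code */
+	timer_delete_sync(&mydev->poll_timer);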
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/r/20221123201624.954785441@linutronix.de
+Stable-dep-of: 0f7352557a35 ("wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/timer.h | 15 ++++++++++++++-
+ kernel/time/timer.c | 18 +++++++++---------
+ 2 files changed, 23 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 82bb2e4d3b7c2..6d18f04ad7039 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -183,7 +183,20 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+ extern void add_timer(struct timer_list *timer);
+
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+-extern int del_timer_sync(struct timer_list *timer);
++extern int timer_delete_sync(struct timer_list *timer);
++
++/**
++ * del_timer_sync - Delete a pending timer and wait for a running callback
++ * @timer: The timer to be deleted
++ *
++ * See timer_delete_sync() for detailed explanation.
++ *
++ * Do not use in new code. Use timer_delete_sync() instead.
++ */
++static inline int del_timer_sync(struct timer_list *timer)
++{
++ return timer_delete_sync(timer);
++}
+
+ #define del_singleshot_timer_sync(t) del_timer_sync(t)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 9d09a2a0ad708..59469897432bc 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1083,7 +1083,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
+ /*
+ * We are trying to schedule the timer on the new base.
+ * However we can't change timer's base while it is running,
+- * otherwise del_timer_sync() can't detect that the timer's
++ * otherwise timer_delete_sync() can't detect that the timer's
+ * handler yet has not finished. This also guarantees that the
+ * timer is serialized wrt itself.
+ */
+@@ -1259,7 +1259,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
+ * @timer: The timer to be deactivated
+ *
+ * The function only deactivates a pending timer, but contrary to
+- * del_timer_sync() it does not take into account whether the timer's
++ * timer_delete_sync() it does not take into account whether the timer's
+ * callback function is concurrently executed on a different CPU or not.
+ * It neither prevents rearming of the timer. If @timer can be rearmed
+ * concurrently then the return value of this function is meaningless.
+@@ -1395,7 +1395,7 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ #endif
+
+ /**
+- * del_timer_sync - Deactivate a timer and wait for the handler to finish.
++ * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
+ * @timer: The timer to be deactivated
+ *
+ * Synchronization rules: Callers must prevent restarting of the timer,
+@@ -1417,10 +1417,10 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ * spin_lock_irq(somelock);
+ * <IRQ>
+ * spin_lock(somelock);
+- * del_timer_sync(mytimer);
++ * timer_delete_sync(mytimer);
+ * while (base->running_timer == mytimer);
+ *
+- * Now del_timer_sync() will never return and never release somelock.
++ * Now timer_delete_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but it has
+ * interrupted the softirq that CPU0 is waiting to finish.
+ *
+@@ -1433,7 +1433,7 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
+ */
+-int del_timer_sync(struct timer_list *timer)
++int timer_delete_sync(struct timer_list *timer)
+ {
+ int ret;
+
+@@ -1473,7 +1473,7 @@ int del_timer_sync(struct timer_list *timer)
+
+ return ret;
+ }
+-EXPORT_SYMBOL(del_timer_sync);
++EXPORT_SYMBOL(timer_delete_sync);
+
+ static void call_timer_fn(struct timer_list *timer,
+ void (*fn)(struct timer_list *),
+@@ -1495,8 +1495,8 @@ static void call_timer_fn(struct timer_list *timer,
+ #endif
+ /*
+ * Couple the lock chain with the lock chain at
+- * del_timer_sync() by acquiring the lock_map around the fn()
+- * call here and in del_timer_sync().
++ * timer_delete_sync() by acquiring the lock_map around the fn()
++ * call here and in timer_delete_sync().
+ */
+ lock_map_acquire(&lockdep_map);
+
+--
+2.43.0
+
--- /dev/null
+From ad63c47c4d2c282497ce873dae18a52bf6c81f1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Nov 2022 21:18:40 +0100
+Subject: timers: Update kernel-doc for various functions
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 14f043f1340bf30bc60af127bff39f55889fef26 ]
+
+The kernel-doc of timer related functions is partially incomprehensible
+word salad. Rewrite it to make it useful.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/r/20221123201624.828703870@linutronix.de
+Stable-dep-of: 0f7352557a35 ("wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/timer.c | 148 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 90 insertions(+), 58 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 717fcb9fb14aa..ab9688a2ae190 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1121,14 +1121,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
+ }
+
+ /**
+- * mod_timer_pending - modify a pending timer's timeout
+- * @timer: the pending timer to be modified
+- * @expires: new timeout in jiffies
++ * mod_timer_pending - Modify a pending timer's timeout
++ * @timer: The pending timer to be modified
++ * @expires: New absolute timeout in jiffies
+ *
+- * mod_timer_pending() is the same for pending timers as mod_timer(),
+- * but will not re-activate and modify already deleted timers.
++ * mod_timer_pending() is the same for pending timers as mod_timer(), but
++ * will not activate inactive timers.
+ *
+- * It is useful for unserialized use of timers.
++ * Return:
++ * * %0 - The timer was inactive and not modified
++ * * %1 - The timer was active and requeued to expire at @expires
+ */
+ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1137,24 +1139,27 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
+ EXPORT_SYMBOL(mod_timer_pending);
+
+ /**
+- * mod_timer - modify a timer's timeout
+- * @timer: the timer to be modified
+- * @expires: new timeout in jiffies
+- *
+- * mod_timer() is a more efficient way to update the expire field of an
+- * active timer (if the timer is inactive it will be activated)
++ * mod_timer - Modify a timer's timeout
++ * @timer: The timer to be modified
++ * @expires: New absolute timeout in jiffies
+ *
+ * mod_timer(timer, expires) is equivalent to:
+ *
+ * del_timer(timer); timer->expires = expires; add_timer(timer);
+ *
++ * mod_timer() is more efficient than the above open coded sequence. In
++ * case that the timer is inactive, the del_timer() part is a NOP. The
++ * timer is in any case activated with the new expiry time @expires.
++ *
+ * Note that if there are multiple unserialized concurrent users of the
+ * same timer, then mod_timer() is the only safe way to modify the timeout,
+ * since add_timer() cannot modify an already running timer.
+ *
+- * The function returns whether it has modified a pending timer or not.
+- * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
+- * active timer returns 1.)
++ * Return:
++ * * %0 - The timer was inactive and started
++ * * %1 - The timer was active and requeued to expire at @expires or
++ * the timer was active and not modified because @expires did
++ * not change the effective expiry time
+ */
+ int mod_timer(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1165,11 +1170,18 @@ EXPORT_SYMBOL(mod_timer);
+ /**
+ * timer_reduce - Modify a timer's timeout if it would reduce the timeout
+ * @timer: The timer to be modified
+- * @expires: New timeout in jiffies
++ * @expires: New absolute timeout in jiffies
+ *
+ * timer_reduce() is very similar to mod_timer(), except that it will only
+- * modify a running timer if that would reduce the expiration time (it will
+- * start a timer that isn't running).
++ * modify an enqueued timer if that would reduce the expiration time. If
++ * @timer is not enqueued it starts the timer.
++ *
++ * Return:
++ * * %0 - The timer was inactive and started
++ * * %1 - The timer was active and requeued to expire at @expires or
++ * the timer was active and not modified because @expires
++ * did not change the effective expiry time such that the
++ * timer would expire earlier than already scheduled
+ */
+ int timer_reduce(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1178,18 +1190,21 @@ int timer_reduce(struct timer_list *timer, unsigned long expires)
+ EXPORT_SYMBOL(timer_reduce);
+
+ /**
+- * add_timer - start a timer
+- * @timer: the timer to be added
++ * add_timer - Start a timer
++ * @timer: The timer to be started
+ *
+- * The kernel will do a ->function(@timer) callback from the
+- * timer interrupt at the ->expires point in the future. The
+- * current time is 'jiffies'.
++ * Start @timer to expire at @timer->expires in the future. @timer->expires
++ * is the absolute expiry time measured in 'jiffies'. When the timer expires
++ * timer->function(timer) will be invoked from soft interrupt context.
+ *
+- * The timer's ->expires, ->function fields must be set prior calling this
+- * function.
++ * The @timer->expires and @timer->function fields must be set prior
++ * to calling this function.
++ *
++ * If @timer->expires is already in the past @timer will be queued to
++ * expire at the next timer tick.
+ *
+- * Timers with an ->expires field in the past will be executed in the next
+- * timer tick.
++ * This can only operate on an inactive timer. Attempts to invoke this on
++ * an active timer are rejected with a warning.
+ */
+ void add_timer(struct timer_list *timer)
+ {
+@@ -1199,11 +1214,13 @@ void add_timer(struct timer_list *timer)
+ EXPORT_SYMBOL(add_timer);
+
+ /**
+- * add_timer_on - start a timer on a particular CPU
+- * @timer: the timer to be added
+- * @cpu: the CPU to start it on
++ * add_timer_on - Start a timer on a particular CPU
++ * @timer: The timer to be started
++ * @cpu: The CPU to start it on
++ *
++ * Same as add_timer() except that it starts the timer on the given CPU.
+ *
+- * This is not very scalable on SMP. Double adds are not possible.
++ * See add_timer() for further details.
+ */
+ void add_timer_on(struct timer_list *timer, int cpu)
+ {
+@@ -1238,15 +1255,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ EXPORT_SYMBOL_GPL(add_timer_on);
+
+ /**
+- * del_timer - deactivate a timer.
+- * @timer: the timer to be deactivated
+- *
+- * del_timer() deactivates a timer - this works on both active and inactive
+- * timers.
+- *
+- * The function returns whether it has deactivated a pending timer or not.
+- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
+- * active timer returns 1.)
++ * del_timer - Deactivate a timer.
++ * @timer: The timer to be deactivated
++ *
++ * The function only deactivates a pending timer, but contrary to
++ * del_timer_sync() it does not take into account whether the timer's
++ * callback function is concurrently executed on a different CPU or not.
++ * It neither prevents rearming of the timer. If @timer can be rearmed
++ * concurrently then the return value of this function is meaningless.
++ *
++ * Return:
++ * * %0 - The timer was not pending
++ * * %1 - The timer was pending and deactivated
+ */
+ int del_timer(struct timer_list *timer)
+ {
+@@ -1268,10 +1288,19 @@ EXPORT_SYMBOL(del_timer);
+
+ /**
+ * try_to_del_timer_sync - Try to deactivate a timer
+- * @timer: timer to delete
++ * @timer: Timer to deactivate
++ *
++ * This function tries to deactivate a timer. On success the timer is not
++ * queued and the timer callback function is not running on any CPU.
+ *
+- * This function tries to deactivate a timer. Upon successful (ret >= 0)
+- * exit the timer is not queued and the handler is not running on any CPU.
++ * This function does not guarantee that the timer cannot be rearmed right
++ * after dropping the base lock. That needs to be prevented by the calling
++ * code if necessary.
++ *
++ * Return:
++ * * %0 - The timer was not pending
++ * * %1 - The timer was pending and deactivated
++ * * %-1 - The timer callback function is running on a different CPU
+ */
+ int try_to_del_timer_sync(struct timer_list *timer)
+ {
+@@ -1367,23 +1396,19 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+
+ #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ /**
+- * del_timer_sync - deactivate a timer and wait for the handler to finish.
+- * @timer: the timer to be deactivated
+- *
+- * This function only differs from del_timer() on SMP: besides deactivating
+- * the timer it also makes sure the handler has finished executing on other
+- * CPUs.
++ * del_timer_sync - Deactivate a timer and wait for the handler to finish.
++ * @timer: The timer to be deactivated
+ *
+ * Synchronization rules: Callers must prevent restarting of the timer,
+ * otherwise this function is meaningless. It must not be called from
+ * interrupt contexts unless the timer is an irqsafe one. The caller must
+- * not hold locks which would prevent completion of the timer's
+- * handler. The timer's handler must not call add_timer_on(). Upon exit the
+- * timer is not queued and the handler is not running on any CPU.
++ * not hold locks which would prevent completion of the timer's callback
++ * function. The timer's handler must not call add_timer_on(). Upon exit
++ * the timer is not queued and the handler is not running on any CPU.
+ *
+- * Note: For !irqsafe timers, you must not hold locks that are held in
+- * interrupt context while calling this function. Even if the lock has
+- * nothing to do with the timer in question. Here's why::
++ * For !irqsafe timers, the caller must not hold locks that are held in
++ * interrupt context. Even if the lock has nothing to do with the timer in
++ * question. Here's why::
+ *
+ * CPU0 CPU1
+ * ---- ----
+@@ -1397,10 +1422,17 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ * while (base->running_timer == mytimer);
+ *
+ * Now del_timer_sync() will never return and never release somelock.
+- * The interrupt on the other CPU is waiting to grab somelock but
+- * it has interrupted the softirq that CPU0 is waiting to finish.
++ * The interrupt on the other CPU is waiting to grab somelock but it has
++ * interrupted the softirq that CPU0 is waiting to finish.
++ *
++ * This function cannot guarantee that the timer is not rearmed again by
++ * some concurrent or preempting code, right after it dropped the base
++ * lock. If there is the possibility of a concurrent rearm then the return
++ * value of the function is meaningless.
+ *
+- * The function returns whether it has deactivated a pending timer or not.
++ * Return:
++ * * %0 - The timer was not pending
++ * * %1 - The timer was pending and deactivated
+ */
+ int del_timer_sync(struct timer_list *timer)
+ {
+--
+2.43.0
+
--- /dev/null
+From 52420ed7dccbb1880cffe984b83f45f0a1424abb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Nov 2022 21:18:42 +0100
+Subject: timers: Use del_timer_sync() even on UP
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 168f6b6ffbeec0b9333f3582e4cf637300858db5 ]
+
+del_timer_sync() is assumed to be pointless on uniprocessor systems and can
+be mapped to del_timer() because in theory del_timer() can never be invoked
+while the timer callback function is executing.
+
+This is not entirely true because del_timer() can be invoked from interrupt
+context and therefore hit in the middle of a running timer callback.
+
+Contrary to that del_timer_sync() is not allowed to be invoked from
+interrupt context unless the affected timer is marked with TIMER_IRQSAFE.
+del_timer_sync() has proper checks in place to detect such a situation.
+
+Give up on the UP optimization and make del_timer_sync() unconditionally
+available.
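+
+As a sketch of the resulting rule (timer 't' is a placeholder, not from
+this patch):
+
+	/* In a hardirq handler, for a timer without TIMER_IRQSAFE: */
+	del_timer(&t);		/* allowed, but may hit a running callback */
+	del_timer_sync(&t);	/* not allowed: it would wait for a callback
+				 * it may itself have interrupted */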
+
+Co-developed-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Link: https://lore.kernel.org/all/20220407161745.7d6754b3@gandalf.local.home
+Link: https://lore.kernel.org/all/20221110064101.429013735@goodmis.org
+Link: https://lore.kernel.org/r/20221123201624.888306160@linutronix.de
+Stable-dep-of: 0f7352557a35 ("wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/timer.h | 7 +------
+ kernel/time/timer.c | 2 --
+ 2 files changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 648f00105f588..82bb2e4d3b7c2 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -183,12 +183,7 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+ extern void add_timer(struct timer_list *timer);
+
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+-
+-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+- extern int del_timer_sync(struct timer_list *timer);
+-#else
+-# define del_timer_sync(t) del_timer(t)
+-#endif
++extern int del_timer_sync(struct timer_list *timer);
+
+ #define del_singleshot_timer_sync(t) del_timer_sync(t)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index ab9688a2ae190..9d09a2a0ad708 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1394,7 +1394,6 @@ static inline void timer_sync_wait_running(struct timer_base *base) { }
+ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ #endif
+
+-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ /**
+ * del_timer_sync - Deactivate a timer and wait for the handler to finish.
+ * @timer: The timer to be deactivated
+@@ -1475,7 +1474,6 @@ int del_timer_sync(struct timer_list *timer)
+ return ret;
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+-#endif
+
+ static void call_timer_fn(struct timer_list *timer,
+ void (*fn)(struct timer_list *),
+--
+2.43.0
+
--- /dev/null
+From 92ba6e34d52108e6552648d6e40cac8b01a8ba76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jan 2024 07:37:02 +0100
+Subject: ubi: Check for too small LEB size in VTBL code
+
+From: Richard Weinberger <richard@nod.at>
+
+[ Upstream commit 68a24aba7c593eafa8fd00f2f76407b9b32b47a9 ]
+
+If the LEB size is smaller than a volume table record, we cannot
+have volumes.
+In this case, abort attaching.
+
+Cc: Chenyuan Yang <cy54@illinois.edu>
+Cc: stable@vger.kernel.org
+Fixes: 801c135ce73d ("UBI: Unsorted Block Images")
+Reported-by: Chenyuan Yang <cy54@illinois.edu>
+Closes: https://lore.kernel.org/linux-mtd/1433EB7A-FC89-47D6-8F47-23BE41B263B3@illinois.edu/
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/ubi/vtbl.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index f700f0e4f2ec4..6e5489e233dd2 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
++
++ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
++ ubi_err(ubi, "LEB size too small for a volume record");
++ return -EINVAL;
++ }
++
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+--
+2.43.0
+
--- /dev/null
+From a7ba3fe753d2762880a1012213f8d8a139562662 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Feb 2024 10:49:03 +0800
+Subject: ubi: correct the calculation of fastmap size
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 7f174ae4f39e8475adcc09d26c5a43394689ad6c ]
+
+The calculation of the fastmap size in ubi_calc_fm_size() is incorrect:
+it misses each user volume's ubi_fm_eba structure and the internal UBI
+volume info. Let's correct the calculation.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/ubi/fastmap.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index ca2d9efe62c3c..1060e19205d2a 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+- (sizeof(struct ubi_fm_eba) +
+- (ubi->peb_count * sizeof(__be32))) +
+- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
++ ((sizeof(struct ubi_fm_eba) +
++ sizeof(struct ubi_fm_volhdr)) *
++ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
++ (ubi->peb_count * sizeof(__be32));
+ return roundup(size, ubi->leb_size);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 88da43ff1198386184b7520474187a56df60a9a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jan 2024 17:52:44 +0000
+Subject: ubifs: Set page uptodate in the correct place
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 723012cab779eee8228376754e22c6594229bf8f ]
+
+Page cache reads are lockless, so setting the freshly allocated page
+uptodate before we've overwritten it with the data it's supposed to have
+in it will allow a simultaneous reader to see old data. Move the call
+to SetPageUptodate into ubifs_write_end(), which is after we copied the
+new data into the page.
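+
+Schematically, the safe ordering in ubifs_write_end() (as done by the
+hunk below, simplified) is:
+
+	/* the new data has already been copied into the page */
+	if (len == PAGE_SIZE)
+		SetPageUptodate(page);	/* only now may lockless readers use it */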
+
+Fixes: 1e51764a3c2a ("UBIFS: add new flash file system")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/file.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 10c1779af9c51..f7b1f9ece1364 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping,
+ return err;
+ }
+ }
+-
+- SetPageUptodate(page);
+- ClearPageError(page);
+ }
+
+ if (PagePrivate(page))
+@@ -462,9 +459,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ return err;
+ }
+ }
+-
+- SetPageUptodate(page);
+- ClearPageError(page);
+ }
+
+ err = allocate_budget(c, page, ui, appending);
+@@ -474,10 +468,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ * If we skipped reading the page because we were going to
+ * write all of it, then it is not up to date.
+ */
+- if (skipped_read) {
++ if (skipped_read)
+ ClearPageChecked(page);
+- ClearPageUptodate(page);
+- }
+ /*
+ * Budgeting failed which means it would have to force
+ * write-back but didn't, because we set the @fast flag in the
+@@ -568,6 +560,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
+ goto out;
+ }
+
++ if (len == PAGE_SIZE)
++ SetPageUptodate(page);
++
+ if (!PagePrivate(page)) {
+ attach_page_private(page, (void *)1);
+ atomic_long_inc(&c->dirty_pg_cnt);
+--
+2.43.0
+
--- /dev/null
+From cf040ed55b15c8753145ff105f08f4a749ef87ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 11:03:28 +0800
+Subject: usb: gadget: tegra-xudc: Fix USB3 PHY retrieval logic
+
+From: Wayne Chang <waynec@nvidia.com>
+
+[ Upstream commit 84fa943d93c31ee978355e6c6c69592dae3c9f59 ]
+
+This commit resolves an issue in the tegra-xudc USB gadget driver that
+incorrectly fetched USB3 PHY instances. The problem stemmed from the
+assumption of a one-to-one correspondence between USB2 and USB3 PHY
+names and their association with physical USB ports in the device tree.
+
+Previously, the driver associated USB3 PHY names directly with the USB3
+instance number, leading to mismatches when mapping the physical USB
+ports. For instance, when the USB3-1 PHY was used, the driver expected the
+corresponding PHY name to be 'usb3-1'. However, the physical USB ports in
+the device tree were designated as USB2-0 and USB3-0 as we only have
+one device controller, causing a misalignment.
+
+This commit rectifies the issue by adjusting the PHY naming logic.
+Now, the driver correctly correlates the USB2 and USB3 PHY instances,
+allowing the USB2-0 and USB3-1 PHYs to form a physical USB port pair
+while accurately reflecting their configuration in the device tree by
+naming them USB2-0 and USB3-0, respectively.
+
+The change ensures that the PHY and PHY names align appropriately,
+resolving the mismatch between physical USB ports and their associated
+names in the device tree.
+
+Fixes: b4e19931c98a ("usb: gadget: tegra-xudc: Support multiple device modes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Wayne Chang <waynec@nvidia.com>
+Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Link: https://lore.kernel.org/r/20240307030328.1487748-3-waynec@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/tegra-xudc.c | 39 ++++++++++++++++++-----------
+ 1 file changed, 25 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index a8cadc45c65aa..fd7a9535973ed 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3486,8 +3486,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+
+ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ {
+- int err = 0, usb3;
+- unsigned int i;
++ int err = 0, usb3_companion_port;
++ unsigned int i, j;
+
+ xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->utmi_phy), GFP_KERNEL);
+@@ -3515,7 +3515,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ if (IS_ERR(xudc->utmi_phy[i])) {
+ err = PTR_ERR(xudc->utmi_phy[i]);
+ dev_err_probe(xudc->dev, err,
+- "failed to get usb2-%d PHY\n", i);
++ "failed to get PHY for phy-name usb2-%d\n", i);
+ goto clean_up;
+ } else if (xudc->utmi_phy[i]) {
+ /* Get usb-phy, if utmi phy is available */
+@@ -3534,19 +3534,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ }
+
+ /* Get USB3 phy */
+- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+- if (usb3 < 0)
++ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
++ if (usb3_companion_port < 0)
+ continue;
+
+- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
+- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+- if (IS_ERR(xudc->usb3_phy[i])) {
+- err = PTR_ERR(xudc->usb3_phy[i]);
+- dev_err_probe(xudc->dev, err,
+- "failed to get usb3-%d PHY\n", usb3);
+- goto clean_up;
+- } else if (xudc->usb3_phy[i])
+- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
++ for (j = 0; j < xudc->soc->num_phys; j++) {
++ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
++ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
++ if (IS_ERR(xudc->usb3_phy[i])) {
++ err = PTR_ERR(xudc->usb3_phy[i]);
++ dev_err_probe(xudc->dev, err,
++ "failed to get PHY for phy-name usb3-%d\n", j);
++ goto clean_up;
++ } else if (xudc->usb3_phy[i]) {
++ int usb2_port =
++ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
++ int usb3_port =
++ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
++ if (usb3_port == usb3_companion_port) {
++ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
++ usb2_port, usb3_port, i);
++ break;
++ }
++ }
++ }
+ }
+
+ return err;
+--
+2.43.0
+
--- /dev/null
+From 1347a33293a60d67648f49b37a8a8ac790b91eef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Feb 2024 21:53:29 +0000
+Subject: USB: serial: add device ID for VeriFone adapter
+
+From: Cameron Williams <cang1@live.co.uk>
+
+[ Upstream commit cda704809797a8a86284f9df3eef5e62ec8a3175 ]
+
+Add device ID for a (probably fake) CP2102 UART device.
+
+lsusb -v output:
+
+Device Descriptor:
+ bLength 18
+ bDescriptorType 1
+ bcdUSB 1.10
+ bDeviceClass 0 [unknown]
+ bDeviceSubClass 0 [unknown]
+ bDeviceProtocol 0
+ bMaxPacketSize0 64
+ idVendor 0x11ca VeriFone Inc
+ idProduct 0x0212 Verifone USB to Printer
+ bcdDevice 1.00
+ iManufacturer 1 Silicon Labs
+ iProduct 2 Verifone USB to Printer
+ iSerial 3 0001
+ bNumConfigurations 1
+ Configuration Descriptor:
+ bLength 9
+ bDescriptorType 2
+ wTotalLength 0x0020
+ bNumInterfaces 1
+ bConfigurationValue 1
+ iConfiguration 0
+ bmAttributes 0x80
+ (Bus Powered)
+ MaxPower 100mA
+ Interface Descriptor:
+ bLength 9
+ bDescriptorType 4
+ bInterfaceNumber 0
+ bAlternateSetting 0
+ bNumEndpoints 2
+ bInterfaceClass 255 Vendor Specific Class
+ bInterfaceSubClass 0 [unknown]
+ bInterfaceProtocol 0
+ iInterface 2 Verifone USB to Printer
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x81 EP 1 IN
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0040 1x 64 bytes
+ bInterval 0
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x01 EP 1 OUT
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0040 1x 64 bytes
+ bInterval 0
+Device Status: 0x0000
+ (Bus Powered)
+
+Signed-off-by: Cameron Williams <cang1@live.co.uk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/serial/cp210x.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index b3e60b3847941..bd0632e77d8b0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
+ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+--
+2.43.0
+
--- /dev/null
+From abd12ca52007f79aaae59752e0f828467bca9a6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Feb 2024 11:47:29 +0100
+Subject: USB: serial: cp210x: add ID for MGP Instruments PDS100
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian Häggström <christian.haggstrom@orexplore.com>
+
+[ Upstream commit a0d9d868491a362d421521499d98308c8e3a0398 ]
+
+The radiation meter is labeled MGP Instruments PDS-100G or PDS-100GN
+and is produced by Mirion Technologies. Tested by forcing the driver
+association with
+
+ echo 10c4 863c > /sys/bus/usb-serial/drivers/cp210x/new_id
+
+and then setting the serial port in 115200 8N1 mode. The device
+announces ID_USB_VENDOR_ENC=Silicon\x20Labs and ID_USB_MODEL_ENC=PDS100
+
+Signed-off-by: Christian Häggström <christian.haggstrom@orexplore.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/serial/cp210x.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index bd0632e77d8b0..e9ee8da8cc296 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -144,6 +144,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
++ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
+--
+2.43.0
+
--- /dev/null
+From f79d90c697ee5a9256894121bef7879faf75ba53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 08:46:14 +0900
+Subject: USB: serial: cp210x: add pid/vid for TDK NC0110013M and MM0110113M
+
+From: Toru Katagiri <Toru.Katagiri@tdk.com>
+
+[ Upstream commit b1a8da9ff1395c4879b4bd41e55733d944f3d613 ]
+
+TDK NC0110013M and MM0110113M have custom USB IDs for CP210x,
+so we need to add them to the driver.
+
+Signed-off-by: Toru Katagiri <Toru.Katagiri@tdk.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/serial/cp210x.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index e9ee8da8cc296..aa30288c8a8e0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
+ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
++ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
++ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+--
+2.43.0
+
--- /dev/null
+From ba9972ee4b05054da9830fa518c84fa50d64ff7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Feb 2024 15:42:46 +0100
+Subject: USB: serial: ftdi_sio: add support for GMC Z216C Adapter IR-USB
+
+From: Daniel Vogelbacher <daniel@chaospixel.com>
+
+[ Upstream commit 3fb7bc4f3a98c48981318b87cf553c5f115fd5ca ]
+
+The GMC IR-USB adapter cable utilizes a FTDI FT232R chip.
+
+Add VID/PID for this adapter so it can be used as serial device via
+ftdi_sio.
+
+Signed-off-by: Daniel Vogelbacher <daniel@chaospixel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/serial/ftdi_sio.c | 2 ++
+ drivers/usb/serial/ftdi_sio_ids.h | 6 ++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index fe2173e37b061..248cbc9c48fd1 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ /* GMC devices */
++ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 21a2b5a25fc09..5ee60ba2a73cd 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1606,3 +1606,9 @@
+ #define UBLOX_VID 0x1546
+ #define UBLOX_C099F9P_ZED_PID 0x0502
+ #define UBLOX_C099F9P_ODIN_PID 0x0503
++
++/*
++ * GMC devices
++ */
++#define GMC_VID 0x1cd7
++#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+--
+2.43.0
+
--- /dev/null
+From 193d4b5f01f6d9f9963f65d6fb1e9475a334b41d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jan 2024 18:49:17 +0100
+Subject: USB: serial: option: add MeiG Smart SLM320 product
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Aurélien Jacobs <aurel@gnuage.org>
+
+[ Upstream commit 46809c51565b83881aede6cdf3b0d25254966a41 ]
+
+Update the USB serial option driver to support MeiG Smart SLM320.
+
+ID 2dee:4d41 UNISOC UNISOC-8910
+
+T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 9 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=2dee ProdID=4d41 Rev=00.00
+S: Manufacturer=UNISOC
+S: Product=UNISOC-8910
+C: #Ifs= 8 Cfg#= 1 Atr=e0 MxPwr=400mA
+I: If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 6 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=07(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I: If#= 7 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=08(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Successfully tested a PPP LTE connection using If#= 0.
+The purpose of the other serial interfaces is not known.
+
+Signed-off-by: Aurélien Jacobs <aurel@gnuage.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/serial/option.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c0a0cca65437f..1a3e5a9414f07 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb);
+ /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+ #define LUAT_PRODUCT_AIR720U 0x4e00
+
++/* MeiG Smart Technology products */
++#define MEIGSMART_VENDOR_ID 0x2dee
++/* MeiG Smart SLM320 based on UNISOC UIS8910 */
++#define MEIGSMART_PRODUCT_SLM320 0x4d41
++
+ /* Device flags */
+
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+--
+2.43.0
+
--- /dev/null
+From 07d025ba28b9e5b77f095f2d5664a308a7ded124 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 02:58:01 +0000
+Subject: usb: typec: ucsi: Clean up UCSI_CABLE_PROP macros
+
+From: Jameson Thies <jthies@google.com>
+
+[ Upstream commit 4d0a5a9915793377c0fe1a8d78de6bcd92cea963 ]
+
+Clean up UCSI_CABLE_PROP macros by fixing a bitmask shifting error for
+plug type and updating the modal support macro for consistent naming.
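+
+With the corrected macro, the two plug-type bits (bits 4:3 of the flags
+byte) are masked and shifted down, for example (value is illustrative):
+
+	u8 flags = 0x10;	/* bits 4:3 == 0b10 */
+	u8 type = UCSI_CABLE_PROP_FLAG_PLUG_TYPE(flags);
+				/* type == 2 == UCSI_CABLE_PROPERTY_PLUG_TYPE_C */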
+
+Fixes: 3cf657f07918 ("usb: typec: ucsi: Remove all bit-fields")
+Cc: stable@vger.kernel.org
+Reviewed-by: Benson Leung <bleung@chromium.org>
+Reviewed-by: Prashant Malani <pmalani@chromium.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Jameson Thies <jthies@google.com>
+Link: https://lore.kernel.org/r/20240305025804.1290919-2-jthies@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/ucsi/ucsi.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 60ce9fb6e7450..dbb10cb310d4c 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -220,12 +220,12 @@ struct ucsi_cable_property {
+ #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+ #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+ #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
++#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+ #define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
++#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
+ u8 latency;
+ } __packed;
+
+--
+2.43.0
+
--- /dev/null
+From 57f7ed9e28b34a5cb5843938f5166bca7734e5bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Feb 2024 16:14:38 +0200
+Subject: usb: xhci: Add error handling in xhci_map_urb_for_dma
+
+From: Prashanth K <quic_prashk@quicinc.com>
+
+[ Upstream commit be95cc6d71dfd0cba66e3621c65413321b398052 ]
+
+Currently xhci_map_urb_for_dma() creates a temporary buffer and copies
+the SG list to the new linear buffer. But if the kzalloc_node() fails,
+then the following sg_pcopy_to_buffer() can lead to a crash since it
+tries to memcpy to a NULL pointer.
+
+So return -ENOMEM if kzalloc_node() returns a NULL pointer.
+
+Cc: stable@vger.kernel.org # 5.11
+Fixes: 2017a1e58472 ("usb: xhci: Use temporary buffer to consolidate SG")
+Signed-off-by: Prashanth K <quic_prashk@quicinc.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20240229141438.619372-10-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/xhci.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index c02ad4f76bb3c..565aba6b99860 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1334,6 +1334,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
+
+ temp = kzalloc_node(buf_len, GFP_ATOMIC,
+ dev_to_node(hcd->self.sysdev));
++ if (!temp)
++ return -ENOMEM;
+
+ if (usb_urb_dir_out(urb))
+ sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+--
+2.43.0
+
--- /dev/null
+From 0381b07b8f2cb80dc01d812c3f16ed4e8ea7db3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 May 2023 08:44:28 -0700
+Subject: vfio/pci: Consolidate irq cleanup on MSI/MSI-X disable
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+[ Upstream commit a65f35cfd504e5135540939cffd4323083190b36 ]
+
+vfio_msi_disable() releases all previously allocated state
+associated with each interrupt before disabling MSI/MSI-X.
+
+vfio_msi_disable() iterates twice over the interrupt state:
+first directly with a for loop to do virqfd cleanup, followed
+by another for loop within vfio_msi_set_block() that removes
+the interrupt handler and its associated state using
+vfio_msi_set_vector_signal().
+
+Simplify interrupt cleanup by iterating over allocated interrupts
+once.
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/837acb8cbe86a258a50da05e56a1f17c1a19abbe.1683740667.git.reinette.chatre@intel.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Stable-dep-of: fe9a7082684e ("vfio/pci: Disable auto-enable of exclusive INTx IRQ")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index bffb0741518b9..6a9c6a143cc3a 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -426,10 +426,9 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+ for (i = 0; i < vdev->num_ctx; i++) {
+ vfio_virqfd_disable(&vdev->ctx[i].unmask);
+ vfio_virqfd_disable(&vdev->ctx[i].mask);
++ vfio_msi_set_vector_signal(vdev, i, -1, msix);
+ }
+
+- vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+-
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+--
+2.43.0
+
--- /dev/null
+From 4450a484681c5c31687830485192c35625232427 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 16:05:22 -0700
+Subject: vfio/pci: Disable auto-enable of exclusive INTx IRQ
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit fe9a7082684eb059b925c535682e68c34d487d43 ]
+
+Currently for devices requiring masking at the irqchip for INTx, i.e.
+devices without DisINTx support, the IRQ is enabled in request_irq()
+and subsequently disabled as necessary to align with the masked status
+flag. This presents a window where the interrupt could fire between
+these events, resulting in the IRQ incrementing the disable depth twice.
+This would be unrecoverable for a user since the masked flag prevents
+nested enables through vfio.
+
+Instead, invert the logic using IRQF_NO_AUTOEN such that exclusive INTx
+is never auto-enabled, then unmask as required.
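+
+As a hedged illustration only (separate from the diff below, with a
+made-up device structure standing in for the vfio-pci specifics), the
+IRQF_NO_AUTOEN pattern this relies on looks roughly like:
+
+  #include <linux/interrupt.h>
+  #include <linux/spinlock.h>
+
+  /* hypothetical driver-private state; lock assumed initialized elsewhere */
+  struct foo_dev {
+          spinlock_t lock;
+          bool masked;
+          int irq;
+  };
+
+  static int foo_setup_irq(struct foo_dev *d, irq_handler_t handler)
+  {
+          int ret;
+
+          /* request the IRQ without enabling it at the irqchip */
+          ret = request_irq(d->irq, handler, IRQF_NO_AUTOEN, "foo", d);
+          if (ret)
+                  return ret;
+
+          /* enable only once the masked state allows it, under the lock */
+          spin_lock_irq(&d->lock);
+          if (!d->masked)
+                  enable_irq(d->irq);
+          spin_unlock_irq(&d->lock);
+
+          return 0;
+  }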
+
+Cc: <stable@vger.kernel.org>
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-2-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 6094679349d9c..e64f118c4156f 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -297,8 +297,15 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+
+ ctx->trigger = trigger;
+
++ /*
++ * Devices without DisINTx support require an exclusive interrupt,
++ * IRQ masking is performed at the IRQ chip. The masked status is
++ * protected by vdev->irqlock. Setup the IRQ without auto-enable and
++ * unmask as necessary below under lock. DisINTx is unmodified by
++ * the IRQ configuration and may therefore use auto-enable.
++ */
+ if (!vdev->pci_2_3)
+- irqflags = 0;
++ irqflags = IRQF_NO_AUTOEN;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, ctx->name, vdev);
+@@ -309,13 +316,9 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+ return ret;
+ }
+
+- /*
+- * INTx disable will stick across the new irq setup,
+- * disable_irq won't.
+- */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && ctx->masked)
+- disable_irq_nosync(pdev->irq);
++ if (!vdev->pci_2_3 && !ctx->masked)
++ enable_irq(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 37638b96f3e41a704c63c891840cdeb4e02c66a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 16:05:23 -0700
+Subject: vfio/pci: Lock external INTx masking ops
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 810cd4bb53456d0503cc4e7934e063835152c1b7 ]
+
+Mask operations through config space changes to DisINTx may race INTx
+configuration changes via ioctl. Create wrappers that add locking for
+paths outside of the core interrupt code.
+
+In particular, irq_type is updated holding igate, therefore testing
+is_intx() requires holding igate. For example clearing DisINTx from
+config space can otherwise race changes of the interrupt configuration.
+
+This aligns interfaces which may trigger the INTx eventfd into two
+camps, one side serialized by igate and the other only enabled while
+INTx is configured. A subsequent patch introduces synchronization for
+the latter flows.
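+
+As a hedged, generic sketch of the wrapper idiom (names here are
+illustrative, not the actual vfio-pci symbols):
+
+  #include <linux/mutex.h>
+  #include <linux/lockdep.h>
+
+  static DEFINE_MUTEX(igate);
+
+  /* core path: caller must already hold igate */
+  static void __do_op(void)
+  {
+          lockdep_assert_held(&igate);
+          /* ... touch state serialized by igate ... */
+  }
+
+  /* external path: take the lock, then reuse the core helper */
+  static void do_op(void)
+  {
+          mutex_lock(&igate);
+          __do_op();
+          mutex_unlock(&igate);
+  }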
+
+Cc: <stable@vger.kernel.org>
+Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver")
+Reported-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-3-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 34 +++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index e64f118c4156f..0deb51c820d2e 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -91,13 +91,15 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
+ }
+
+ /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_irq_ctx *ctx;
+ unsigned long flags;
+ bool masked_changed = false;
+
++ lockdep_assert_held(&vdev->igate);
++
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+@@ -135,6 +137,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ return masked_changed;
+ }
+
++bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++{
++ bool mask_changed;
++
++ mutex_lock(&vdev->igate);
++ mask_changed = __vfio_pci_intx_mask(vdev);
++ mutex_unlock(&vdev->igate);
++
++ return mask_changed;
++}
++
+ /*
+ * If this is triggered by an eventfd, we can't call eventfd_signal
+ * or else we'll deadlock on the eventfd wait queue. Return >0 when
+@@ -186,12 +199,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ return ret;
+ }
+
+-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+ {
++ lockdep_assert_held(&vdev->igate);
++
+ if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ vfio_send_intx_eventfd(vdev, NULL);
+ }
+
++void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++{
++ mutex_lock(&vdev->igate);
++ __vfio_pci_intx_unmask(vdev);
++ mutex_unlock(&vdev->igate);
++}
++
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ struct vfio_pci_core_device *vdev = dev_id;
+@@ -537,11 +559,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+ if (unmask)
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
+ int32_t fd = *(int32_t *)data;
+@@ -568,11 +590,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+ if (mask)
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ return -ENOTTY; /* XXX implement me */
+ }
+--
+2.43.0
+
--- /dev/null
+From bca808da62c6a87ef168554caa318c2801d19b70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 May 2023 08:44:30 -0700
+Subject: vfio/pci: Prepare for dynamic interrupt context storage
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+[ Upstream commit d977e0f7663961368f6442589e52d27484c2f5c2 ]
+
+Interrupt context storage is statically allocated at the time
+interrupts are allocated. Following allocation, the interrupt
+context is managed by directly accessing the elements of the
+array using the vector as index.
+
+It is possible to allocate additional MSI-X vectors after
+MSI-X has been enabled. Dynamic storage of interrupt context
+is needed to support adding new MSI-X vectors after initial
+allocation.
+
+Replace direct access of array elements with pointers to the
+array elements. Doing so reduces the impact of moving to a new data
+structure. Move interactions with the array to helpers to
+mostly contain changes needed to transition to a dynamic
+data structure.
+
+No functional change intended.
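+
+As a hedged illustration of the accessor pattern (simplified, made-up
+names rather than the exact helpers added in the diff below):
+
+  struct irq_ctx { int dummy; };             /* placeholder payload */
+
+  struct dev_state {
+          struct irq_ctx *ctx;               /* dynamically sized array */
+          unsigned long num_ctx;
+  };
+
+  /* bounds-checked lookup: callers hold a pointer or NULL, never an index */
+  static struct irq_ctx *ctx_get(struct dev_state *d, unsigned long i)
+  {
+          return i < d->num_ctx ? &d->ctx[i] : NULL;
+  }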
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/eab289693c8325ede9aba99380f8b8d5143980a4.1683740667.git.reinette.chatre@intel.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Stable-dep-of: fe9a7082684e ("vfio/pci: Disable auto-enable of exclusive INTx IRQ")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 215 +++++++++++++++++++++---------
+ 1 file changed, 149 insertions(+), 66 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 258de57ef9564..6094679349d9c 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -48,6 +48,31 @@ static bool is_irq_none(struct vfio_pci_core_device *vdev)
+ vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+ }
+
++static
++struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
++ unsigned long index)
++{
++ if (index >= vdev->num_ctx)
++ return NULL;
++ return &vdev->ctx[index];
++}
++
++static void vfio_irq_ctx_free_all(struct vfio_pci_core_device *vdev)
++{
++ kfree(vdev->ctx);
++}
++
++static int vfio_irq_ctx_alloc_num(struct vfio_pci_core_device *vdev,
++ unsigned long num)
++{
++ vdev->ctx = kcalloc(num, sizeof(struct vfio_pci_irq_ctx),
++ GFP_KERNEL_ACCOUNT);
++ if (!vdev->ctx)
++ return -ENOMEM;
++
++ return 0;
++}
++
+ /*
+ * INTx
+ */
+@@ -55,14 +80,21 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
+ {
+ struct vfio_pci_core_device *vdev = opaque;
+
+- if (likely(is_intx(vdev) && !vdev->virq_disabled))
+- eventfd_signal(vdev->ctx[0].trigger, 1);
++ if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
++ struct vfio_pci_irq_ctx *ctx;
++
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (WARN_ON_ONCE(!ctx))
++ return;
++ eventfd_signal(ctx->trigger, 1);
++ }
+ }
+
+ /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ {
+ struct pci_dev *pdev = vdev->pdev;
++ struct vfio_pci_irq_ctx *ctx;
+ unsigned long flags;
+ bool masked_changed = false;
+
+@@ -77,7 +109,14 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 0);
+- } else if (!vdev->ctx[0].masked) {
++ goto out_unlock;
++ }
++
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (WARN_ON_ONCE(!ctx))
++ goto out_unlock;
++
++ if (!ctx->masked) {
+ /*
+ * Can't use check_and_mask here because we always want to
+ * mask, not just when something is pending.
+@@ -87,10 +126,11 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ else
+ disable_irq_nosync(pdev->irq);
+
+- vdev->ctx[0].masked = true;
++ ctx->masked = true;
+ masked_changed = true;
+ }
+
++out_unlock:
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+ return masked_changed;
+ }
+@@ -105,6 +145,7 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ {
+ struct vfio_pci_core_device *vdev = opaque;
+ struct pci_dev *pdev = vdev->pdev;
++ struct vfio_pci_irq_ctx *ctx;
+ unsigned long flags;
+ int ret = 0;
+
+@@ -117,7 +158,14 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 1);
+- } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
++ goto out_unlock;
++ }
++
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (WARN_ON_ONCE(!ctx))
++ goto out_unlock;
++
++ if (ctx->masked && !vdev->virq_disabled) {
+ /*
+ * A pending interrupt here would immediately trigger,
+ * but we can avoid that overhead by just re-sending
+@@ -129,9 +177,10 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ } else
+ enable_irq(pdev->irq);
+
+- vdev->ctx[0].masked = (ret > 0);
++ ctx->masked = (ret > 0);
+ }
+
++out_unlock:
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return ret;
+@@ -146,18 +195,23 @@ void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ struct vfio_pci_core_device *vdev = dev_id;
++ struct vfio_pci_irq_ctx *ctx;
+ unsigned long flags;
+ int ret = IRQ_NONE;
+
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (WARN_ON_ONCE(!ctx))
++ return ret;
++
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ if (!vdev->pci_2_3) {
+ disable_irq_nosync(vdev->pdev->irq);
+- vdev->ctx[0].masked = true;
++ ctx->masked = true;
+ ret = IRQ_HANDLED;
+- } else if (!vdev->ctx[0].masked && /* may be shared */
++ } else if (!ctx->masked && /* may be shared */
+ pci_check_and_mask_intx(vdev->pdev)) {
+- vdev->ctx[0].masked = true;
++ ctx->masked = true;
+ ret = IRQ_HANDLED;
+ }
+
+@@ -171,15 +225,24 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+
+ static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+ {
++ struct vfio_pci_irq_ctx *ctx;
++ int ret;
++
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+ if (!vdev->pdev->irq)
+ return -ENODEV;
+
+- vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL_ACCOUNT);
+- if (!vdev->ctx)
+- return -ENOMEM;
++ ret = vfio_irq_ctx_alloc_num(vdev, 1);
++ if (ret)
++ return ret;
++
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (!ctx) {
++ vfio_irq_ctx_free_all(vdev);
++ return -EINVAL;
++ }
+
+ vdev->num_ctx = 1;
+
+@@ -189,9 +252,9 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+ * here, non-PCI-2.3 devices will have to wait until the
+ * interrupt is enabled.
+ */
+- vdev->ctx[0].masked = vdev->virq_disabled;
++ ctx->masked = vdev->virq_disabled;
+ if (vdev->pci_2_3)
+- pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++ pci_intx(vdev->pdev, !ctx->masked);
+
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
+@@ -202,41 +265,46 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long irqflags = IRQF_SHARED;
++ struct vfio_pci_irq_ctx *ctx;
+ struct eventfd_ctx *trigger;
+ unsigned long flags;
+ int ret;
+
+- if (vdev->ctx[0].trigger) {
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ if (WARN_ON_ONCE(!ctx))
++ return -EINVAL;
++
++ if (ctx->trigger) {
+ free_irq(pdev->irq, vdev);
+- kfree(vdev->ctx[0].name);
+- eventfd_ctx_put(vdev->ctx[0].trigger);
+- vdev->ctx[0].trigger = NULL;
++ kfree(ctx->name);
++ eventfd_ctx_put(ctx->trigger);
++ ctx->trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+
+- vdev->ctx[0].name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
+- pci_name(pdev));
+- if (!vdev->ctx[0].name)
++ ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
++ pci_name(pdev));
++ if (!ctx->name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+- kfree(vdev->ctx[0].name);
++ kfree(ctx->name);
+ return PTR_ERR(trigger);
+ }
+
+- vdev->ctx[0].trigger = trigger;
++ ctx->trigger = trigger;
+
+ if (!vdev->pci_2_3)
+ irqflags = 0;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+- irqflags, vdev->ctx[0].name, vdev);
++ irqflags, ctx->name, vdev);
+ if (ret) {
+- vdev->ctx[0].trigger = NULL;
+- kfree(vdev->ctx[0].name);
++ ctx->trigger = NULL;
++ kfree(ctx->name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+@@ -246,7 +314,7 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+ * disable_irq won't.
+ */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && vdev->ctx[0].masked)
++ if (!vdev->pci_2_3 && ctx->masked)
+ disable_irq_nosync(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+@@ -255,12 +323,18 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+
+ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+ {
+- vfio_virqfd_disable(&vdev->ctx[0].unmask);
+- vfio_virqfd_disable(&vdev->ctx[0].mask);
++ struct vfio_pci_irq_ctx *ctx;
++
++ ctx = vfio_irq_ctx_get(vdev, 0);
++ WARN_ON_ONCE(!ctx);
++ if (ctx) {
++ vfio_virqfd_disable(&ctx->unmask);
++ vfio_virqfd_disable(&ctx->mask);
++ }
+ vfio_intx_set_signal(vdev, -1);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+- kfree(vdev->ctx);
++ vfio_irq_ctx_free_all(vdev);
+ }
+
+ /*
+@@ -284,10 +358,9 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+- vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx),
+- GFP_KERNEL_ACCOUNT);
+- if (!vdev->ctx)
+- return -ENOMEM;
++ ret = vfio_irq_ctx_alloc_num(vdev, nvec);
++ if (ret)
++ return ret;
+
+ /* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+@@ -296,7 +369,7 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+- kfree(vdev->ctx);
++ vfio_irq_ctx_free_all(vdev);
+ return ret;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+@@ -320,6 +393,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ unsigned int vector, int fd, bool msix)
+ {
+ struct pci_dev *pdev = vdev->pdev;
++ struct vfio_pci_irq_ctx *ctx;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+ u16 cmd;
+@@ -327,33 +401,33 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ if (vector >= vdev->num_ctx)
+ return -EINVAL;
+
++ ctx = vfio_irq_ctx_get(vdev, vector);
++ if (!ctx)
++ return -EINVAL;
+ irq = pci_irq_vector(pdev, vector);
+
+- if (vdev->ctx[vector].trigger) {
+- irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
++ if (ctx->trigger) {
++ irq_bypass_unregister_producer(&ctx->producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+- free_irq(irq, vdev->ctx[vector].trigger);
++ free_irq(irq, ctx->trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+-
+- kfree(vdev->ctx[vector].name);
+- eventfd_ctx_put(vdev->ctx[vector].trigger);
+- vdev->ctx[vector].trigger = NULL;
++ kfree(ctx->name);
++ eventfd_ctx_put(ctx->trigger);
++ ctx->trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+- vdev->ctx[vector].name = kasprintf(GFP_KERNEL_ACCOUNT,
+- "vfio-msi%s[%d](%s)",
+- msix ? "x" : "", vector,
+- pci_name(pdev));
+- if (!vdev->ctx[vector].name)
++ ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
++ msix ? "x" : "", vector, pci_name(pdev));
++ if (!ctx->name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+- kfree(vdev->ctx[vector].name);
++ kfree(ctx->name);
+ return PTR_ERR(trigger);
+ }
+
+@@ -372,26 +446,25 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ pci_write_msi_msg(irq, &msg);
+ }
+
+- ret = request_irq(irq, vfio_msihandler, 0,
+- vdev->ctx[vector].name, trigger);
++ ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ if (ret) {
+- kfree(vdev->ctx[vector].name);
++ kfree(ctx->name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+- vdev->ctx[vector].producer.token = trigger;
+- vdev->ctx[vector].producer.irq = irq;
+- ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
++ ctx->producer.token = trigger;
++ ctx->producer.irq = irq;
++ ret = irq_bypass_register_producer(&ctx->producer);
+ if (unlikely(ret)) {
+ dev_info(&pdev->dev,
+ "irq bypass producer (token %p) registration fails: %d\n",
+- vdev->ctx[vector].producer.token, ret);
++ ctx->producer.token, ret);
+
+- vdev->ctx[vector].producer.token = NULL;
++ ctx->producer.token = NULL;
+ }
+- vdev->ctx[vector].trigger = trigger;
++ ctx->trigger = trigger;
+
+ return 0;
+ }
+@@ -421,13 +494,17 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+ {
+ struct pci_dev *pdev = vdev->pdev;
++ struct vfio_pci_irq_ctx *ctx;
+ unsigned int i;
+ u16 cmd;
+
+ for (i = 0; i < vdev->num_ctx; i++) {
+- vfio_virqfd_disable(&vdev->ctx[i].unmask);
+- vfio_virqfd_disable(&vdev->ctx[i].mask);
+- vfio_msi_set_vector_signal(vdev, i, -1, msix);
++ ctx = vfio_irq_ctx_get(vdev, i);
++ if (ctx) {
++ vfio_virqfd_disable(&ctx->unmask);
++ vfio_virqfd_disable(&ctx->mask);
++ vfio_msi_set_vector_signal(vdev, i, -1, msix);
++ }
+ }
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+@@ -443,7 +520,7 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+- kfree(vdev->ctx);
++ vfio_irq_ctx_free_all(vdev);
+ }
+
+ /*
+@@ -463,14 +540,18 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
+ if (unmask)
+ vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
+ int32_t fd = *(int32_t *)data;
++
++ if (WARN_ON_ONCE(!ctx))
++ return -EINVAL;
+ if (fd >= 0)
+ return vfio_virqfd_enable((void *) vdev,
+ vfio_pci_intx_unmask_handler,
+ vfio_send_intx_eventfd, NULL,
+- &vdev->ctx[0].unmask, fd);
++ &ctx->unmask, fd);
+
+- vfio_virqfd_disable(&vdev->ctx[0].unmask);
++ vfio_virqfd_disable(&ctx->unmask);
+ }
+
+ return 0;
+@@ -543,6 +624,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+ {
++ struct vfio_pci_irq_ctx *ctx;
+ unsigned int i;
+ bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
+
+@@ -577,14 +659,15 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ for (i = start; i < start + count; i++) {
+- if (!vdev->ctx[i].trigger)
++ ctx = vfio_irq_ctx_get(vdev, i);
++ if (!ctx || !ctx->trigger)
+ continue;
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- eventfd_signal(vdev->ctx[i].trigger, 1);
++ eventfd_signal(ctx->trigger, 1);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t *bools = data;
+ if (bools[i - start])
+- eventfd_signal(vdev->ctx[i].trigger, 1);
++ eventfd_signal(ctx->trigger, 1);
+ }
+ }
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 415a9f2ffc00d04958f15833411e9be9f0e29a3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 May 2023 08:44:29 -0700
+Subject: vfio/pci: Remove negative check on unsigned vector
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+[ Upstream commit 6578ed85c7d63693669bfede01e0237d0e24211a ]
+
+User space provides the vector as an unsigned int that is checked
+early for validity (vfio_set_irqs_validate_and_prepare()).
+
+A later negative check of the provided vector is not necessary.
+
+Remove the negative check and ensure the type used
+for the vector is consistent as an unsigned int.
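+
+A minimal, hedged illustration of why the removed check was dead code
+(the helper below is made up, not from the patch): with an unsigned
+type the first comparison can never be true, and compilers flag it
+with -Wtype-limits or -Wtautological-compare.
+
+  #include <linux/errno.h>
+
+  static int validate(unsigned int vector, unsigned int num_ctx)
+  {
+          if (vector < 0)         /* always false: unsigned is never negative */
+                  return -EINVAL;
+          if (vector >= num_ctx)  /* the only bound that matters */
+                  return -EINVAL;
+          return 0;
+  }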
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/28521e1b0b091849952b0ecb8c118729fc8cdc4f.1683740667.git.reinette.chatre@intel.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Stable-dep-of: fe9a7082684e ("vfio/pci: Disable auto-enable of exclusive INTx IRQ")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 6a9c6a143cc3a..258de57ef9564 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -317,14 +317,14 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ }
+
+ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+- int vector, int fd, bool msix)
++ unsigned int vector, int fd, bool msix)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+ u16 cmd;
+
+- if (vector < 0 || vector >= vdev->num_ctx)
++ if (vector >= vdev->num_ctx)
+ return -EINVAL;
+
+ irq = pci_irq_vector(pdev, vector);
+@@ -399,7 +399,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ unsigned count, int32_t *fds, bool msix)
+ {
+- int i, j, ret = 0;
++ unsigned int i, j;
++ int ret = 0;
+
+ if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
+ return -EINVAL;
+@@ -410,8 +411,8 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ }
+
+ if (ret) {
+- for (--j; j >= (int)start; j--)
+- vfio_msi_set_vector_signal(vdev, j, -1, msix);
++ for (i = start; i < j; i++)
++ vfio_msi_set_vector_signal(vdev, i, -1, msix);
+ }
+
+ return ret;
+@@ -420,7 +421,7 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+- int i;
++ unsigned int i;
+ u16 cmd;
+
+ for (i = 0; i < vdev->num_ctx; i++) {
+@@ -542,7 +543,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+ {
+- int i;
++ unsigned int i;
+ bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
+
+ if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+--
+2.43.0
+
--- /dev/null
+From b2c84706811243be32b1a5728dd963fa7ead39a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 16:05:26 -0700
+Subject: vfio/platform: Disable virqfds on cleanup
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit fcdc0d3d40bc26c105acf8467f7d9018970944ae ]
+
+irqfds for mask and unmask that are not specifically disabled by the
+user are leaked. Remove any irqfds during cleanup.
+
+Cc: Eric Auger <eric.auger@redhat.com>
+Cc: <stable@vger.kernel.org>
+Fixes: a7fa7c77cf15 ("vfio/platform: implement IRQ masking/unmasking via an eventfd")
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/20240308230557.805580-6-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/platform/vfio_platform_irq.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
+index c5b09ec0a3c98..f2893f2fcaabd 100644
+--- a/drivers/vfio/platform/vfio_platform_irq.c
++++ b/drivers/vfio/platform/vfio_platform_irq.c
+@@ -321,8 +321,11 @@ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+ {
+ int i;
+
+- for (i = 0; i < vdev->num_irqs; i++)
++ for (i = 0; i < vdev->num_irqs; i++) {
++ vfio_virqfd_disable(&vdev->irqs[i].mask);
++ vfio_virqfd_disable(&vdev->irqs[i].unmask);
+ vfio_set_trigger(vdev, i, -1, NULL);
++ }
+
+ vdev->num_irqs = 0;
+ kfree(vdev->irqs);
+--
+2.43.0
+
--- /dev/null
+From eece737f251619b048dacaecc1f21d6b1cf447b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 Jan 2023 17:44:24 +0200
+Subject: vfio: Use GFP_KERNEL_ACCOUNT for userspace persistent allocations
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit 0886196ca8810c5b1f5097b71c4bc0df40b10208 ]
+
+Use GFP_KERNEL_ACCOUNT for userspace persistent allocations.
+
+The GFP_KERNEL_ACCOUNT option lets the memory allocator know that this
+is untrusted allocation triggered from userspace and should be a subject
+of kmem accounting, and as such it is controlled by the cgroup
+mechanism.
+
+The way to find the relevant allocations was for example to look at the
+close_device function and trace back all the kfrees to their
+allocations.
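+
+As a hedged example of the conversion pattern (the structure name is
+made up; the real call sites are in the diff below):
+
+  #include <linux/slab.h>
+
+  struct foo_state { int dummy; };
+
+  static struct foo_state *foo_alloc(void)
+  {
+          /*
+           * GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT: the
+           * allocation is charged to the caller's memory cgroup, so a
+           * misbehaving userspace cannot grow unaccounted kernel memory.
+           */
+          return kzalloc(sizeof(struct foo_state), GFP_KERNEL_ACCOUNT);
+  }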
+
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Link: https://lore.kernel.org/r/20230108154427.32609-4-yishaih@nvidia.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Stable-dep-of: fe9a7082684e ("vfio/pci: Disable auto-enable of exclusive INTx IRQ")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/container.c | 2 +-
+ drivers/vfio/pci/vfio_pci_config.c | 6 +++---
+ drivers/vfio/pci/vfio_pci_core.c | 7 ++++---
+ drivers/vfio/pci/vfio_pci_igd.c | 2 +-
+ drivers/vfio/pci/vfio_pci_intrs.c | 10 ++++++----
+ drivers/vfio/pci/vfio_pci_rdwr.c | 2 +-
+ drivers/vfio/virqfd.c | 2 +-
+ 7 files changed, 17 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c
+index d74164abbf401..ab9d8e3481f75 100644
+--- a/drivers/vfio/container.c
++++ b/drivers/vfio/container.c
+@@ -366,7 +366,7 @@ static int vfio_fops_open(struct inode *inode, struct file *filep)
+ {
+ struct vfio_container *container;
+
+- container = kzalloc(sizeof(*container), GFP_KERNEL);
++ container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT);
+ if (!container)
+ return -ENOMEM;
+
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 4a350421c5f62..523e0144c86fa 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1244,7 +1244,7 @@ static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
+ if (vdev->msi_perm)
+ return len;
+
+- vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
++ vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL_ACCOUNT);
+ if (!vdev->msi_perm)
+ return -ENOMEM;
+
+@@ -1731,11 +1731,11 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
+ * no requirements on the length of a capability, so the gap between
+ * capabilities needs byte granularity.
+ */
+- map = kmalloc(pdev->cfg_size, GFP_KERNEL);
++ map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
+ if (!map)
+ return -ENOMEM;
+
+- vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
++ vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
+ if (!vconfig) {
+ kfree(map);
+ return -ENOMEM;
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index e030c2120183e..f357fd157e1ed 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -141,7 +141,8 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
+ * of the exclusive page in case that hot-add
+ * device's bar is assigned into it.
+ */
+- dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
++ dummy_res =
++ kzalloc(sizeof(*dummy_res), GFP_KERNEL_ACCOUNT);
+ if (dummy_res == NULL)
+ goto no_mmap;
+
+@@ -856,7 +857,7 @@ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
+- GFP_KERNEL);
++ GFP_KERNEL_ACCOUNT);
+ if (!region)
+ return -ENOMEM;
+
+@@ -1637,7 +1638,7 @@ static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+ {
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
++ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
+index 5e6ca59269548..dd70e2431bd74 100644
+--- a/drivers/vfio/pci/vfio_pci_igd.c
++++ b/drivers/vfio/pci/vfio_pci_igd.c
+@@ -180,7 +180,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
+ if (!addr || !(~addr))
+ return -ENODEV;
+
+- opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
++ opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL_ACCOUNT);
+ if (!opregionvbt)
+ return -ENOMEM;
+
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 40c3d7cf163f6..bffb0741518b9 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -177,7 +177,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+ if (!vdev->pdev->irq)
+ return -ENODEV;
+
+- vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++ vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL_ACCOUNT);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+@@ -216,7 +216,7 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+ if (fd < 0) /* Disable only */
+ return 0;
+
+- vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
++ vdev->ctx[0].name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
+ pci_name(pdev));
+ if (!vdev->ctx[0].name)
+ return -ENOMEM;
+@@ -284,7 +284,8 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+- vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx),
++ GFP_KERNEL_ACCOUNT);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+@@ -343,7 +344,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ if (fd < 0)
+ return 0;
+
+- vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
++ vdev->ctx[vector].name = kasprintf(GFP_KERNEL_ACCOUNT,
++ "vfio-msi%s[%d](%s)",
+ msix ? "x" : "", vector,
+ pci_name(pdev));
+ if (!vdev->ctx[vector].name)
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index e352a033b4aef..e27de61ac9fe7 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -470,7 +470,7 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ goto out_unlock;
+ }
+
+- ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
++ ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL_ACCOUNT);
+ if (!ioeventfd) {
+ ret = -ENOMEM;
+ goto out_unlock;
+diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
+index 414e98d82b02e..a928c68df4763 100644
+--- a/drivers/vfio/virqfd.c
++++ b/drivers/vfio/virqfd.c
+@@ -115,7 +115,7 @@ int vfio_virqfd_enable(void *opaque,
+ int ret = 0;
+ __poll_t events;
+
+- virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
++ virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL_ACCOUNT);
+ if (!virqfd)
+ return -ENOMEM;
+
+--
+2.43.0
+
--- /dev/null
+From dbdf22bf577a54d736b41aeefef811344d0c496b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Jan 2024 08:25:04 +0100
+Subject: wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
+
+From: Zheng Wang <zyytlz.wz@163.com>
+
+[ Upstream commit 0f7352557a35ab7888bc7831411ec8a3cbe20d78 ]
+
+This is the candidate patch for CVE-2023-47233:
+https://nvd.nist.gov/vuln/detail/CVE-2023-47233
+
+In the brcm80211 driver, it starts with the following invoking chain
+to initialize a timeout worker:
+
+->brcmf_usb_probe
+ ->brcmf_usb_probe_cb
+ ->brcmf_attach
+ ->brcmf_bus_started
+ ->brcmf_cfg80211_attach
+ ->wl_init_priv
+ ->brcmf_init_escan
+ ->INIT_WORK(&cfg->escan_timeout_work,
+ brcmf_cfg80211_escan_timeout_worker);
+
+If we disconnect the USB device by hotplug, it will call
+brcmf_usb_disconnect to perform cleanup. The invoking chain is:
+
+brcmf_usb_disconnect
+ ->brcmf_usb_disconnect_cb
+ ->brcmf_detach
+ ->brcmf_cfg80211_detach
+ ->kfree(cfg);
+
+Meanwhile, the timeout worker may still be running, which causes
+a use-after-free bug on cfg in brcmf_cfg80211_escan_timeout_worker.
+
+Fix it by deleting the timer and canceling the worker in
+brcmf_cfg80211_detach.
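+
+The teardown ordering the fix enforces looks, as a hedged sketch with
+made-up names, like:
+
+  #include <linux/timer.h>
+  #include <linux/workqueue.h>
+  #include <linux/slab.h>
+
+  struct foo_cfg {
+          struct timer_list escan_timeout;
+          struct work_struct escan_timeout_work;
+  };
+
+  static void foo_detach(struct foo_cfg *cfg)
+  {
+          /* stop the timer so it cannot queue new work */
+          timer_delete_sync(&cfg->escan_timeout);
+          /* wait for any in-flight work that still dereferences cfg */
+          cancel_work_sync(&cfg->escan_timeout_work);
+          /* only now is it safe to free the object */
+          kfree(cfg);
+  }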
+
+Fixes: e756af5b30b0 ("brcmfmac: add e-scan support.")
+Signed-off-by: Zheng Wang <zyytlz.wz@163.com>
+Cc: stable@vger.kernel.org
+[arend.vanspriel@broadcom.com: keep timer delete as is and cancel work just before free]
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://msgid.link/20240107072504.392713-1-arend.vanspriel@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index ad5a8d61d9385..24a3d5a593f15 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -791,8 +791,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+- if (timer_pending(&cfg->escan_timeout))
+- del_timer_sync(&cfg->escan_timeout);
++ timer_delete_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+@@ -7805,6 +7804,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+ brcmf_btcoex_detach(cfg);
+ wiphy_unregister(cfg->wiphy);
+ wl_deinit_priv(cfg);
++ cancel_work_sync(&cfg->escan_timeout_work);
+ brcmf_free_wiphy(cfg->wiphy);
+ kfree(cfg);
+ }
+--
+2.43.0
+
--- /dev/null
+From 620d4d2e3e1cba373a6b63fd639717967a732ace Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Mar 2024 16:49:10 -0600
+Subject: wireguard: netlink: access device through ctx instead of peer
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+[ Upstream commit 71cbd32e3db82ea4a74e3ef9aeeaa6971969c86f ]
+
+The previous commit fixed a bug that led to a NULL peer->device being
+dereferenced. It's actually easier and faster performance-wise to
+instead get the device from ctx->wg. This semantically makes more sense
+too, since ctx->wg->peer_allowedips.seq is compared with
+ctx->allowedips_seq, basing them both in ctx. This also acts as a
+defence in depth provision against freed peers.
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireguard/netlink.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index 81eef56773a23..81b716e6612e2 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
+ if (!allowedips_node)
+ goto no_allowedips;
+ if (!ctx->allowedips_seq)
+- ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
++ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
++ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
+ goto no_allowedips;
+
+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+--
+2.43.0
+
--- /dev/null
+From e516024aaf3e3e93e6ac56b2e7ac71b81812ac01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Mar 2024 16:49:09 -0600
+Subject: wireguard: netlink: check for dangling peer via is_dead instead of
+ empty list
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+[ Upstream commit 55b6c738673871c9b0edae05d0c97995c1ff08c4 ]
+
+If all peers are removed via wg_peer_remove_all(), rather than setting
+peer_list to empty, the peer is added to a temporary list with a head on
+the stack of wg_peer_remove_all(). If a netlink dump is resumed and the
+cursored peer is one that has been removed via wg_peer_remove_all(), it
+will iterate from that peer and then attempt to dump freed peers.
+
+Fix this by instead checking peer->is_dead, which was explicitly created
+for this purpose. Also move up the device_update_lock lockdep assertion,
+since reading is_dead relies on that.
+
+It can be reproduced by a small script like:
+
+ echo "Setting config..."
+ ip link add dev wg0 type wireguard
+ wg setconf wg0 /big-config
+ (
+ while true; do
+ echo "Showing config..."
+ wg showconf wg0 > /dev/null
+ done
+ ) &
+ sleep 4
+ wg setconf wg0 <(printf "[Peer]\nPublicKey=$(wg genkey)\n")
+
+Resulting in:
+
+ BUG: KASAN: slab-use-after-free in __lock_acquire+0x182a/0x1b20
+ Read of size 8 at addr ffff88811956ec70 by task wg/59
+ CPU: 2 PID: 59 Comm: wg Not tainted 6.8.0-rc2-debug+ #5
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x47/0x70
+ print_address_description.constprop.0+0x2c/0x380
+ print_report+0xab/0x250
+ kasan_report+0xba/0xf0
+ __lock_acquire+0x182a/0x1b20
+ lock_acquire+0x191/0x4b0
+ down_read+0x80/0x440
+ get_peer+0x140/0xcb0
+ wg_get_device_dump+0x471/0x1130
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Reported-by: Lillian Berry <lillian@star-ark.net>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireguard/netlink.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index 6d1bd9f52d02a..81eef56773a23 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ if (!peers_nest)
+ goto out;
+ ret = 0;
+- /* If the last cursor was removed via list_del_init in peer_remove, then
++ lockdep_assert_held(&wg->device_update_lock);
++ /* If the last cursor was removed in peer_remove or peer_remove_all, then
+ * we just treat this the same as there being no more peers left. The
+ * reason is that seq_nr should indicate to userspace that this isn't a
+ * coherent dump anyway, so they'll try again.
+ */
+ if (list_empty(&wg->peer_list) ||
+- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
++ (ctx->next_peer && ctx->next_peer->is_dead)) {
+ nla_nest_cancel(skb, peers_nest);
+ goto out;
+ }
+- lockdep_assert_held(&wg->device_update_lock);
+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ if (get_peer(peer, skb, ctx)) {
+--
+2.43.0
+
--- /dev/null
+From ff2d5f9b40aed7d6bf607ddd970a02e764976e87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Mar 2024 22:42:27 +0100
+Subject: x86/CPU/AMD: Update the Zenbleed microcode revisions
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+[ Upstream commit 5c84b051bd4e777cf37aaff983277e58c99618d5 ]
+
+Update them to the correct revision numbers.
+
+Fixes: 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/amd.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c1d09c8844d67..425092806f8fe 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -997,11 +997,11 @@ static bool cpu_has_zenbleed_microcode(void)
+ u32 good_rev = 0;
+
+ switch (boot_cpu_data.x86_model) {
+- case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+- case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+- case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+- case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+- case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++ case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
++ case 0x60 ... 0x67: good_rev = 0x0860010c; break;
++ case 0x68 ... 0x6f: good_rev = 0x08608107; break;
++ case 0x70 ... 0x7f: good_rev = 0x08701033; break;
++ case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
+
+ default:
+ return false;
+--
+2.43.0
+
--- /dev/null
+From a499d873987684e09e589a23d1b98bddb7b5134d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Mar 2024 14:26:56 +0000
+Subject: x86/pm: Work around false positive kmemleak report in
+ msr_build_context()
+
+From: Anton Altaparmakov <anton@tuxera.com>
+
+[ Upstream commit e3f269ed0accbb22aa8f25d2daffa23c3fccd407 ]
+
+Since:
+
+ 7ee18d677989 ("x86/power: Make restore_processor_context() sane")
+
+kmemleak reports this issue:
+
+ unreferenced object 0xf68241e0 (size 32):
+ comm "swapper/0", pid 1, jiffies 4294668610 (age 68.432s)
+ hex dump (first 32 bytes):
+ 00 cc cc cc 29 10 01 c0 00 00 00 00 00 00 00 00 ....)...........
+ 00 42 82 f6 cc cc cc cc cc cc cc cc cc cc cc cc .B..............
+ backtrace:
+ [<461c1d50>] __kmem_cache_alloc_node+0x106/0x260
+ [<ea65e13b>] __kmalloc+0x54/0x160
+ [<c3858cd2>] msr_build_context.constprop.0+0x35/0x100
+ [<46635aff>] pm_check_save_msr+0x63/0x80
+ [<6b6bb938>] do_one_initcall+0x41/0x1f0
+ [<3f3add60>] kernel_init_freeable+0x199/0x1e8
+ [<3b538fde>] kernel_init+0x1a/0x110
+ [<938ae2b2>] ret_from_fork+0x1c/0x28
+
+Which is a false positive.
+
+Reproducer:
+
+ - Run rsync of whole kernel tree (multiple times if needed).
+ - start a kmemleak scan
+ - Note this is just an example: a lot of our internal tests hit these.
+
+The root cause is similar to the fix in:
+
+ b0b592cf0836 x86/pm: Fix false positive kmemleak report in msr_build_context()
+
+i.e. the alignment within the packed struct saved_context, which leaves
+everything unaligned because only "u16 gs;" sits at the start of the
+struct, whereas in the past there were four u16 fields there, keeping
+everything afterwards aligned. The issue is that kmemleak only searches
+for pointers that are aligned (see how pointers are scanned in
+kmemleak.c), so when the struct members are not aligned it doesn't see
+them.
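+
+A hedged, userspace toy example of the layout effect (not the real
+saved_context, just the shape of the problem):
+
+  #include <stdio.h>
+  #include <stddef.h>
+
+  struct packed_bad {
+          unsigned short gs;      /* 2 bytes at offset 0 ... */
+          void *ptr;              /* ... pushes this to offset 2 */
+  } __attribute__((packed));
+
+  struct packed_good {
+          void *ptr;              /* offset 0, pointer-aligned */
+          unsigned short gs;      /* a trailing u16 hurts nothing */
+  } __attribute__((packed));
+
+  int main(void)
+  {
+          /* kmemleak scans memory only at pointer-aligned offsets, so a
+           * pointer stored at offset 2 is invisible to it */
+          printf("bad: %zu, good: %zu\n",
+                 offsetof(struct packed_bad, ptr),
+                 offsetof(struct packed_good, ptr));
+          return 0;
+  }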
+
+Testing:
+
+We run a lot of tests with our CI, and after applying this fix we do not
+see any kmemleak issues any more whilst without it we see hundreds of
+the above report. From a single, simple test run consisting of 416 individual test
+cases on kernel 5.10 x86 with kmemleak enabled we got 20 failures due to this,
+which is quite a lot. With this fix applied we get zero kmemleak related failures.
+
+Fixes: 7ee18d677989 ("x86/power: Make restore_processor_context() sane")
+Signed-off-by: Anton Altaparmakov <anton@tuxera.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: "Rafael J. Wysocki" <rafael@kernel.org>
+Cc: stable@vger.kernel.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20240314142656.17699-1-anton@tuxera.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/suspend_32.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index a800abb1a9925..d8416b3bf832e 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -12,11 +12,6 @@
+
+ /* image of the saved processor state */
+ struct saved_context {
+- /*
+- * On x86_32, all segment registers except gs are saved at kernel
+- * entry in pt_regs.
+- */
+- u16 gs;
+ unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ struct saved_msrs saved_msrs;
+@@ -27,6 +22,11 @@ struct saved_context {
+ unsigned long tr;
+ unsigned long safety;
+ unsigned long return_address;
++ /*
++ * On x86_32, all segment registers except gs are saved at kernel
++ * entry in pt_regs.
++ */
++ u16 gs;
+ bool misc_enable_saved;
+ } __attribute__((packed));
+
+--
+2.43.0
+