From: Sasha Levin Date: Wed, 27 Mar 2024 11:08:35 +0000 (-0400) Subject: Fixes for 6.8 X-Git-Tag: v6.7.12~226 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a13c25c5ef6a0d413ca8d01c0f7b7ad596798438;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.8 Signed-off-by: Sasha Levin --- diff --git a/queue-6.8/acpi-cppc-use-access_width-over-bit_width-for-system.patch b/queue-6.8/acpi-cppc-use-access_width-over-bit_width-for-system.patch new file mode 100644 index 00000000000..665864a6dd1 --- /dev/null +++ b/queue-6.8/acpi-cppc-use-access_width-over-bit_width-for-system.patch @@ -0,0 +1,188 @@ +From 5fa2b684d3530d8e83039b9e9338f3cd97e09711 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 11:25:59 -0800 +Subject: ACPI: CPPC: Use access_width over bit_width for system memory + accesses + +From: Jarred White + +[ Upstream commit 2f4a4d63a193be6fd530d180bb13c3592052904c ] + +To align with ACPI 6.3+, since bit_width can be any 8-bit value, it +cannot be depended on to be always on a clean 8b boundary. This was +uncovered on the Cobalt 100 platform. + +SError Interrupt on CPU26, code 0xbe000011 -- SError + CPU: 26 PID: 1510 Comm: systemd-udevd Not tainted 5.15.2.1-13 #1 + Hardware name: MICROSOFT CORPORATION, BIOS MICROSOFT CORPORATION + pstate: 62400009 (nZCv daif +PAN -UAO +TCO -DIT -SSBS BTYPE=--) + pc : cppc_get_perf_caps+0xec/0x410 + lr : cppc_get_perf_caps+0xe8/0x410 + sp : ffff8000155ab730 + x29: ffff8000155ab730 x28: ffff0080139d0038 x27: ffff0080139d0078 + x26: 0000000000000000 x25: ffff0080139d0058 x24: 00000000ffffffff + x23: ffff0080139d0298 x22: ffff0080139d0278 x21: 0000000000000000 + x20: ffff00802b251910 x19: ffff0080139d0000 x18: ffffffffffffffff + x17: 0000000000000000 x16: ffffdc7e111bad04 x15: ffff00802b251008 + x14: ffffffffffffffff x13: ffff013f1fd63300 x12: 0000000000000006 + x11: ffffdc7e128f4420 x10: 0000000000000000 x9 : ffffdc7e111badec + x8 : ffff00802b251980 x7 : 0000000000000000 x6 : ffff0080139d0028 + x5 : 0000000000000000 x4 : ffff0080139d0018 x3 : 00000000ffffffff + x2 : 0000000000000008 x1 : ffff8000155ab7a0 x0 : 0000000000000000 + Kernel panic - not syncing: Asynchronous SError Interrupt + CPU: 26 PID: 1510 Comm: systemd-udevd Not tainted +5.15.2.1-13 #1 + Hardware name: MICROSOFT CORPORATION, BIOS MICROSOFT CORPORATION + Call trace: + dump_backtrace+0x0/0x1e0 + show_stack+0x24/0x30 + dump_stack_lvl+0x8c/0xb8 + dump_stack+0x18/0x34 + panic+0x16c/0x384 + add_taint+0x0/0xc0 + arm64_serror_panic+0x7c/0x90 + arm64_is_fatal_ras_serror+0x34/0xa4 + do_serror+0x50/0x6c + el1h_64_error_handler+0x40/0x74 + el1h_64_error+0x7c/0x80 + cppc_get_perf_caps+0xec/0x410 + cppc_cpufreq_cpu_init+0x74/0x400 [cppc_cpufreq] + cpufreq_online+0x2dc/0xa30 + cpufreq_add_dev+0xc0/0xd4 + subsys_interface_register+0x134/0x14c + cpufreq_register_driver+0x1b0/0x354 + cppc_cpufreq_init+0x1a8/0x1000 [cppc_cpufreq] + do_one_initcall+0x50/0x250 + do_init_module+0x60/0x27c + load_module+0x2300/0x2570 + __do_sys_finit_module+0xa8/0x114 + __arm64_sys_finit_module+0x2c/0x3c + invoke_syscall+0x78/0x100 + el0_svc_common.constprop.0+0x180/0x1a0 + do_el0_svc+0x84/0xa0 + el0_svc+0x2c/0xc0 + el0t_64_sync_handler+0xa4/0x12c + el0t_64_sync+0x1a4/0x1a8 + +Instead, use access_width to determine the size and use the offset and +width to shift and mask the bits to read/write out. Make sure to add a +check for system memory since pcc redefines the access_width to +subspace id. + +If access_width is not set, then fall back to using bit_width. 
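For illustration, the width selection performed by the GET_BIT_WIDTH() macro added below can be sketched as standalone C. The struct and values here are stand-ins for this note only, not the kernel's struct cpc_reg; the point is simply that access_width values 1..4 select byte/word/dword/qword accesses, and 0 falls back to bit_width.

#include <stdio.h>

/* Stand-in for the register descriptor; not the kernel's struct cpc_reg. */
struct fake_reg {
	unsigned char bit_width;    /* may be any 8-bit value per ACPI 6.3+ */
	unsigned char access_width; /* 0 = unspecified, 1..4 = byte..qword */
};

/* Mirrors GET_BIT_WIDTH(): prefer access_width, else fall back to bit_width. */
static unsigned int get_bit_width(const struct fake_reg *reg)
{
	return reg->access_width ? (8u << (reg->access_width - 1))
				 : reg->bit_width;
}

int main(void)
{
	struct fake_reg unaligned = { .bit_width = 20, .access_width = 3 };
	struct fake_reg legacy    = { .bit_width = 32, .access_width = 0 };

	/* access_width = 3 => 32-bit (dword) access, even though bit_width is 20 */
	printf("unaligned: %u bits\n", get_bit_width(&unaligned));
	/* no access_width => fall back to bit_width (32), the pre-patch behaviour */
	printf("legacy:    %u bits\n", get_bit_width(&legacy));
	return 0;
}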
+ +Signed-off-by: Jarred White +Reviewed-by: Easwar Hariharan +Cc: 5.15+ # 5.15+ +[ rjw: Subject and changelog edits, comment adjustments ] +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/acpi/cppc_acpi.c | 31 ++++++++++++++++++++++++++----- + 1 file changed, 26 insertions(+), 5 deletions(-) + +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index d155a86a86148..b954ce3638a9c 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -166,6 +166,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); + show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); + show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); + ++/* Check for valid access_width, otherwise, fallback to using bit_width */ ++#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width) ++ ++/* Shift and apply the mask for CPC reads/writes */ ++#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \ ++ GENMASK(((reg)->bit_width), 0))) ++ + static ssize_t show_feedback_ctrs(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) + { +@@ -780,6 +787,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + if (gas_t->address) { + void __iomem *addr; ++ size_t access_width; + + if (!osc_cpc_flexible_adr_space_confirmed) { + pr_debug("Flexible address space capability not supported\n"); +@@ -787,7 +795,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + goto out_free; + } + +- addr = ioremap(gas_t->address, gas_t->bit_width/8); ++ access_width = GET_BIT_WIDTH(gas_t) / 8; ++ addr = ioremap(gas_t->address, access_width); + if (!addr) + goto out_free; + cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; +@@ -983,6 +992,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) + static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) + { + void __iomem *vaddr = NULL; ++ int size; + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cpc_reg *reg = ®_res->cpc_entry.reg; + +@@ -994,7 +1004,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) + *val = 0; + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { +- u32 width = 8 << (reg->access_width - 1); ++ u32 width = GET_BIT_WIDTH(reg); + u32 val_u32; + acpi_status status; + +@@ -1018,7 +1028,9 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) + return acpi_os_read_memory((acpi_physical_address)reg->address, + val, reg->bit_width); + +- switch (reg->bit_width) { ++ size = GET_BIT_WIDTH(reg); ++ ++ switch (size) { + case 8: + *val = readb_relaxed(vaddr); + break; +@@ -1037,18 +1049,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) + return -EFAULT; + } + ++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ++ *val = MASK_VAL(reg, *val); ++ + return 0; + } + + static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) + { + int ret_val = 0; ++ int size; + void __iomem *vaddr = NULL; + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cpc_reg *reg = ®_res->cpc_entry.reg; + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { +- u32 width = 8 << (reg->access_width - 1); ++ u32 width = GET_BIT_WIDTH(reg); + acpi_status status; + + status = acpi_os_write_port((acpi_io_address)reg->address, +@@ -1070,7 +1086,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) 
+ return acpi_os_write_memory((acpi_physical_address)reg->address, + val, reg->bit_width); + +- switch (reg->bit_width) { ++ size = GET_BIT_WIDTH(reg); ++ ++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ++ val = MASK_VAL(reg, val); ++ ++ switch (size) { + case 8: + writeb_relaxed(val, vaddr); + break; +-- +2.43.0 + diff --git a/queue-6.8/ahci-asm1064-asm1166-don-t-limit-reported-ports.patch b/queue-6.8/ahci-asm1064-asm1166-don-t-limit-reported-ports.patch new file mode 100644 index 00000000000..a134d851601 --- /dev/null +++ b/queue-6.8/ahci-asm1064-asm1166-don-t-limit-reported-ports.patch @@ -0,0 +1,89 @@ +From 14c8d8517035d1e59801d29b0e2233ee249ef0c8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 22:46:50 +0100 +Subject: ahci: asm1064: asm1166: don't limit reported ports + +From: Conrad Kostecki + +[ Upstream commit 6cd8adc3e18960f6e59d797285ed34ef473cc896 ] + +Previously, patches have been added to limit the reported count of SATA +ports for asm1064 and asm1166 SATA controllers, as those controllers do +report more ports than physically having. + +While it is allowed to report more ports than physically having in CAP.NP, +it is not allowed to report more ports than physically having in the PI +(Ports Implemented) register, which is what these HBAs do. +(This is a AHCI spec violation.) + +Unfortunately, it seems that the PMP implementation in these ASMedia HBAs +is also violating the AHCI and SATA-IO PMP specification. + +What these HBAs do is that they do not report that they support PMP +(CAP.SPM (Supports Port Multiplier) is not set). + +Instead, they have decided to add extra "virtual" ports in the PI register +that is used if a port multiplier is connected to any of the physical +ports of the HBA. + +Enumerating the devices behind the PMP as specified in the AHCI and +SATA-IO specifications, by using PMP READ and PMP WRITE commands to the +physical ports of the HBA is not possible, you have to use the "virtual" +ports. + +This is of course bad, because this gives us no way to detect the device +and vendor ID of the PMP actually connected to the HBA, which means that +we can not apply the proper PMP quirks for the PMP that is connected to +the HBA. + +Limiting the port map will thus stop these controllers from working with +SATA Port Multipliers. + +This patch reverts both patches for asm1064 and asm1166, so old behavior +is restored and SATA PMP will work again, but it will also reintroduce the +(minutes long) extra boot time for the ASMedia controllers that do not +have a PMP connected (either on the PCIe card itself, or an external PMP). + +However, a longer boot time for some, is the lesser evil compared to some +other users not being able to detect their drives at all. 
+ +Fixes: 0077a504e1a4 ("ahci: asm1166: correct count of reported ports") +Fixes: 9815e3961754 ("ahci: asm1064: correct count of reported ports") +Cc: stable@vger.kernel.org +Reported-by: Matt +Signed-off-by: Conrad Kostecki +Reviewed-by: Hans de Goede +[cassel: rewrote commit message] +Signed-off-by: Niklas Cassel +Signed-off-by: Sasha Levin +--- + drivers/ata/ahci.c | 13 ------------- + 1 file changed, 13 deletions(-) + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 682ff550ccfb9..df3fd6474bf21 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -671,19 +671,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets"); + static void ahci_pci_save_initial_config(struct pci_dev *pdev, + struct ahci_host_priv *hpriv) + { +- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) { +- switch (pdev->device) { +- case 0x1166: +- dev_info(&pdev->dev, "ASM1166 has only six ports\n"); +- hpriv->saved_port_map = 0x3f; +- break; +- case 0x1064: +- dev_info(&pdev->dev, "ASM1064 has only four ports\n"); +- hpriv->saved_port_map = 0xf; +- break; +- } +- } +- + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { + dev_info(&pdev->dev, "JMB361 has only one port\n"); + hpriv->saved_port_map = 1; +-- +2.43.0 + diff --git a/queue-6.8/arm-dts-marvell-fix-maxium-maxim-typo-in-brownstone-.patch b/queue-6.8/arm-dts-marvell-fix-maxium-maxim-typo-in-brownstone-.patch new file mode 100644 index 00000000000..21596c4fa77 --- /dev/null +++ b/queue-6.8/arm-dts-marvell-fix-maxium-maxim-typo-in-brownstone-.patch @@ -0,0 +1,46 @@ +From 536b24aedabc60f185a81f88e619dff698751025 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 Jan 2024 19:39:32 +0100 +Subject: arm: dts: marvell: Fix maxium->maxim typo in brownstone dts +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Duje Mihanović + +[ Upstream commit 831e0cd4f9ee15a4f02ae10b67e7fdc10eb2b4fc ] + +Fix an obvious spelling error in the PMIC compatible in the MMP2 +Brownstone DTS file. + +Fixes: 58f1193e6210 ("mfd: max8925: Add dts") +Cc: +Signed-off-by: Duje Mihanović +Reported-by: Krzysztof Kozlowski +Closes: https://lore.kernel.org/linux-devicetree/1410884282-18041-1-git-send-email-k.kozlowski@samsung.com/ +Reviewed-by: Andrew Lunn +Link: https://lore.kernel.org/r/20240125-brownstone-typo-fix-v2-1-45bc48a0c81c@skole.hr +[krzysztof: Just 10 years to take a patch, not bad! 
Rephrased commit + msg] +Signed-off-by: Krzysztof Kozlowski +Signed-off-by: Sasha Levin +--- + arch/arm/boot/dts/marvell/mmp2-brownstone.dts | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts +index 04f1ae1382e7a..bc64348b82185 100644 +--- a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts ++++ b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts +@@ -28,7 +28,7 @@ &uart3 { + &twsi1 { + status = "okay"; + pmic: max8925@3c { +- compatible = "maxium,max8925"; ++ compatible = "maxim,max8925"; + reg = <0x3c>; + interrupts = <1>; + interrupt-parent = <&intcmux4>; +-- +2.43.0 + diff --git a/queue-6.8/arm64-dts-qcom-sc7280-add-additional-msi-interrupts.patch b/queue-6.8/arm64-dts-qcom-sc7280-add-additional-msi-interrupts.patch new file mode 100644 index 00000000000..9b9c6186ca3 --- /dev/null +++ b/queue-6.8/arm64-dts-qcom-sc7280-add-additional-msi-interrupts.patch @@ -0,0 +1,51 @@ +From 88ffc30abd93a2d6143c29a9e0e9ba73bd56f3dc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 18 Dec 2023 19:32:36 +0530 +Subject: arm64: dts: qcom: sc7280: Add additional MSI interrupts + +From: Krishna chaitanya chundru + +[ Upstream commit b8ba66b40da3230a8675cb5dd5c2dea5bce24d62 ] + +Current MSI's mapping doesn't have all the vectors. This platform +supports 8 vectors each vector supports 32 MSI's, so total MSI's +supported is 256. + +Add all the MSI groups supported for this PCIe instance in this platform. + +Fixes: 92e0ee9f83b3 ("arm64: dts: qcom: sc7280: Add PCIe and PHY related nodes") +cc: stable@vger.kernel.org +Signed-off-by: Krishna chaitanya chundru +Link: https://lore.kernel.org/r/20231218-additional_msi-v1-1-de6917392684@quicinc.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + arch/arm64/boot/dts/qcom/sc7280.dtsi | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi +index ce0d24ee7eedb..7dc2c37716e84 100644 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi +@@ -2178,8 +2178,16 @@ pcie1: pcie@1c08000 { + ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>, + <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>; + +- interrupts = ; +- interrupt-names = "msi"; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "msi0", "msi1", "msi2", "msi3", ++ "msi4", "msi5", "msi6", "msi7"; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>, +-- +2.43.0 + diff --git a/queue-6.8/arm64-dts-qcom-sm8450-hdk-correct-amic4-and-amic5-mi.patch b/queue-6.8/arm64-dts-qcom-sm8450-hdk-correct-amic4-and-amic5-mi.patch new file mode 100644 index 00000000000..07c5773012b --- /dev/null +++ b/queue-6.8/arm64-dts-qcom-sm8450-hdk-correct-amic4-and-amic5-mi.patch @@ -0,0 +1,45 @@ +From b6fb86f7d64c7c0bfa283732d3202b833aa1a9ba Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 Jan 2024 13:18:55 +0100 +Subject: arm64: dts: qcom: sm8450-hdk: correct AMIC4 and AMIC5 microphones + +From: Krzysztof Kozlowski + +[ Upstream commit 915253bdd64f2372fa5f6c58d75cb99972c7401d ] + +Due to lack of documentation the AMIC4 and AMIC5 analogue microphones +were never actually working, so the audio routing for them was added +hoping it is correct. 
It turned out not correct - their routing should +point to SWR_INPUT0 (so audio mixer TX SMIC MUX0 = SWR_MIC0) and +SWR_INPUT1 (so audio mixer TX SMIC MUX0 = SWR_MIC1), respectively. With +proper mixer settings and fixed LPASS TX macr codec TX SMIC MUXn +widgets, this makes all microphones working on HDK8450. + +Cc: stable@vger.kernel.org +Fixes: f20cf2bc3f77 ("arm64: dts: qcom: sm8450-hdk: add other analogue microphones") +Signed-off-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20240124121855.162730-1-krzysztof.kozlowski@linaro.org +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + arch/arm64/boot/dts/qcom/sm8450-hdk.dts | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts +index a20d5d76af352..31e74160b8c13 100644 +--- a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts ++++ b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts +@@ -938,8 +938,8 @@ &sound { + "TX DMIC3", "MIC BIAS1", + "TX SWR_INPUT0", "ADC1_OUTPUT", + "TX SWR_INPUT1", "ADC2_OUTPUT", +- "TX SWR_INPUT2", "ADC3_OUTPUT", +- "TX SWR_INPUT3", "ADC4_OUTPUT"; ++ "TX SWR_INPUT0", "ADC3_OUTPUT", ++ "TX SWR_INPUT1", "ADC4_OUTPUT"; + + wcd-playback-dai-link { + link-name = "WCD Playback"; +-- +2.43.0 + diff --git a/queue-6.8/arm64-dts-qcom-sm8550-mtp-correct-wcd9385-tx-port-ma.patch b/queue-6.8/arm64-dts-qcom-sm8550-mtp-correct-wcd9385-tx-port-ma.patch new file mode 100644 index 00000000000..bce1b744d9d --- /dev/null +++ b/queue-6.8/arm64-dts-qcom-sm8550-mtp-correct-wcd9385-tx-port-ma.patch @@ -0,0 +1,44 @@ +From ccef121b2d890ad6d3d4e3b328e2043b52b145e2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 Jan 2024 17:45:03 +0100 +Subject: arm64: dts: qcom: sm8550-mtp: correct WCD9385 TX port mapping + +From: Krzysztof Kozlowski + +[ Upstream commit b66966b1bbc0aa58f7af83cbd56d5a206892857c ] + +WCD9385 audio codec TX port mapping was copied form HDK8450, but in fact +it is offset by one. Correct it to fix recording via analogue +microphones. + +The change is based on QRD8550 and should be correct here as well, but +was not tested on MTP8550. 
+ +Cc: stable@vger.kernel.org +Fixes: a541667c86a9 ("arm64: dts: qcom: sm8550-mtp: add WCD9385 audio-codec") +Signed-off-by: Krzysztof Kozlowski +Reviewed-by: Neil Armstrong +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20240124164505.293202-2-krzysztof.kozlowski@linaro.org +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + arch/arm64/boot/dts/qcom/sm8550-mtp.dts | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts +index c1135ad5fa696..a26e169a802a4 100644 +--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts ++++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts +@@ -874,7 +874,7 @@ &swr2 { + wcd_tx: codec@0,3 { + compatible = "sdw20217010d00"; + reg = <0 3>; +- qcom,tx-port-mapping = <1 1 2 3>; ++ qcom,tx-port-mapping = <2 2 3 4>; + }; + }; + +-- +2.43.0 + diff --git a/queue-6.8/arm64-dts-qcom-sm8550-qrd-correct-wcd9385-tx-port-ma.patch b/queue-6.8/arm64-dts-qcom-sm8550-qrd-correct-wcd9385-tx-port-ma.patch new file mode 100644 index 00000000000..b854a0e50f2 --- /dev/null +++ b/queue-6.8/arm64-dts-qcom-sm8550-qrd-correct-wcd9385-tx-port-ma.patch @@ -0,0 +1,40 @@ +From cb890aac8be5a46038590651553cd40422a9a825 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 Jan 2024 17:45:02 +0100 +Subject: arm64: dts: qcom: sm8550-qrd: correct WCD9385 TX port mapping + +From: Krzysztof Kozlowski + +[ Upstream commit 8ca7fbd92c1b28edb5d5df7aeb8bb4886ddb9829 ] + +WCD9385 audio codec TX port mapping was copied form HDK8450, but in fact +it is offset by one. Correct it to fix recording via analogue +microphones. + +Cc: stable@vger.kernel.org +Fixes: 83fae950c992 ("arm64: dts: qcom: sm8550-qrd: add WCD9385 audio-codec") +Signed-off-by: Krzysztof Kozlowski +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20240124164505.293202-1-krzysztof.kozlowski@linaro.org +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + arch/arm64/boot/dts/qcom/sm8550-qrd.dts | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts +index d401d63e5c4d2..54dfee40d6059 100644 +--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts ++++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts +@@ -978,7 +978,7 @@ &swr2 { + wcd_tx: codec@0,3 { + compatible = "sdw20217010d00"; + reg = <0 3>; +- qcom,tx-port-mapping = <1 1 2 3>; ++ qcom,tx-port-mapping = <2 2 3 4>; + }; + }; + +-- +2.43.0 + diff --git a/queue-6.8/block-clear-zone-limits-for-a-non-zoned-stacked-queu.patch b/queue-6.8/block-clear-zone-limits-for-a-non-zoned-stacked-queu.patch new file mode 100644 index 00000000000..e7e570a4220 --- /dev/null +++ b/queue-6.8/block-clear-zone-limits-for-a-non-zoned-stacked-queu.patch @@ -0,0 +1,44 @@ +From a39c13747453a2e58d6e769421712ce1c1ba4b62 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 22 Feb 2024 22:17:23 +0900 +Subject: block: Clear zone limits for a non-zoned stacked queue + +From: Damien Le Moal + +[ Upstream commit c8f6f88d25929ad2f290b428efcae3b526f3eab0 ] + +Device mapper may create a non-zoned mapped device out of a zoned device +(e.g., the dm-zoned target). In such case, some queue limit such as the +max_zone_append_sectors and zone_write_granularity endup being non zero +values for a block device that is not zoned. Avoid this by clearing +these limits in blk_stack_limits() when the stacked zoned limit is +false. 
+ +Fixes: 3093a479727b ("block: inherit the zoned characteristics in blk_stack_limits") +Cc: stable@vger.kernel.org +Signed-off-by: Damien Le Moal +Link: https://lore.kernel.org/r/20240222131724.1803520-1-dlemoal@kernel.org +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/blk-settings.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/block/blk-settings.c b/block/blk-settings.c +index 06ea91e51b8b2..5adadce084086 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -689,6 +689,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + t->zone_write_granularity = max(t->zone_write_granularity, + b->zone_write_granularity); + t->zoned = max(t->zoned, b->zoned); ++ if (!t->zoned) { ++ t->zone_write_granularity = 0; ++ t->max_zone_append_sectors = 0; ++ } + return ret; + } + EXPORT_SYMBOL(blk_stack_limits); +-- +2.43.0 + diff --git a/queue-6.8/block-fix-page-refcounts-for-unaligned-buffers-in-__.patch b/queue-6.8/block-fix-page-refcounts-for-unaligned-buffers-in-__.patch new file mode 100644 index 00000000000..9b16f998c6f --- /dev/null +++ b/queue-6.8/block-fix-page-refcounts-for-unaligned-buffers-in-__.patch @@ -0,0 +1,54 @@ +From 894fe962dd4294fc9dc0d4f0e5343a5c944885e2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 13:08:09 -0500 +Subject: block: Fix page refcounts for unaligned buffers in + __bio_release_pages() + +From: Tony Battersby + +[ Upstream commit 38b43539d64b2fa020b3b9a752a986769f87f7a6 ] + +Fix an incorrect number of pages being released for buffers that do not +start at the beginning of a page. + +Fixes: 1b151e2435fc ("block: Remove special-casing of compound pages") +Cc: stable@vger.kernel.org +Signed-off-by: Tony Battersby +Tested-by: Greg Edwards +Link: https://lore.kernel.org/r/86e592a9-98d4-4cff-a646-0c0084328356@cybernetics.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/bio.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/block/bio.c b/block/bio.c +index b9642a41f286e..b52b56067e792 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -1152,7 +1152,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) + + bio_for_each_folio_all(fi, bio) { + struct page *page; +- size_t done = 0; ++ size_t nr_pages; + + if (mark_dirty) { + folio_lock(fi.folio); +@@ -1160,10 +1160,11 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) + folio_unlock(fi.folio); + } + page = folio_page(fi.folio, fi.offset / PAGE_SIZE); ++ nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - ++ fi.offset / PAGE_SIZE + 1; + do { + bio_release_page(bio, page++); +- done += PAGE_SIZE; +- } while (done < fi.length); ++ } while (--nr_pages != 0); + } + } + EXPORT_SYMBOL_GPL(__bio_release_pages); +-- +2.43.0 + diff --git a/queue-6.8/bluetooth-btnxpuart-fix-btnxpuart_close.patch b/queue-6.8/bluetooth-btnxpuart-fix-btnxpuart_close.patch new file mode 100644 index 00000000000..bf7d3a9452c --- /dev/null +++ b/queue-6.8/bluetooth-btnxpuart-fix-btnxpuart_close.patch @@ -0,0 +1,59 @@ +From 62b8dd4a8aa25829da492ea8f8acf7a771bcfe19 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 4 Mar 2024 19:14:21 +0100 +Subject: Bluetooth: btnxpuart: Fix btnxpuart_close + +From: Marcel Ziswiler + +[ Upstream commit 664130c0b0309b360bc5bdd40a30604a9387bde8 ] + +Fix scheduling while atomic BUG in btnxpuart_close(), properly +purge the transmit queue and free the receive skb. + +[ 10.973809] BUG: scheduling while atomic: kworker/u9:0/80/0x00000002 +... 
+[ 10.980740] CPU: 3 PID: 80 Comm: kworker/u9:0 Not tainted 6.8.0-rc7-0.0.0-devel-00005-g61fdfceacf09 #1 +[ 10.980751] Hardware name: Toradex Verdin AM62 WB on Dahlia Board (DT) +[ 10.980760] Workqueue: hci0 hci_power_off [bluetooth] +[ 10.981169] Call trace: +... +[ 10.981363] uart_update_mctrl+0x58/0x78 +[ 10.981373] uart_dtr_rts+0x104/0x114 +[ 10.981381] tty_port_shutdown+0xd4/0xdc +[ 10.981396] tty_port_close+0x40/0xbc +[ 10.981407] uart_close+0x34/0x9c +[ 10.981414] ttyport_close+0x50/0x94 +[ 10.981430] serdev_device_close+0x40/0x50 +[ 10.981442] btnxpuart_close+0x24/0x98 [btnxpuart] +[ 10.981469] hci_dev_close_sync+0x2d8/0x718 [bluetooth] +[ 10.981728] hci_dev_do_close+0x2c/0x70 [bluetooth] +[ 10.981862] hci_power_off+0x20/0x64 [bluetooth] + +Fixes: 689ca16e5232 ("Bluetooth: NXP: Add protocol support for NXP Bluetooth chipsets") +Cc: stable@vger.kernel.org +Signed-off-by: Marcel Ziswiler +Reviewed-by: Neeraj Sanjay Kale +Signed-off-by: Francesco Dolcini +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + drivers/bluetooth/btnxpuart.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c +index 1d592ac413d1f..c19dc8a2987f3 100644 +--- a/drivers/bluetooth/btnxpuart.c ++++ b/drivers/bluetooth/btnxpuart.c +@@ -1234,6 +1234,9 @@ static int btnxpuart_close(struct hci_dev *hdev) + + ps_wakeup(nxpdev); + serdev_device_close(nxpdev->serdev); ++ skb_queue_purge(&nxpdev->txq); ++ kfree_skb(nxpdev->rx_skb); ++ nxpdev->rx_skb = NULL; + clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + return 0; + } +-- +2.43.0 + diff --git a/queue-6.8/bounds-support-non-power-of-two-config_nr_cpus.patch b/queue-6.8/bounds-support-non-power-of-two-config_nr_cpus.patch new file mode 100644 index 00000000000..7ad037ebe48 --- /dev/null +++ b/queue-6.8/bounds-support-non-power-of-two-config_nr_cpus.patch @@ -0,0 +1,46 @@ +From 062fed1128348903c983716997ec195c46c77f13 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 10 Oct 2023 15:55:49 +0100 +Subject: bounds: support non-power-of-two CONFIG_NR_CPUS + +From: Matthew Wilcox (Oracle) + +[ Upstream commit f2d5dcb48f7ba9e3ff249d58fc1fa963d374e66a ] + +ilog2() rounds down, so for example when PowerPC 85xx sets CONFIG_NR_CPUS +to 24, we will only allocate 4 bits to store the number of CPUs instead of +5. Use bits_per() instead, which rounds up. Found by code inspection. +The effect of this would probably be a misaccounting when doing NUMA +balancing, so to a user, it would only be a performance penalty. The +effects may be more wide-spread; it's hard to tell. 
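The off-by-one can be seen with a small standalone sketch. The helpers below are local stand-ins written only to show the rounding difference; they are not the kernel's ilog2()/bits_per() implementations, but they agree with them for the NR_CPUS = 24 case from the changelog.

#include <stdio.h>

/* Stand-in: floor(log2(n)), i.e. rounds down like the kernel's ilog2(). */
static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

/* Stand-in: number of bits needed to store the value n itself (rounds up). */
static unsigned int bits_per_u32(unsigned int n)
{
	return ilog2_u32(n) + 1;
}

int main(void)
{
	/* CONFIG_NR_CPUS = 24, the PowerPC 85xx example from the changelog */
	printf("ilog2(24)    = %u bits\n", ilog2_u32(24));    /* 4 - one bit short */
	printf("bits_per(24) = %u bits\n", bits_per_u32(24)); /* 5 - enough for 24 */
	return 0;
}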
+ +Link: https://lkml.kernel.org/r/20231010145549.1244748-1-willy@infradead.org +Signed-off-by: Matthew Wilcox (Oracle) +Fixes: 90572890d202 ("mm: numa: Change page last {nid,pid} into {cpu,pid}") +Reviewed-by: Rik van Riel +Acked-by: Mel Gorman +Cc: Peter Zijlstra +Cc: Ingo Molnar +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + kernel/bounds.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/bounds.c b/kernel/bounds.c +index b529182e8b04f..c5a9fcd2d6228 100644 +--- a/kernel/bounds.c ++++ b/kernel/bounds.c +@@ -19,7 +19,7 @@ int main(void) + DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); + DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES); + #ifdef CONFIG_SMP +- DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); ++ DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS)); + #endif + DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); + #ifdef CONFIG_LRU_GEN +-- +2.43.0 + diff --git a/queue-6.8/btrfs-add-helper-to-get-fs_info-from-struct-inode-po.patch b/queue-6.8/btrfs-add-helper-to-get-fs_info-from-struct-inode-po.patch new file mode 100644 index 00000000000..8e17fae53f3 --- /dev/null +++ b/queue-6.8/btrfs-add-helper-to-get-fs_info-from-struct-inode-po.patch @@ -0,0 +1,729 @@ +From 6ec674d1cf065c9ed5414463d89380885881ac9c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Sep 2023 16:45:41 +0200 +Subject: btrfs: add helper to get fs_info from struct inode pointer + +From: David Sterba + +[ Upstream commit 41044b41ad2c8c8165a42ec6e9a4096826dcf153 ] + +Add a convenience helper to get a fs_info from a VFS inode pointer +instead of open coding the chain or using btrfs_sb() that in some cases +does one more pointer hop. This is implemented as a macro (still with +type checking) so we don't need full definitions of struct btrfs_inode, +btrfs_root or btrfs_fs_info. 
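The "macro, still with type checking" trick relies on C11 _Generic. A minimal sketch of the pattern follows; the struct names are placeholders and the real inode_to_fs_info() additionally goes through BTRFS_I() (a container_of() conversion), which is omitted here.

#include <stdio.h>

struct fake_fs_info { int id; };
struct fake_root    { struct fake_fs_info *fs_info; };
struct fake_inode   { struct fake_root *root; };

/*
 * _Generic has no default association here, so the macro fails to compile
 * unless the argument really is a struct fake_inode *. That is how a plain
 * macro can still enforce the argument type.
 */
#define fake_inode_to_fs_info(_inode) \
	(_Generic((_inode), struct fake_inode *: (_inode))->root->fs_info)

int main(void)
{
	struct fake_fs_info fsi = { .id = 42 };
	struct fake_root root = { .fs_info = &fsi };
	struct fake_inode inode = { .root = &root };

	printf("fs_info id: %d\n", fake_inode_to_fs_info(&inode)->id);
	/* fake_inode_to_fs_info(&root) would not compile: no matching type. */
	return 0;
}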
+ +Reviewed-by: Johannes Thumshirn +Reviewed-by: Anand Jain +Signed-off-by: David Sterba +Stable-dep-of: 86211eea8ae1 ("btrfs: qgroup: validate btrfs_qgroup_inherit parameter") +Signed-off-by: Sasha Levin +--- + fs/btrfs/compression.c | 6 +++--- + fs/btrfs/defrag.c | 4 ++-- + fs/btrfs/disk-io.c | 6 +++--- + fs/btrfs/export.c | 2 +- + fs/btrfs/extent_io.c | 12 +++++------ + fs/btrfs/file.c | 14 ++++++------- + fs/btrfs/free-space-cache.c | 2 +- + fs/btrfs/fs.h | 3 +++ + fs/btrfs/inode.c | 39 ++++++++++++++++++------------------ + fs/btrfs/ioctl.c | 40 ++++++++++++++++++------------------- + fs/btrfs/lzo.c | 2 +- + fs/btrfs/props.c | 2 +- + fs/btrfs/reflink.c | 6 +++--- + fs/btrfs/relocation.c | 2 +- + 14 files changed, 72 insertions(+), 68 deletions(-) + +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index aeb3b2aa73310..0041613a36ae5 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -284,7 +284,7 @@ static void end_bbio_comprssed_read(struct btrfs_bio *bbio) + static noinline void end_compressed_writeback(const struct compressed_bio *cb) + { + struct inode *inode = &cb->bbio.inode->vfs_inode; +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + unsigned long index = cb->start >> PAGE_SHIFT; + unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; + struct folio_batch fbatch; +@@ -415,7 +415,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, + struct compressed_bio *cb, + int *memstall, unsigned long *pflags) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + unsigned long end_index; + struct bio *orig_bio = &cb->orig_bbio->bio; + u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size; +@@ -441,7 +441,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, + * This makes readahead less effective, so here disable readahead for + * subpage for now, until full compressed write is supported. 
+ */ +- if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE) ++ if (fs_info->sectorsize < PAGE_SIZE) + return 0; + + end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; +diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c +index 5b0b645714183..a77be9896dbd1 100644 +--- a/fs/btrfs/defrag.c ++++ b/fs/btrfs/defrag.c +@@ -810,7 +810,7 @@ static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info, + static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em, + u32 extent_thresh, u64 newer_than, bool locked) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_map *next; + bool ret = false; + +@@ -1366,7 +1366,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long max_to_defrag) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + unsigned long sectors_defragged = 0; + u64 isize = i_size_read(inode); + u64 cur; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 6096628fdb21f..256782122482a 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -498,15 +498,15 @@ static int btree_migrate_folio(struct address_space *mapping, + static int btree_writepages(struct address_space *mapping, + struct writeback_control *wbc) + { +- struct btrfs_fs_info *fs_info; + int ret; + + if (wbc->sync_mode == WB_SYNC_NONE) { ++ struct btrfs_fs_info *fs_info; + + if (wbc->for_kupdate) + return 0; + +- fs_info = BTRFS_I(mapping->host)->root->fs_info; ++ fs_info = inode_to_fs_info(mapping->host); + /* this is a bit racy, but that's ok */ + ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, + BTRFS_DIRTY_METADATA_THRESH, +@@ -545,7 +545,7 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset, + static bool btree_dirty_folio(struct address_space *mapping, + struct folio *folio) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); + struct btrfs_subpage_info *spi = fs_info->subpage_info; + struct btrfs_subpage *subpage; + struct extent_buffer *eb; +diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c +index 744a02b7fd671..63d4cc338b81a 100644 +--- a/fs/btrfs/export.c ++++ b/fs/btrfs/export.c +@@ -215,7 +215,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, + { + struct inode *inode = d_inode(child); + struct inode *dir = d_inode(parent); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_path *path; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_inode_ref *iref; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index a6c712429fd2d..993b740277605 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -207,7 +207,7 @@ static void __process_pages_contig(struct address_space *mapping, + struct page *locked_page, u64 start, u64 end, + unsigned long page_ops) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); + pgoff_t start_index = start >> PAGE_SHIFT; + pgoff_t end_index = end >> PAGE_SHIFT; + pgoff_t index = start_index; +@@ -251,7 +251,7 @@ static noinline int lock_delalloc_pages(struct inode *inode, + u64 start, + u64 end) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct 
btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct address_space *mapping = inode->i_mapping; + pgoff_t start_index = start >> PAGE_SHIFT; + pgoff_t end_index = end >> PAGE_SHIFT; +@@ -323,7 +323,7 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, + struct page *locked_page, u64 *start, + u64 *end) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; + const u64 orig_start = *start; + const u64 orig_end = *end; +@@ -1011,7 +1011,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start) + { + struct inode *inode = page->mapping->host; +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + u64 start = page_offset(page); + const u64 end = start + PAGE_SIZE - 1; + u64 cur = start; +@@ -1919,7 +1919,7 @@ int btree_write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc) + { + struct btrfs_eb_write_context ctx = { .wbc = wbc }; +- struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); + int ret = 0; + int done = 0; + int nr_to_write_done = 0; +@@ -2207,7 +2207,7 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page, + bool found_error = false; + int ret = 0; + struct address_space *mapping = inode->i_mapping; +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + const u32 sectorsize = fs_info->sectorsize; + loff_t i_size = i_size_read(inode); + u64 cur = start; +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 38dfcac476099..616790d6e5028 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1137,7 +1137,7 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, + { + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + loff_t pos = iocb->ki_pos; + int ret; + loff_t oldsize; +@@ -1185,7 +1185,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, + struct file *file = iocb->ki_filp; + loff_t pos; + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct page **pages = NULL; + struct extent_changeset *data_reserved = NULL; + u64 release_bytes = 0; +@@ -1461,7 +1461,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + { + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + loff_t pos; + ssize_t written = 0; + ssize_t written_buffered; +@@ -1787,7 +1787,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + { + struct dentry *dentry = file_dentry(file); + struct inode *inode = d_inode(dentry); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + struct btrfs_log_ctx ctx; +@@ -2593,7 +2593,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode, + static 
int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_state *cached_state = NULL; + struct btrfs_path *path; +@@ -3049,7 +3049,7 @@ static long btrfs_fallocate(struct file *file, int mode, + int ret; + + /* Do not allow fallocate in ZONED mode */ +- if (btrfs_is_zoned(btrfs_sb(inode->i_sb))) ++ if (btrfs_is_zoned(inode_to_fs_info(inode))) + return -EOPNOTSUPP; + + alloc_start = round_down(offset, blocksize); +@@ -3754,7 +3754,7 @@ static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to) + if (fsverity_active(inode)) + return 0; + +- if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos)) ++ if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos)) + return 0; + + btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED); +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index d372c7ce0e6b4..815bb146b1a5e 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -399,7 +399,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, + return -ENOMEM; + + io_ctl->num_pages = num_pages; +- io_ctl->fs_info = btrfs_sb(inode->i_sb); ++ io_ctl->fs_info = inode_to_fs_info(inode); + io_ctl->inode = inode; + + return 0; +diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h +index 0e15b2a791bf1..bd59cf0aae267 100644 +--- a/fs/btrfs/fs.h ++++ b/fs/btrfs/fs.h +@@ -837,6 +837,9 @@ struct btrfs_fs_info { + #define page_to_fs_info(_page) (page_to_inode(_page)->root->fs_info) + #define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info) + ++#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \ ++ struct inode *: (_inode)))->root->fs_info) ++ + static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info) + { + return READ_ONCE(fs_info->generation); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 1e800c8bb4d9f..8151ad5f4650b 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -2829,7 +2829,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) + int btrfs_writepage_cow_fixup(struct page *page) + { + struct inode *inode = page->mapping->host; +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_writepage_fixup *fixup; + + /* This page has ordered extent covering it already */ +@@ -3254,7 +3254,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) + + int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) + { +- if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) && ++ if (btrfs_is_zoned(inode_to_fs_info(ordered->inode)) && + !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) && + list_empty(&ordered->bioc_list)) + btrfs_finish_ordered_zoned(ordered); +@@ -3739,7 +3739,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf, + static int btrfs_read_locked_inode(struct inode *inode, + struct btrfs_path *in_path) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_path *path = in_path; + struct extent_buffer *leaf; + struct btrfs_inode_item *inode_item; +@@ -4464,8 +4464,8 @@ static void btrfs_prune_dentries(struct btrfs_root *root) + + int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) + 
{ +- struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); + struct btrfs_root *root = dir->root; ++ struct btrfs_fs_info *fs_info = root->fs_info; + struct inode *inode = d_inode(dentry); + struct btrfs_root *dest = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; +@@ -5019,7 +5019,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) + btrfs_drew_write_unlock(&root->snapshot_lock); + btrfs_end_transaction(trans); + } else { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + + if (btrfs_is_zoned(fs_info)) { + ret = btrfs_wait_ordered_range(inode, +@@ -5222,7 +5222,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, + + void btrfs_evict_inode(struct inode *inode) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info; + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_block_rsv *rsv = NULL; +@@ -5236,6 +5236,7 @@ void btrfs_evict_inode(struct inode *inode) + return; + } + ++ fs_info = inode_to_fs_info(inode); + evict_inode_truncate_pages(inode); + + if (inode->i_nlink && +@@ -5661,7 +5662,7 @@ static inline u8 btrfs_inode_type(struct inode *inode) + + struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct inode *inode; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_root *sub_root = root; +@@ -6200,7 +6201,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + struct inode *dir = args->dir; + struct inode *inode = args->inode; + const struct fscrypt_str *name = args->orphan ? 
NULL : &args->fname.disk_name; +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct btrfs_root *root; + struct btrfs_inode_item *inode_item; + struct btrfs_key *location; +@@ -6522,7 +6523,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, + static int btrfs_create_common(struct inode *dir, struct dentry *dentry, + struct inode *inode) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, +@@ -6592,7 +6593,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, + struct btrfs_trans_handle *trans = NULL; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct inode *inode = d_inode(old_dentry); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct fscrypt_name fname; + u64 index; + int err; +@@ -7078,7 +7079,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, + u64 *orig_start, u64 *orig_block_len, + u64 *ram_bytes, bool nowait, bool strict) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct can_nocow_file_extent_args nocow_args = { 0 }; + struct btrfs_path *path; + int ret; +@@ -7317,7 +7318,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, + unsigned int iomap_flags) + { + const bool nowait = (iomap_flags & IOMAP_NOWAIT); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_map *em = *map; + int type; + u64 block_start, orig_start, orig_block_len, ram_bytes; +@@ -7457,7 +7458,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + struct iomap *srcmap) + { + struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_map *em; + struct extent_state *cached_state = NULL; + struct btrfs_dio_data *dio_data = iter->private; +@@ -8154,7 +8155,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) + struct page *page = vmf->page; + struct folio *folio = page_folio(page); + struct inode *inode = file_inode(vmf->vma->vm_file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; +@@ -8763,7 +8764,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + struct inode *new_dir, + struct dentry *new_dentry) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); + struct btrfs_trans_handle *trans; + unsigned int trans_num_items; + struct btrfs_root *root = BTRFS_I(old_dir)->root; +@@ -9015,7 +9016,7 @@ static int btrfs_rename(struct mnt_idmap *idmap, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); + struct btrfs_new_inode_args whiteout_args = { + .dir = old_dir, + .dentry = old_dentry, +@@ -9457,7 +9458,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info 
*fs_info, long nr, + static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, const char *symname) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_path *path; +@@ -9638,7 +9639,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, + loff_t actual_len, u64 *alloc_hint, + struct btrfs_trans_handle *trans) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct extent_map *em; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_key ins; +@@ -9790,7 +9791,7 @@ static int btrfs_permission(struct mnt_idmap *idmap, + static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, + struct file *file, umode_t mode) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct inode *inode; +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 8d80903e9bff6..738afd56c7e9e 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -247,7 +247,7 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap, + struct dentry *dentry, struct fileattr *fa) + { + struct inode *inode = d_inode(dentry); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_inode *binode = BTRFS_I(inode); + struct btrfs_root *root = binode->root; + struct btrfs_trans_handle *trans; +@@ -584,7 +584,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap, + struct inode *dir, struct dentry *dentry, + struct btrfs_qgroup_inherit *inherit) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct btrfs_trans_handle *trans; + struct btrfs_key key; + struct btrfs_root_item *root_item; +@@ -776,7 +776,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, + struct dentry *dentry, bool readonly, + struct btrfs_qgroup_inherit *inherit) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct inode *inode; + struct btrfs_pending_snapshot *pending_snapshot; + unsigned int trans_num_items; +@@ -962,7 +962,7 @@ static noinline int btrfs_mksubvol(const struct path *parent, + struct btrfs_qgroup_inherit *inherit) + { + struct inode *dir = d_inode(parent->dentry); +- struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct dentry *dentry; + struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen); + int error; +@@ -1097,7 +1097,7 @@ static noinline int btrfs_ioctl_resize(struct file *file, + { + BTRFS_DEV_LOOKUP_ARGS(args); + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + u64 new_size; + u64 old_size; + u64 devid = 1; +@@ -1405,7 +1405,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + static noinline int btrfs_ioctl_subvol_getflags(struct inode *inode, + void __user *arg) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + int 
ret = 0; + u64 flags = 0; +@@ -1428,7 +1428,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, + void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 root_flags; +@@ -1675,7 +1675,7 @@ static noinline int search_ioctl(struct inode *inode, + u64 *buf_size, + char __user *ubuf) + { +- struct btrfs_fs_info *info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *info = inode_to_fs_info(inode); + struct btrfs_root *root; + struct btrfs_key key; + struct btrfs_path *path; +@@ -2346,9 +2346,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, + bool destroy_v2) + { + struct dentry *parent = file->f_path.dentry; +- struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb); + struct dentry *dentry; + struct inode *dir = d_inode(parent); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); + struct inode *inode; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_root *dest = NULL; +@@ -2696,7 +2696,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) + { + BTRFS_DEV_LOOKUP_ARGS(args); + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_ioctl_vol_args_v2 *vol_args; + struct bdev_handle *bdev_handle = NULL; + int ret; +@@ -2761,7 +2761,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) + { + BTRFS_DEV_LOOKUP_ARGS(args); + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_ioctl_vol_args *vol_args; + struct bdev_handle *bdev_handle = NULL; + int ret; +@@ -2904,7 +2904,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, + static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *new_root; + struct btrfs_dir_item *di; +@@ -3178,7 +3178,7 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info, + + static long btrfs_ioctl_scrub(struct file *file, void __user *arg) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); + struct btrfs_ioctl_scrub_args *sa; + int ret; + +@@ -3696,7 +3696,7 @@ static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info, + static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_ioctl_quota_ctl_args *sa; + int ret; + +@@ -3738,7 +3738,7 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) + static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_ioctl_qgroup_assign_args *sa; + struct 
btrfs_trans_handle *trans; +@@ -3894,7 +3894,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) + static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_ioctl_quota_rescan_args *qsa; + int ret; + +@@ -3958,7 +3958,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file, + struct btrfs_ioctl_received_subvol_args *sa) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root_item *root_item = &root->root_item; + struct btrfs_trans_handle *trans; +@@ -4146,7 +4146,7 @@ static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info, + static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_super_block *super_block = fs_info->super_copy; + struct btrfs_trans_handle *trans; +@@ -4289,7 +4289,7 @@ check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \ + static int btrfs_ioctl_set_features(struct file *file, void __user *arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_super_block *super_block = fs_info->super_copy; + struct btrfs_ioctl_feature_flags flags[2]; +@@ -4580,7 +4580,7 @@ long btrfs_ioctl(struct file *file, unsigned int + cmd, unsigned long arg) + { + struct inode *inode = file_inode(file); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_root *root = BTRFS_I(inode)->root; + void __user *argp = (void __user *)arg; + +diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c +index 110a2c304bdc7..3e5d3b7028e8b 100644 +--- a/fs/btrfs/lzo.c ++++ b/fs/btrfs/lzo.c +@@ -214,7 +214,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, + unsigned long *total_in, unsigned long *total_out) + { + struct workspace *workspace = list_entry(ws, struct workspace, list); +- const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize; ++ const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize; + struct page *page_in = NULL; + char *sizes_ptr; + const unsigned long max_nr_page = *out_pages; +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c +index f9bf591a07187..ac4a0af2b5543 100644 +--- a/fs/btrfs/props.c ++++ b/fs/btrfs/props.c +@@ -302,7 +302,7 @@ static int prop_compression_validate(const struct btrfs_inode *inode, + static int prop_compression_apply(struct inode *inode, const char *value, + size_t len) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + int type; + + /* Reset to defaults */ +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c +index e38cb40e150c9..08d0fb46ceec4 100644 +--- a/fs/btrfs/reflink.c ++++ b/fs/btrfs/reflink.c +@@ -174,7 +174,7 @@ static int clone_copy_inline_extent(struct inode *dst, + char *inline_data, + struct btrfs_trans_handle **trans_out) + { 
+- struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(dst); + struct btrfs_root *root = BTRFS_I(dst)->root; + const u64 aligned_end = ALIGN(new_key->offset + datal, + fs_info->sectorsize); +@@ -337,7 +337,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode, + const u64 off, const u64 olen, const u64 olen_aligned, + const u64 destoff, int no_time_update) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + struct btrfs_path *path = NULL; + struct extent_buffer *leaf; + struct btrfs_trans_handle *trans; +@@ -726,7 +726,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, + { + struct inode *inode = file_inode(file); + struct inode *src = file_inode(file_src); +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + int ret; + int wb_ret; + u64 len = olen; +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index abe594f77f99c..2fca67f2b39b9 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -2987,7 +2987,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, + const struct file_extent_cluster *cluster, + int *cluster_nr, unsigned long page_index) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); + u64 offset = BTRFS_I(inode)->index_cnt; + const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; + gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); +-- +2.43.0 + diff --git a/queue-6.8/btrfs-add-helpers-to-get-fs_info-from-page-folio-poi.patch b/queue-6.8/btrfs-add-helpers-to-get-fs_info-from-page-folio-poi.patch new file mode 100644 index 00000000000..a564d79c9a3 --- /dev/null +++ b/queue-6.8/btrfs-add-helpers-to-get-fs_info-from-page-folio-poi.patch @@ -0,0 +1,176 @@ +From 32dbab6586101b410b4e40afe1303b89fe5bcaa8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Sep 2023 16:24:43 +0200 +Subject: btrfs: add helpers to get fs_info from page/folio pointers + +From: David Sterba + +[ Upstream commit b33d2e535f9b2a1c4210cfc4843ac0dbacbeebcc ] + +Add convenience helpers to get a fs_info from a page or folio pointer +instead of open coding the chain or using btrfs_sb() that in some cases +does one more pointer hop. This is implemented as a macro (still with +type checking) so we don't need full definitions of struct page, folio, +btrfs_root and btrfs_fs_info. The latter can't be static inlines as this +would create loop between ctree.h <-> fs.h, or the headers would have to +be restructured. 
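+
+A minimal before/after sketch of the intended use (illustrative only,
+not part of the change itself; the variable names mirror the hunks
+below):
+
+	/* open-coded chain via the superblock */
+	fs_info = btrfs_sb(page->mapping->host->i_sb);
+	/* becomes the type-checked helper */
+	fs_info = page_to_fs_info(page);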
+ +Reviewed-by: Johannes Thumshirn +Reviewed-by: Anand Jain +Signed-off-by: David Sterba +Stable-dep-of: 86211eea8ae1 ("btrfs: qgroup: validate btrfs_qgroup_inherit parameter") +Signed-off-by: Sasha Levin +--- + fs/btrfs/compression.c | 2 +- + fs/btrfs/disk-io.c | 2 +- + fs/btrfs/extent_io.c | 16 ++++++++-------- + fs/btrfs/fs.h | 3 +++ + fs/btrfs/inode.c | 2 +- + fs/btrfs/lzo.c | 2 +- + 6 files changed, 15 insertions(+), 12 deletions(-) + +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index 68345f73d429a..aeb3b2aa73310 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -1039,7 +1039,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) + int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, + unsigned long dest_pgoff, size_t srclen, size_t destlen) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page); + struct list_head *workspace; + const u32 sectorsize = fs_info->sectorsize; + int ret; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index e9eb3f0f245b2..6096628fdb21f 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -534,7 +534,7 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset, + extent_invalidate_folio(tree, folio, offset); + btree_release_folio(folio, GFP_NOFS); + if (folio_get_private(folio)) { +- btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info, ++ btrfs_warn(folio_to_fs_info(folio), + "folio private not zero on folio %llu", + (unsigned long long)folio_pos(folio)); + folio_detach_private(folio); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index cf94e88bf8d05..a6c712429fd2d 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -433,7 +433,7 @@ static bool btrfs_verify_page(struct page *page, u64 start) + + static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(page); + struct folio *folio = page_folio(page); + + ASSERT(page_offset(page) <= start && +@@ -948,7 +948,7 @@ int set_folio_extent_mapped(struct folio *folio) + if (folio_test_private(folio)) + return 0; + +- fs_info = btrfs_sb(folio->mapping->host->i_sb); ++ fs_info = folio_to_fs_info(folio); + + if (btrfs_is_subpage(fs_info, folio->mapping)) + return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA); +@@ -967,7 +967,7 @@ void clear_page_extent_mapped(struct page *page) + if (!folio_test_private(folio)) + return; + +- fs_info = btrfs_sb(page->mapping->host->i_sb); ++ fs_info = page_to_fs_info(page); + if (btrfs_is_subpage(fs_info, page->mapping)) + return btrfs_detach_subpage(fs_info, folio); + +@@ -1770,7 +1770,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, + */ + static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(page); + struct folio *folio = page_folio(page); + int submitted = 0; + u64 page_start = page_offset(page); +@@ -1861,7 +1861,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx) + if (!folio_test_private(folio)) + return 0; + +- if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) ++ if (page_to_fs_info(page)->nodesize < PAGE_SIZE) + return submit_eb_subpage(page, wbc); + + 
spin_lock(&mapping->i_private_lock); +@@ -2313,7 +2313,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree, + struct extent_state *cached_state = NULL; + u64 start = folio_pos(folio); + u64 end = start + folio_size(folio) - 1; +- size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize; ++ size_t blocksize = folio_to_fs_info(folio)->sectorsize; + + /* This function is only called for the btree inode */ + ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO); +@@ -4940,7 +4940,7 @@ static struct extent_buffer *get_next_extent_buffer( + + static int try_release_subpage_extent_buffer(struct page *page) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(page); + u64 cur = page_offset(page); + const u64 end = page_offset(page) + PAGE_SIZE; + int ret; +@@ -5013,7 +5013,7 @@ int try_release_extent_buffer(struct page *page) + struct folio *folio = page_folio(page); + struct extent_buffer *eb; + +- if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) ++ if (page_to_fs_info(page)->nodesize < PAGE_SIZE) + return try_release_subpage_extent_buffer(page); + + /* +diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h +index 1cfd16f956e77..0e15b2a791bf1 100644 +--- a/fs/btrfs/fs.h ++++ b/fs/btrfs/fs.h +@@ -834,6 +834,9 @@ struct btrfs_fs_info { + #define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \ + struct folio *: (_folio))->mapping->host)) + ++#define page_to_fs_info(_page) (page_to_inode(_page)->root->fs_info) ++#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info) ++ + static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info) + { + return READ_ONCE(fs_info->generation); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 6948440286e55..1e800c8bb4d9f 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7903,7 +7903,7 @@ static void btrfs_readahead(struct readahead_control *rac) + */ + static void wait_subpage_spinlock(struct page *page) + { +- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(page); + struct folio *folio = page_folio(page); + struct btrfs_subpage *subpage; + +diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c +index e43bc0fdc74ec..110a2c304bdc7 100644 +--- a/fs/btrfs/lzo.c ++++ b/fs/btrfs/lzo.c +@@ -429,7 +429,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in, + size_t destlen) + { + struct workspace *workspace = list_entry(ws, struct workspace, list); +- struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb); ++ struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page); + const u32 sectorsize = fs_info->sectorsize; + size_t in_len; + size_t out_len; +-- +2.43.0 + diff --git a/queue-6.8/btrfs-add-helpers-to-get-inode-from-page-folio-point.patch b/queue-6.8/btrfs-add-helpers-to-get-inode-from-page-folio-point.patch new file mode 100644 index 00000000000..34e09992500 --- /dev/null +++ b/queue-6.8/btrfs-add-helpers-to-get-inode-from-page-folio-point.patch @@ -0,0 +1,112 @@ +From ce4f2aa6b59f5e9d9c32f2d8a1df442dac1eba58 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Sep 2023 16:11:29 +0200 +Subject: btrfs: add helpers to get inode from page/folio pointers + +From: David Sterba + +[ Upstream commit c8293894afa718653688b2fa98ab68317c875a00 ] + +Add convenience helpers to get a struct btrfs_inode from a page or folio +pointer instead of open coding the chain or intermediate BTRFS_I. 
This +is implemented as a macro (still with type checking) so we don't need +full definitions of struct page or address_space. + +Reviewed-by: Johannes Thumshirn +Reviewed-by: Anand Jain +Signed-off-by: David Sterba +Stable-dep-of: 86211eea8ae1 ("btrfs: qgroup: validate btrfs_qgroup_inherit parameter") +Signed-off-by: Sasha Levin +--- + fs/btrfs/disk-io.c | 3 ++- + fs/btrfs/extent_io.c | 8 ++++---- + fs/btrfs/fs.h | 5 +++++ + fs/btrfs/inode.c | 2 +- + 4 files changed, 12 insertions(+), 6 deletions(-) + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 091104a327326..e9eb3f0f245b2 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -529,7 +529,8 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset, + size_t length) + { + struct extent_io_tree *tree; +- tree = &BTRFS_I(folio->mapping->host)->io_tree; ++ ++ tree = &folio_to_inode(folio)->io_tree; + extent_invalidate_folio(tree, folio, offset); + btree_release_folio(folio, GFP_NOFS); + if (folio_get_private(folio)) { +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 3b47654ed3e8f..cf94e88bf8d05 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -827,7 +827,7 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl, + u64 disk_bytenr, struct page *page, + size_t size, unsigned long pg_offset) + { +- struct btrfs_inode *inode = BTRFS_I(page->mapping->host); ++ struct btrfs_inode *inode = page_to_inode(page); + + ASSERT(pg_offset + size <= PAGE_SIZE); + ASSERT(bio_ctrl->end_io_func); +@@ -1161,7 +1161,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + int btrfs_read_folio(struct file *file, struct folio *folio) + { + struct page *page = &folio->page; +- struct btrfs_inode *inode = BTRFS_I(page->mapping->host); ++ struct btrfs_inode *inode = page_to_inode(page); + u64 start = page_offset(page); + u64 end = start + PAGE_SIZE - 1; + struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ }; +@@ -1184,7 +1184,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages, + struct btrfs_bio_ctrl *bio_ctrl, + u64 *prev_em_start) + { +- struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); ++ struct btrfs_inode *inode = page_to_inode(pages[0]); + int index; + + btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); +@@ -2382,7 +2382,7 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) + struct extent_map *em; + u64 start = page_offset(page); + u64 end = start + PAGE_SIZE - 1; +- struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); ++ struct btrfs_inode *btrfs_inode = page_to_inode(page); + struct extent_io_tree *tree = &btrfs_inode->io_tree; + struct extent_map_tree *map = &btrfs_inode->extent_tree; + +diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h +index f8bb73d6ab68c..1cfd16f956e77 100644 +--- a/fs/btrfs/fs.h ++++ b/fs/btrfs/fs.h +@@ -829,6 +829,11 @@ struct btrfs_fs_info { + #endif + }; + ++#define page_to_inode(_page) (BTRFS_I(_Generic((_page), \ ++ struct page *: (_page))->mapping->host)) ++#define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \ ++ struct folio *: (_folio))->mapping->host)) ++ + static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info) + { + return READ_ONCE(fs_info->generation); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index bd3f348d503dc..6948440286e55 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7970,7 +7970,7 @@ static int btrfs_migrate_folio(struct address_space *mapping, + static void 
btrfs_invalidate_folio(struct folio *folio, size_t offset, + size_t length) + { +- struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); ++ struct btrfs_inode *inode = folio_to_inode(folio); + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_io_tree *tree = &inode->io_tree; + struct extent_state *cached_state = NULL; +-- +2.43.0 + diff --git a/queue-6.8/btrfs-add-set_folio_extent_mapped-helper.patch b/queue-6.8/btrfs-add-set_folio_extent_mapped-helper.patch new file mode 100644 index 00000000000..3c9b3d68e6a --- /dev/null +++ b/queue-6.8/btrfs-add-set_folio_extent_mapped-helper.patch @@ -0,0 +1,69 @@ +From 400a580343a18afb89d345c37c0000abd7cf9091 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Dec 2023 16:13:29 +0000 +Subject: btrfs: add set_folio_extent_mapped() helper + +From: Matthew Wilcox (Oracle) + +[ Upstream commit dfba9f47730604a46c284f6099a11c5686b6289d ] + +Turn set_page_extent_mapped() into a wrapper around this version. +Saves a call to compound_head() for callers who already have a folio +and removes a couple of users of page->mapping. + +Reviewed-by: Johannes Thumshirn +Signed-off-by: Matthew Wilcox (Oracle) +Reviewed-by: David Sterba +Signed-off-by: David Sterba +Stable-dep-of: 86211eea8ae1 ("btrfs: qgroup: validate btrfs_qgroup_inherit parameter") +Signed-off-by: Sasha Levin +--- + fs/btrfs/extent_io.c | 12 ++++++++---- + fs/btrfs/extent_io.h | 1 + + 2 files changed, 9 insertions(+), 4 deletions(-) + +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 7761d7d93ba98..8b88cd08ab053 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -936,17 +936,21 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb, + + int set_page_extent_mapped(struct page *page) + { +- struct folio *folio = page_folio(page); ++ return set_folio_extent_mapped(page_folio(page)); ++} ++ ++int set_folio_extent_mapped(struct folio *folio) ++{ + struct btrfs_fs_info *fs_info; + +- ASSERT(page->mapping); ++ ASSERT(folio->mapping); + + if (folio_test_private(folio)) + return 0; + +- fs_info = btrfs_sb(page->mapping->host->i_sb); ++ fs_info = btrfs_sb(folio->mapping->host->i_sb); + +- if (btrfs_is_subpage(fs_info, page->mapping)) ++ if (btrfs_is_subpage(fs_info, folio->mapping)) + return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA); + + folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE); +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h +index 46050500529bf..2c9d6570b0a38 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -221,6 +221,7 @@ int btree_write_cache_pages(struct address_space *mapping, + void extent_readahead(struct readahead_control *rac); + int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 len); ++int set_folio_extent_mapped(struct folio *folio); + int set_page_extent_mapped(struct page *page); + void clear_page_extent_mapped(struct page *page); + +-- +2.43.0 + diff --git a/queue-6.8/btrfs-fix-off-by-one-chunk-length-calculation-at-con.patch b/queue-6.8/btrfs-fix-off-by-one-chunk-length-calculation-at-con.patch new file mode 100644 index 00000000000..8996105a5f2 --- /dev/null +++ b/queue-6.8/btrfs-fix-off-by-one-chunk-length-calculation-at-con.patch @@ -0,0 +1,48 @@ +From bf239a197c04fe4b55e08b8d770a3726ed7e199d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 10:37:04 +0000 +Subject: btrfs: fix off-by-one chunk length calculation at + contains_pending_extent() + +From: Filipe Manana + +[ Upstream commit 
ae6bd7f9b46a29af52ebfac25d395757e2031d0d ] + +At contains_pending_extent() the value of the end offset of a chunk we +found in the device's allocation state io tree is inclusive, so when +we calculate the length we pass to the in_range() macro, we must sum +1 to the expression "physical_end - physical_offset". + +In practice the wrong calculation should be harmless as chunks sizes +are never 1 byte and we should never have 1 byte ranges of unallocated +space. Nevertheless fix the wrong calculation. + +Reported-by: Alex Lyakas +Link: https://lore.kernel.org/linux-btrfs/CAOcd+r30e-f4R-5x-S7sV22RJPe7+pgwherA6xqN2_qe7o4XTg@mail.gmail.com/ +Fixes: 1c11b63eff2a ("btrfs: replace pending/pinned chunks lists with io tree") +CC: stable@vger.kernel.org # 6.1+ +Reviewed-by: Josef Bacik +Reviewed-by: Qu Wenruo +Signed-off-by: Filipe Manana +Signed-off-by: David Sterba +Signed-off-by: Sasha Levin +--- + fs/btrfs/volumes.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index d67785be2c778..d852d94d51c00 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -1403,7 +1403,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, + + if (in_range(physical_start, *start, len) || + in_range(*start, physical_start, +- physical_end - physical_start)) { ++ physical_end + 1 - physical_start)) { + *start = physical_end + 1; + return true; + } +-- +2.43.0 + diff --git a/queue-6.8/btrfs-qgroup-always-free-reserved-space-for-extent-r.patch b/queue-6.8/btrfs-qgroup-always-free-reserved-space-for-extent-r.patch new file mode 100644 index 00000000000..453b1b6c791 --- /dev/null +++ b/queue-6.8/btrfs-qgroup-always-free-reserved-space-for-extent-r.patch @@ -0,0 +1,100 @@ +From c0e2318b86ba5c428176b4a9318cb7490f2cc10b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 18:13:38 +1030 +Subject: btrfs: qgroup: always free reserved space for extent records + +From: Qu Wenruo + +[ Upstream commit d139ded8b9cdb897bb9539eb33311daf9a177fd2 ] + +[BUG] +If qgroup is marked inconsistent (e.g. caused by operations needing full +subtree rescan, like creating a snapshot and assign to a higher level +qgroup), btrfs would immediately start leaking its data reserved space. + +The following script can easily reproduce it: + + mkfs.btrfs -O quota -f $dev + mount $dev $mnt + btrfs subvolume create $mnt/subv1 + btrfs qgroup create 1/0 $mnt + + # This snapshot creation would mark qgroup inconsistent, + # as the ownership involves different higher level qgroup, thus + # we have to rescan both source and snapshot, which can be very + # time consuming, thus here btrfs just choose to mark qgroup + # inconsistent, and let users to determine when to do the rescan. + btrfs subv snapshot -i 1/0 $mnt/subv1 $mnt/snap1 + + # Now this write would lead to qgroup rsv leak. + xfs_io -f -c "pwrite 0 64k" $mnt/file1 + + # And at unmount time, btrfs would report 64K DATA rsv space leaked. + umount $mnt + +And we would have the following dmesg output for the unmount: + + BTRFS info (device dm-1): last unmount of filesystem 14a3d84e-f47b-4f72-b053-a8a36eef74d3 + BTRFS warning (device dm-1): qgroup 0/5 has unreleased space, type 0 rsv 65536 + +[CAUSE] +Since commit e15e9f43c7ca ("btrfs: introduce +BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING to skip qgroup accounting"), +we introduce a mode for btrfs qgroup to skip the timing consuming +backref walk, if the qgroup is already inconsistent. 
+ +But this skip also covered the data reserved freeing, thus the qgroup +reserved space for each newly created data extent would not be freed, +thus cause the leakage. + +[FIX] +Make the data extent reserved space freeing mandatory. + +The qgroup reserved space handling is way cheaper compared to the +backref walking part, and we always have the super sensitive leak +detector, thus it's definitely worth to always free the qgroup +reserved data space. + +Reported-by: Fabian Vogt +Fixes: e15e9f43c7ca ("btrfs: introduce BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING to skip qgroup accounting") +CC: stable@vger.kernel.org # 6.1+ +Link: https://bugzilla.suse.com/show_bug.cgi?id=1216196 +Reviewed-by: Filipe Manana +Signed-off-by: Qu Wenruo +Signed-off-by: David Sterba +Signed-off-by: Sasha Levin +--- + fs/btrfs/qgroup.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 5470e1cdf10c5..5df54f78db2b9 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -2959,11 +2959,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) + ctx.roots = NULL; + } + +- /* Free the reserved data space */ +- btrfs_qgroup_free_refroot(fs_info, +- record->data_rsv_refroot, +- record->data_rsv, +- BTRFS_QGROUP_RSV_DATA); + /* + * Use BTRFS_SEQ_LAST as time_seq to do special search, + * which doesn't lock tree or delayed_refs and search +@@ -2987,6 +2982,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) + record->old_roots = NULL; + new_roots = NULL; + } ++ /* Free the reserved data space */ ++ btrfs_qgroup_free_refroot(fs_info, ++ record->data_rsv_refroot, ++ record->data_rsv, ++ BTRFS_QGROUP_RSV_DATA); + cleanup: + ulist_free(record->old_roots); + ulist_free(new_roots); +-- +2.43.0 + diff --git a/queue-6.8/btrfs-qgroup-validate-btrfs_qgroup_inherit-parameter.patch b/queue-6.8/btrfs-qgroup-validate-btrfs_qgroup_inherit-parameter.patch new file mode 100644 index 00000000000..9d26705a25d --- /dev/null +++ b/queue-6.8/btrfs-qgroup-validate-btrfs_qgroup_inherit-parameter.patch @@ -0,0 +1,191 @@ +From 4efc5ade646280dfe2afdd72f319009aee48288e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 13:45:35 +1030 +Subject: btrfs: qgroup: validate btrfs_qgroup_inherit parameter + +From: Qu Wenruo + +[ Upstream commit 86211eea8ae1676cc819d2b4fdc8d995394be07d ] + +[BUG] +Currently btrfs can create subvolume with an invalid qgroup inherit +without triggering any error: + + # mkfs.btrfs -O quota -f $dev + # mount $dev $mnt + # btrfs subvolume create -i 2/0 $mnt/subv1 + # btrfs qgroup show -prce --sync $mnt + Qgroupid Referenced Exclusive Path + -------- ---------- --------- ---- + 0/5 16.00KiB 16.00KiB + 0/256 16.00KiB 16.00KiB subv1 + +[CAUSE] +We only do a very basic size check for btrfs_qgroup_inherit structure, +but never really verify if the values are correct. + +Thus in btrfs_qgroup_inherit() function, we have to skip non-existing +qgroups, and never return any error. + +[FIX] +Fix the behavior and introduce extra checks: + +- Introduce early check for btrfs_qgroup_inherit structure + Not only the size, but also all the qgroup ids would be verified. + + And the timing is very early, so we can return error early. + This early check is very important for snapshot creation, as snapshot + is delayed to transaction commit. + +- Drop support for btrfs_qgroup_inherit::num_ref_copies and + num_excl_copies + Those two members are used to specify to copy refr/excl numbers from + other qgroups. 
+ This would definitely mark qgroup inconsistent, and btrfs-progs has + dropped the support for them for a long time. + It's time to drop the support for kernel. + +- Verify the supported btrfs_qgroup_inherit::flags + Just in case we want to add extra flags for btrfs_qgroup_inherit. + +Now above subvolume creation would fail with -ENOENT other than silently +ignore the non-existing qgroup. + +CC: stable@vger.kernel.org # 6.7+ +Signed-off-by: Qu Wenruo +Reviewed-by: David Sterba +Signed-off-by: David Sterba +Signed-off-by: Sasha Levin +--- + fs/btrfs/ioctl.c | 16 +++--------- + fs/btrfs/qgroup.c | 51 ++++++++++++++++++++++++++++++++++++++ + fs/btrfs/qgroup.h | 3 +++ + include/uapi/linux/btrfs.h | 1 + + 4 files changed, 58 insertions(+), 13 deletions(-) + +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 738afd56c7e9e..bd19aed66605a 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1362,7 +1362,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + if (vol_args->flags & BTRFS_SUBVOL_RDONLY) + readonly = true; + if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { +- u64 nums; ++ struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); + + if (vol_args->size < sizeof(*inherit) || + vol_args->size > PAGE_SIZE) { +@@ -1375,19 +1375,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + goto free_args; + } + +- if (inherit->num_qgroups > PAGE_SIZE || +- inherit->num_ref_copies > PAGE_SIZE || +- inherit->num_excl_copies > PAGE_SIZE) { +- ret = -EINVAL; +- goto free_inherit; +- } +- +- nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + +- 2 * inherit->num_excl_copies; +- if (vol_args->size != struct_size(inherit, qgroups, nums)) { +- ret = -EINVAL; ++ ret = btrfs_qgroup_check_inherit(fs_info, inherit, vol_args->size); ++ if (ret < 0) + goto free_inherit; +- } + } + + ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file), +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 5df54f78db2b9..a78c6694959aa 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -3048,6 +3048,57 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans) + return ret; + } + ++int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, ++ struct btrfs_qgroup_inherit *inherit, ++ size_t size) ++{ ++ if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) ++ return -EOPNOTSUPP; ++ if (size < sizeof(*inherit) || size > PAGE_SIZE) ++ return -EINVAL; ++ ++ /* ++ * In the past we allowed btrfs_qgroup_inherit to specify to copy ++ * rfer/excl numbers directly from other qgroups. This behavior has ++ * been disabled in userspace for a very long time, but here we should ++ * also disable it in kernel, as this behavior is known to mark qgroup ++ * inconsistent, and a rescan would wipe out the changes anyway. ++ * ++ * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies. ++ */ ++ if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0) ++ return -EINVAL; ++ ++ if (inherit->num_qgroups > PAGE_SIZE) ++ return -EINVAL; ++ ++ if (size != struct_size(inherit, qgroups, inherit->num_qgroups)) ++ return -EINVAL; ++ ++ /* ++ * Now check all the remaining qgroups, they should all: ++ * ++ * - Exist ++ * - Be higher level qgroups. 
++ */ ++ for (int i = 0; i < inherit->num_qgroups; i++) { ++ struct btrfs_qgroup *qgroup; ++ u64 qgroupid = inherit->qgroups[i]; ++ ++ if (btrfs_qgroup_level(qgroupid) == 0) ++ return -EINVAL; ++ ++ spin_lock(&fs_info->qgroup_lock); ++ qgroup = find_qgroup_rb(fs_info, qgroupid); ++ if (!qgroup) { ++ spin_unlock(&fs_info->qgroup_lock); ++ return -ENOENT; ++ } ++ spin_unlock(&fs_info->qgroup_lock); ++ } ++ return 0; ++} ++ + static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info, + u64 inode_rootid, + struct btrfs_qgroup_inherit **inherit) +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h +index be18c862e64ed..45a7d8920d039 100644 +--- a/fs/btrfs/qgroup.h ++++ b/fs/btrfs/qgroup.h +@@ -341,6 +341,9 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, + struct ulist *new_roots); + int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans); + int btrfs_run_qgroups(struct btrfs_trans_handle *trans); ++int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, ++ struct btrfs_qgroup_inherit *inherit, ++ size_t size); + int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, + u64 objectid, u64 inode_rootid, + struct btrfs_qgroup_inherit *inherit); +diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h +index f8bc34a6bcfa2..cdf6ad872149c 100644 +--- a/include/uapi/linux/btrfs.h ++++ b/include/uapi/linux/btrfs.h +@@ -92,6 +92,7 @@ struct btrfs_qgroup_limit { + * struct btrfs_qgroup_inherit.flags + */ + #define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0) ++#define BTRFS_QGROUP_INHERIT_FLAGS_SUPP (BTRFS_QGROUP_INHERIT_SET_LIMITS) + + struct btrfs_qgroup_inherit { + __u64 flags; +-- +2.43.0 + diff --git a/queue-6.8/btrfs-replace-sb-s_blocksize-by-fs_info-sectorsize.patch b/queue-6.8/btrfs-replace-sb-s_blocksize-by-fs_info-sectorsize.patch new file mode 100644 index 00000000000..c24e6794e68 --- /dev/null +++ b/queue-6.8/btrfs-replace-sb-s_blocksize-by-fs_info-sectorsize.patch @@ -0,0 +1,160 @@ +From a0ecea5e5bbac5a43f7af132102f08916f4f3b84 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 16 Jan 2024 17:33:20 +0100 +Subject: btrfs: replace sb::s_blocksize by fs_info::sectorsize + +From: David Sterba + +[ Upstream commit 4e00422ee62663e31e611d7de4d2c4aa3f8555f2 ] + +The block size stored in the super block is used by subsystems outside +of btrfs and it's a copy of fs_info::sectorsize. Unify that to always +use our sectorsize, with the exception of mount where we first need to +use fixed values (4K) until we read the super block and can set the +sectorsize. + +Replace all uses, in most cases it's fewer pointer indirections. + +Reviewed-by: Josef Bacik +Reviewed-by: Anand Jain +Signed-off-by: David Sterba +Stable-dep-of: 86211eea8ae1 ("btrfs: qgroup: validate btrfs_qgroup_inherit parameter") +Signed-off-by: Sasha Levin +--- + fs/btrfs/disk-io.c | 2 ++ + fs/btrfs/extent_io.c | 4 ++-- + fs/btrfs/inode.c | 2 +- + fs/btrfs/ioctl.c | 2 +- + fs/btrfs/reflink.c | 6 +++--- + fs/btrfs/send.c | 2 +- + fs/btrfs/super.c | 2 +- + 7 files changed, 11 insertions(+), 9 deletions(-) + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index c843563914cad..091104a327326 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2839,6 +2839,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block + int ret; + + fs_info->sb = sb; ++ /* Temporary fixed values for block size until we read the superblock. 
*/ + sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; + sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); + +@@ -3356,6 +3357,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device + sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); + sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); + ++ /* Update the values for the current filesystem. */ + sb->s_blocksize = sectorsize; + sb->s_blocksize_bits = blksize_bits(sectorsize); + memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 8b88cd08ab053..3b47654ed3e8f 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -1022,7 +1022,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + int ret = 0; + size_t pg_offset = 0; + size_t iosize; +- size_t blocksize = inode->i_sb->s_blocksize; ++ size_t blocksize = fs_info->sectorsize; + struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; + + ret = set_page_extent_mapped(page); +@@ -2313,7 +2313,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree, + struct extent_state *cached_state = NULL; + u64 start = folio_pos(folio); + u64 end = start + folio_size(folio) - 1; +- size_t blocksize = folio->mapping->host->i_sb->s_blocksize; ++ size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize; + + /* This function is only called for the btree inode */ + ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 4795738d5785b..bd3f348d503dc 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -8723,7 +8723,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap, + u64 delalloc_bytes; + u64 inode_bytes; + struct inode *inode = d_inode(path->dentry); +- u32 blocksize = inode->i_sb->s_blocksize; ++ u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; + u32 bi_flags = BTRFS_I(inode)->flags; + u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; + +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 9d1eac15e09e1..8d80903e9bff6 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -528,7 +528,7 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + * block group is in the logical address space, which can be any + * sectorsize aligned bytenr in the range [0, U64_MAX]. 
+ */ +- if (range.len < fs_info->sb->s_blocksize) ++ if (range.len < fs_info->sectorsize) + return -EINVAL; + + range.minlen = max(range.minlen, minlen); +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c +index ae90894dc7dc7..e38cb40e150c9 100644 +--- a/fs/btrfs/reflink.c ++++ b/fs/btrfs/reflink.c +@@ -663,7 +663,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, + struct inode *dst, u64 dst_loff) + { + struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info; +- const u64 bs = fs_info->sb->s_blocksize; ++ const u64 bs = fs_info->sectorsize; + int ret; + + /* +@@ -730,7 +730,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, + int ret; + int wb_ret; + u64 len = olen; +- u64 bs = fs_info->sb->s_blocksize; ++ u64 bs = fs_info->sectorsize; + + /* + * VFS's generic_remap_file_range_prep() protects us from cloning the +@@ -796,7 +796,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, + { + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); +- u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize; ++ u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize; + u64 wb_len; + int ret; + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index e48a063ef0851..e9516509b2761 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -6140,7 +6140,7 @@ static int send_write_or_clone(struct send_ctx *sctx, + int ret = 0; + u64 offset = key->offset; + u64 end; +- u64 bs = sctx->send_root->fs_info->sb->s_blocksize; ++ u64 bs = sctx->send_root->fs_info->sectorsize; + + end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size); + if (offset >= end) +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 101f786963d4d..c45fdaf24cd1c 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -1767,7 +1767,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) + buf->f_bavail = 0; + + buf->f_type = BTRFS_SUPER_MAGIC; +- buf->f_bsize = dentry->d_sb->s_blocksize; ++ buf->f_bsize = fs_info->sectorsize; + buf->f_namelen = BTRFS_NAME_LEN; + + /* We treat it as constant endianness (it doesn't matter _which_) +-- +2.43.0 + diff --git a/queue-6.8/cifs-allow-changing-password-during-remount.patch b/queue-6.8/cifs-allow-changing-password-during-remount.patch new file mode 100644 index 00000000000..7f36517e00f --- /dev/null +++ b/queue-6.8/cifs-allow-changing-password-during-remount.patch @@ -0,0 +1,138 @@ +From b81a06ac88d8f9b189dd671b478e1d97d6d20f09 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Feb 2024 00:40:01 -0600 +Subject: cifs: allow changing password during remount + +From: Steve French + +[ Upstream commit c1eb537bf4560b3ad4df606c266c665624f3b502 ] + +There are cases where a session is disconnected and password has changed +on the server (or expired) for this user and this currently can not +be fixed without unmount and mounting again. This patch allows +remount to change the password (for the non Kerberos case, Kerberos +ticket refresh is handled differently) when the session is disconnected +and the user can not reconnect due to still using old password. 
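+
+As an illustration of the intended flow (the mount point below is a
+placeholder, not taken from this change), a user whose session dropped
+after a server-side password change could pass the new password on
+remount:
+
+	mount -o remount,password=NEWPASS /mnt/share
+
+and the next reconnect of the disconnected session would then use the
+updated credentials.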
+ +Future patches should also allow us to setup the keyring (cifscreds) +to have an "alternate password" so we would be able to change +the password before the session drops (without the risk of races +between when the password changes and the disconnect occurs - +ie cases where the old password is still needed because the new +password has not fully rolled out to all servers yet). + +Cc: stable@vger.kernel.org +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/client/cifs_debug.c | 2 ++ + fs/smb/client/cifsglob.h | 1 + + fs/smb/client/fs_context.c | 27 ++++++++++++++++++++++----- + fs/smb/client/smb2pdu.c | 5 +++++ + 4 files changed, 30 insertions(+), 5 deletions(-) + +diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c +index 3e4209f41c18f..23d2622b969f0 100644 +--- a/fs/smb/client/cifs_debug.c ++++ b/fs/smb/client/cifs_debug.c +@@ -488,6 +488,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) + ses->ses_count, ses->serverOS, ses->serverNOS, + ses->capabilities, ses->ses_status); + } ++ if (ses->expired_pwd) ++ seq_puts(m, "password no longer valid "); + spin_unlock(&ses->ses_lock); + + seq_printf(m, "\n\tSecurity type: %s ", +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index 53c75cfb33ab9..ec9a26bd05a12 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -1066,6 +1066,7 @@ struct cifs_ses { + enum securityEnum sectype; /* what security flavor was specified? */ + bool sign; /* is signing required? */ + bool domainAuto:1; ++ bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */ + unsigned int flags; + __u16 session_flags; + __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; +diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c +index 4b2f5aa2ea0e1..415e87635d5aa 100644 +--- a/fs/smb/client/fs_context.c ++++ b/fs/smb/client/fs_context.c +@@ -772,7 +772,7 @@ static void smb3_fs_context_free(struct fs_context *fc) + */ + static int smb3_verify_reconfigure_ctx(struct fs_context *fc, + struct smb3_fs_context *new_ctx, +- struct smb3_fs_context *old_ctx) ++ struct smb3_fs_context *old_ctx, bool need_recon) + { + if (new_ctx->posix_paths != old_ctx->posix_paths) { + cifs_errorf(fc, "can not change posixpaths during remount\n"); +@@ -798,8 +798,15 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc, + } + if (new_ctx->password && + (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) { +- cifs_errorf(fc, "can not change password during remount\n"); +- return -EINVAL; ++ if (need_recon == false) { ++ cifs_errorf(fc, ++ "can not change password of active session during remount\n"); ++ return -EINVAL; ++ } else if (old_ctx->sectype == Kerberos) { ++ cifs_errorf(fc, ++ "can not change password for Kerberos via remount\n"); ++ return -EINVAL; ++ } + } + if (new_ctx->domainname && + (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) { +@@ -843,9 +850,14 @@ static int smb3_reconfigure(struct fs_context *fc) + struct smb3_fs_context *ctx = smb3_fc2context(fc); + struct dentry *root = fc->root; + struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); ++ struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses; ++ bool need_recon = false; + int rc; + +- rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx); ++ if (ses->expired_pwd) ++ need_recon = true; ++ ++ rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx, need_recon); + if (rc) + return rc; + +@@ -858,7 +870,12 @@ static int smb3_reconfigure(struct 
fs_context *fc) + STEAL_STRING(cifs_sb, ctx, UNC); + STEAL_STRING(cifs_sb, ctx, source); + STEAL_STRING(cifs_sb, ctx, username); +- STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); ++ if (need_recon == false) ++ STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); ++ else { ++ kfree_sensitive(ses->password); ++ ses->password = kstrdup(ctx->password, GFP_KERNEL); ++ } + STEAL_STRING(cifs_sb, ctx, domainname); + STEAL_STRING(cifs_sb, ctx, nodename); + STEAL_STRING(cifs_sb, ctx, iocharset); +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index 608ee05491e26..a500380d1b2e9 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -1536,6 +1536,11 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) + &sess_data->buf0_type, + CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov); + cifs_small_buf_release(sess_data->iov[0].iov_base); ++ if (rc == 0) ++ sess_data->ses->expired_pwd = false; ++ else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) ++ sess_data->ses->expired_pwd = true; ++ + memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); + + return rc; +-- +2.43.0 + diff --git a/queue-6.8/cifs-make-sure-server-interfaces-are-requested-only-.patch b/queue-6.8/cifs-make-sure-server-interfaces-are-requested-only-.patch new file mode 100644 index 00000000000..04068b316b9 --- /dev/null +++ b/queue-6.8/cifs-make-sure-server-interfaces-are-requested-only-.patch @@ -0,0 +1,115 @@ +From c26317933842f78a19d03a2b0349a0bef0a3b73a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 10:40:41 +0000 +Subject: cifs: make sure server interfaces are requested only for SMB3+ +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Shyam Prasad N + +[ Upstream commit 13c0a74747cb7fdadf58c5d3a7d52cfca2d51736 ] + +Some code paths for querying server interfaces make a false +assumption that it will only get called for SMB3+. Since this +function now can get called from a generic code paths, the correct +thing to do is to have specific handler for this functionality +per SMB dialect, and call this handler. + +This change adds such a handler and implements this handler only +for SMB 3.0 and 3.1.1. 
+ +Cc: stable@vger.kernel.org +Cc: Jan Čermák +Reported-by: Paulo Alcantara +Signed-off-by: Shyam Prasad N +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/client/cifsglob.h | 3 +++ + fs/smb/client/connect.c | 6 +++++- + fs/smb/client/smb2ops.c | 2 ++ + fs/smb/client/smb2pdu.c | 5 +++-- + 4 files changed, 13 insertions(+), 3 deletions(-) + +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index ec9a26bd05a12..06e81afe31c14 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -346,6 +346,9 @@ struct smb_version_operations { + /* informational QFS call */ + void (*qfs_tcon)(const unsigned int, struct cifs_tcon *, + struct cifs_sb_info *); ++ /* query for server interfaces */ ++ int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *, ++ bool); + /* check if a path is accessible or not */ + int (*is_path_accessible)(const unsigned int, struct cifs_tcon *, + struct cifs_sb_info *, const char *); +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index ac9595504f4b1..2341604606157 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -123,12 +123,16 @@ static void smb2_query_server_interfaces(struct work_struct *work) + struct cifs_tcon *tcon = container_of(work, + struct cifs_tcon, + query_interfaces.work); ++ struct TCP_Server_Info *server = tcon->ses->server; + + /* + * query server network interfaces, in case they change + */ ++ if (!server->ops->query_server_interfaces) ++ return; ++ + xid = get_xid(); +- rc = SMB3_request_interfaces(xid, tcon, false); ++ rc = server->ops->query_server_interfaces(xid, tcon, false); + free_xid(xid); + + if (rc) { +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index 4695433fcf397..3b8896987197e 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -5538,6 +5538,7 @@ struct smb_version_operations smb30_operations = { + .tree_connect = SMB2_tcon, + .tree_disconnect = SMB2_tdis, + .qfs_tcon = smb3_qfs_tcon, ++ .query_server_interfaces = SMB3_request_interfaces, + .is_path_accessible = smb2_is_path_accessible, + .can_echo = smb2_can_echo, + .echo = SMB2_echo, +@@ -5653,6 +5654,7 @@ struct smb_version_operations smb311_operations = { + .tree_connect = SMB2_tcon, + .tree_disconnect = SMB2_tdis, + .qfs_tcon = smb3_qfs_tcon, ++ .query_server_interfaces = SMB3_request_interfaces, + .is_path_accessible = smb2_is_path_accessible, + .can_echo = smb2_can_echo, + .echo = SMB2_echo, +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index a500380d1b2e9..92c3710df6159 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -409,14 +409,15 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, + spin_unlock(&ses->ses_lock); + + if (!rc && +- (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { ++ (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) && ++ server->ops->query_server_interfaces) { + mutex_unlock(&ses->session_mutex); + + /* + * query server network interfaces, in case they change + */ + xid = get_xid(); +- rc = SMB3_request_interfaces(xid, tcon, false); ++ rc = server->ops->query_server_interfaces(xid, tcon, false); + free_xid(xid); + + if (rc == -EOPNOTSUPP && ses->chan_count > 1) { +-- +2.43.0 + diff --git a/queue-6.8/cifs-open_cached_dir-add-file_read_ea-to-desired-acc.patch b/queue-6.8/cifs-open_cached_dir-add-file_read_ea-to-desired-acc.patch new file mode 100644 index 00000000000..a5b5e1ba798 --- /dev/null +++ 
b/queue-6.8/cifs-open_cached_dir-add-file_read_ea-to-desired-acc.patch @@ -0,0 +1,41 @@ +From 11a84a2e5a18809e5ec448c19fe18c01bf4cc27d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 17:53:44 +0300 +Subject: cifs: open_cached_dir(): add FILE_READ_EA to desired access + +From: Eugene Korenevsky + +[ Upstream commit f1b8224b4e6ed59e7e6f5c548673c67410098d8d ] + +Since smb2_query_eas() reads EA and uses cached directory, +open_cached_dir() should request FILE_READ_EA access. + +Otherwise listxattr() and getxattr() will fail with EACCES +(0xc0000022 STATUS_ACCESS_DENIED SMB status). + +Link: https://bugzilla.kernel.org/show_bug.cgi?id=218543 +Cc: stable@vger.kernel.org +Signed-off-by: Eugene Korenevsky +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/client/cached_dir.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c +index 3de5047a7ff98..a0017724d5239 100644 +--- a/fs/smb/client/cached_dir.c ++++ b/fs/smb/client/cached_dir.c +@@ -239,7 +239,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + .tcon = tcon, + .path = path, + .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE), +- .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES, ++ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES | ++ FILE_READ_EA, + .disposition = FILE_OPEN, + .fid = pfid, + .replay = !!(retries), +-- +2.43.0 + diff --git a/queue-6.8/cifs-prevent-updating-file-size-from-server-if-we-ha.patch b/queue-6.8/cifs-prevent-updating-file-size-from-server-if-we-ha.patch new file mode 100644 index 00000000000..f6544e1ea5f --- /dev/null +++ b/queue-6.8/cifs-prevent-updating-file-size-from-server-if-we-ha.patch @@ -0,0 +1,169 @@ +From e8863f06e30a43759d91436b4ae8b4eb934c80f1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 23:09:52 +0530 +Subject: cifs: prevent updating file size from server if we have a read/write + lease + +From: Bharath SM + +[ Upstream commit e4b61f3b1c67f5068590965f64ea6e8d5d5bd961 ] + +In cases of large directories, the readdir operation may span multiple +round trips to retrieve contents. This introduces a potential race +condition in case of concurrent write and readdir operations. If the +readdir operation initiates before a write has been processed by the +server, it may update the file size attribute to an older value. +Address this issue by avoiding file size updates from readdir when we +have read/write lease. 
+ +Scenario: +1) process1: open dir xyz +2) process1: readdir instance 1 on xyz +3) process2: create file.txt for write +4) process2: write x bytes to file.txt +5) process2: close file.txt +6) process2: open file.txt for read +7) process1: readdir 2 - overwrites file.txt inode size to 0 +8) process2: read contents of file.txt - bug, short read with 0 bytes + +Cc: stable@vger.kernel.org +Reviewed-by: Shyam Prasad N +Signed-off-by: Bharath SM +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/client/cifsproto.h | 6 ++++-- + fs/smb/client/file.c | 8 +++++--- + fs/smb/client/inode.c | 13 +++++++------ + fs/smb/client/readdir.c | 2 +- + 4 files changed, 17 insertions(+), 12 deletions(-) + +diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h +index a841bf4967fa4..58cfbd450a55e 100644 +--- a/fs/smb/client/cifsproto.h ++++ b/fs/smb/client/cifsproto.h +@@ -144,7 +144,8 @@ extern int cifs_reconnect(struct TCP_Server_Info *server, + extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr); + extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *); + extern bool backup_cred(struct cifs_sb_info *); +-extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); ++extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof, ++ bool from_readdir); + extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, + unsigned int bytes_written); + extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int); +@@ -201,7 +202,8 @@ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, + struct cifs_sb_info *cifs_sb); + extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *, + struct cifs_sb_info *); +-extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr); ++extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, ++ bool from_readdir); + extern struct inode *cifs_iget(struct super_block *sb, + struct cifs_fattr *fattr); + +diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c +index 98514f2f2d7b1..9d42a39009076 100644 +--- a/fs/smb/client/file.c ++++ b/fs/smb/client/file.c +@@ -329,7 +329,7 @@ int cifs_posix_open(const char *full_path, struct inode **pinode, + } + } else { + cifs_revalidate_mapping(*pinode); +- rc = cifs_fattr_to_inode(*pinode, &fattr); ++ rc = cifs_fattr_to_inode(*pinode, &fattr, false); + } + + posix_open_ret: +@@ -4769,12 +4769,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode) + refreshing the inode only on increases in the file size + but this is tricky to do without racing with writebehind + page caching in the current Linux kernel design */ +-bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) ++bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file, ++ bool from_readdir) + { + if (!cifsInode) + return true; + +- if (is_inode_writable(cifsInode)) { ++ if (is_inode_writable(cifsInode) || ++ ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) { + /* This inode is open for write at least once */ + struct cifs_sb_info *cifs_sb; + +diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c +index d02f8ba29cb5b..7f28edf4b20f3 100644 +--- a/fs/smb/client/inode.c ++++ b/fs/smb/client/inode.c +@@ -147,7 +147,8 @@ cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) + + /* populate an inode with info from a cifs_fattr struct */ + int +-cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) 
++cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, ++ bool from_readdir) + { + struct cifsInodeInfo *cifs_i = CIFS_I(inode); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); +@@ -199,7 +200,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) + * Can't safely change the file size here if the client is writing to + * it due to potential races. + */ +- if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { ++ if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) { + i_size_write(inode, fattr->cf_eof); + + /* +@@ -368,7 +369,7 @@ static int update_inode_info(struct super_block *sb, + CIFS_I(*inode)->time = 0; /* force reval */ + return -ESTALE; + } +- return cifs_fattr_to_inode(*inode, fattr); ++ return cifs_fattr_to_inode(*inode, fattr, false); + } + + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY +@@ -403,7 +404,7 @@ cifs_get_file_info_unix(struct file *filp) + } else + goto cifs_gfiunix_out; + +- rc = cifs_fattr_to_inode(inode, &fattr); ++ rc = cifs_fattr_to_inode(inode, &fattr, false); + + cifs_gfiunix_out: + free_xid(xid); +@@ -934,7 +935,7 @@ cifs_get_file_info(struct file *filp) + fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; + fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; + /* if filetype is different, return error */ +- rc = cifs_fattr_to_inode(inode, &fattr); ++ rc = cifs_fattr_to_inode(inode, &fattr, false); + cgfi_exit: + cifs_free_open_info(&data); + free_xid(xid); +@@ -1491,7 +1492,7 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr) + } + + /* can't fail - see cifs_find_inode() */ +- cifs_fattr_to_inode(inode, fattr); ++ cifs_fattr_to_inode(inode, fattr, false); + if (sb->s_flags & SB_NOATIME) + inode->i_flags |= S_NOATIME | S_NOCMTIME; + if (inode->i_state & I_NEW) { +diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c +index b520eea7bfce8..132ae7d884a97 100644 +--- a/fs/smb/client/readdir.c ++++ b/fs/smb/client/readdir.c +@@ -148,7 +148,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, + rc = -ESTALE; + } + } +- if (!rc && !cifs_fattr_to_inode(inode, fattr)) { ++ if (!rc && !cifs_fattr_to_inode(inode, fattr, true)) { + dput(dentry); + return; + } +-- +2.43.0 + diff --git a/queue-6.8/cifs-reduce-warning-log-level-for-server-not-adverti.patch b/queue-6.8/cifs-reduce-warning-log-level-for-server-not-adverti.patch new file mode 100644 index 00000000000..1ca039262f5 --- /dev/null +++ b/queue-6.8/cifs-reduce-warning-log-level-for-server-not-adverti.patch @@ -0,0 +1,55 @@ +From f50c4d11f59b47b70a986bc394671660edb7f741 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 10:40:40 +0000 +Subject: cifs: reduce warning log level for server not advertising interfaces +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Shyam Prasad N + +[ Upstream commit 16a57d7681110b25708c7042688412238e6f73a9 ] + +Several users have reported this log getting dumped too regularly to +kernel log. The likely root cause has been identified, and it suggests +that this situation is expected for some configurations +(for example SMB2.1). + +Since the function returns appropriately even for such cases, it is +fairly harmless to make this a debug log. When needed, the verbosity +can be increased to capture this log. 
+ +Cc: stable@vger.kernel.org +Reported-by: Jan Čermák +Signed-off-by: Shyam Prasad N +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/client/sess.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c +index 8f37373fd3334..3216f786908fb 100644 +--- a/fs/smb/client/sess.c ++++ b/fs/smb/client/sess.c +@@ -230,7 +230,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses) + spin_lock(&ses->iface_lock); + if (!ses->iface_count) { + spin_unlock(&ses->iface_lock); +- cifs_dbg(VFS, "server %s does not advertise interfaces\n", ++ cifs_dbg(ONCE, "server %s does not advertise interfaces\n", + ses->server->hostname); + break; + } +@@ -396,7 +396,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) + spin_lock(&ses->iface_lock); + if (!ses->iface_count) { + spin_unlock(&ses->iface_lock); +- cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname); ++ cifs_dbg(ONCE, "server %s does not advertise interfaces\n", ses->server->hostname); + return; + } + +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-camcc-sc8280xp-fix-terminating-of-frequency.patch b/queue-6.8/clk-qcom-camcc-sc8280xp-fix-terminating-of-frequency.patch new file mode 100644 index 00000000000..3354f84f421 --- /dev/null +++ b/queue-6.8/clk-qcom-camcc-sc8280xp-fix-terminating-of-frequency.patch @@ -0,0 +1,203 @@ +From b21580d2ffacacf9f5fd9a1b1176d4c4aba3b244 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:50 +0100 +Subject: clk: qcom: camcc-sc8280xp: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit 6a3d70f7802a98e6c28a74f997a264118b9f50cd ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Only compile tested. 
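+
+For context, the table walkers in the qcom clk driver stop at a zeroed
+sentinel entry, roughly like this (simplified sketch, not the exact
+in-tree code):
+
+	for (; f->freq; f++)	/* the { } terminator ends the walk */
+		if (rate <= f->freq)
+			return f;
+
+Without the terminating entry the loop can run past the end of the
+array.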
+ +Fixes: ff93872a9c61 ("clk: qcom: camcc-sc8280xp: Add sc8280xp CAMCC") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-5-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/camcc-sc8280xp.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +diff --git a/drivers/clk/qcom/camcc-sc8280xp.c b/drivers/clk/qcom/camcc-sc8280xp.c +index 3dcd79b015151..7f0ae9a5f28b2 100644 +--- a/drivers/clk/qcom/camcc-sc8280xp.c ++++ b/drivers/clk/qcom/camcc-sc8280xp.c +@@ -630,6 +630,7 @@ static const struct freq_tbl ftbl_camcc_bps_clk_src[] = { + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), + F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_bps_clk_src = { +@@ -654,6 +655,7 @@ static const struct freq_tbl ftbl_camcc_camnoc_axi_clk_src[] = { + F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0), + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_camnoc_axi_clk_src = { +@@ -673,6 +675,7 @@ static struct clk_rcg2 camcc_camnoc_axi_clk_src = { + static const struct freq_tbl ftbl_camcc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAMCC_PLL0_OUT_EVEN, 16, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_cci_0_clk_src = { +@@ -735,6 +738,7 @@ static const struct freq_tbl ftbl_camcc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(240000000, P_CAMCC_PLL0_OUT_EVEN, 2.5, 0, 0), + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_cphy_rx_clk_src = { +@@ -754,6 +758,7 @@ static struct clk_rcg2 camcc_cphy_rx_clk_src = { + static const struct freq_tbl ftbl_camcc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_csi0phytimer_clk_src = { +@@ -818,6 +823,7 @@ static const struct freq_tbl ftbl_camcc_fast_ahb_clk_src[] = { + F(200000000, P_CAMCC_PLL0_OUT_EVEN, 3, 0, 0), + F(300000000, P_CAMCC_PLL0_OUT_MAIN, 4, 0, 0), + F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_fast_ahb_clk_src = { +@@ -838,6 +844,7 @@ static const struct freq_tbl ftbl_camcc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_icp_clk_src = { +@@ -860,6 +867,7 @@ static const struct freq_tbl ftbl_camcc_ife_0_clk_src[] = { + F(558000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_0_clk_src = { +@@ -883,6 +891,7 @@ static const struct freq_tbl ftbl_camcc_ife_0_csid_clk_src[] = { + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_0_csid_clk_src = { +@@ -905,6 +914,7 @@ static const struct freq_tbl ftbl_camcc_ife_1_clk_src[] = { + F(558000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_1_clk_src = { +@@ -941,6 +951,7 @@ static const struct freq_tbl 
ftbl_camcc_ife_2_clk_src[] = { + F(558000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_2_clk_src = { +@@ -962,6 +973,7 @@ static const struct freq_tbl ftbl_camcc_ife_2_csid_clk_src[] = { + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_2_csid_clk_src = { +@@ -984,6 +996,7 @@ static const struct freq_tbl ftbl_camcc_ife_3_clk_src[] = { + F(558000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0), + F(760000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_3_clk_src = { +@@ -1020,6 +1033,7 @@ static const struct freq_tbl ftbl_camcc_ife_lite_0_clk_src[] = { + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ife_lite_0_clk_src = { +@@ -1140,6 +1154,7 @@ static const struct freq_tbl ftbl_camcc_ipe_0_clk_src[] = { + F(475000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0), + F(520000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_ipe_0_clk_src = { +@@ -1163,6 +1178,7 @@ static const struct freq_tbl ftbl_camcc_jpeg_clk_src[] = { + F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_jpeg_clk_src = { +@@ -1184,6 +1200,7 @@ static const struct freq_tbl ftbl_camcc_lrme_clk_src[] = { + F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0), + F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0), + F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_lrme_clk_src = { +@@ -1204,6 +1221,7 @@ static const struct freq_tbl ftbl_camcc_mclk0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24000000, P_CAMCC_PLL2_OUT_EARLY, 10, 1, 4), + F(64000000, P_CAMCC_PLL2_OUT_EARLY, 15, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_mclk0_clk_src = { +@@ -1320,6 +1338,7 @@ static struct clk_rcg2 camcc_mclk7_clk_src = { + + static const struct freq_tbl ftbl_camcc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_sleep_clk_src = { +@@ -1339,6 +1358,7 @@ static struct clk_rcg2 camcc_sleep_clk_src = { + static const struct freq_tbl ftbl_camcc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAMCC_PLL7_OUT_EVEN, 6, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_slow_ahb_clk_src = { +@@ -1357,6 +1377,7 @@ static struct clk_rcg2 camcc_slow_ahb_clk_src = { + + static const struct freq_tbl ftbl_camcc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 camcc_xo_clk_src = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-gcc-ipq5018-fix-terminating-of-frequency-ta.patch b/queue-6.8/clk-qcom-gcc-ipq5018-fix-terminating-of-frequency-ta.patch new file mode 100644 index 00000000000..1f9928d023e --- /dev/null +++ b/queue-6.8/clk-qcom-gcc-ipq5018-fix-terminating-of-frequency-ta.patch @@ -0,0 +1,57 @@ +From 4362223d13de42e7afd4a1e41fe8f8f647e6fa0e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:46 +0100 +Subject: clk: qcom: gcc-ipq5018: fix terminating of frequency table arrays + +From: Gabor 
Juhos + +[ Upstream commit 90ad946fff70f312b8d23226afc38c13ddd88c4b ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Fixes: e3fdbef1bab8 ("clk: qcom: Add Global Clock controller (GCC) driver for IPQ5018") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-1-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/gcc-ipq5018.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c +index e2bd54826a4ce..c1732d70e3a23 100644 +--- a/drivers/clk/qcom/gcc-ipq5018.c ++++ b/drivers/clk/qcom/gcc-ipq5018.c +@@ -857,6 +857,7 @@ static struct clk_rcg2 lpass_sway_clk_src = { + + static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = { + F(2000000, P_XO, 12, 0, 0), ++ { } + }; + + static struct clk_rcg2 pcie0_aux_clk_src = { +@@ -1099,6 +1100,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = { + F(100000000, P_GPLL0, 8, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + F(320000000, P_GPLL0, 2.5, 0, 0), ++ { } + }; + + static struct clk_rcg2 qpic_io_macro_clk_src = { +@@ -1194,6 +1196,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = { + static const struct freq_tbl ftbl_ubi0_core_clk_src[] = { + F(850000000, P_UBI32_PLL, 1, 0, 0), + F(1000000000, P_UBI32_PLL, 1, 0, 0), ++ { } + }; + + static struct clk_rcg2 ubi0_core_clk_src = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-gcc-ipq6018-fix-terminating-of-frequency-ta.patch b/queue-6.8/clk-qcom-gcc-ipq6018-fix-terminating-of-frequency-ta.patch new file mode 100644 index 00000000000..cbd6b5f0ff6 --- /dev/null +++ b/queue-6.8/clk-qcom-gcc-ipq6018-fix-terminating-of-frequency-ta.patch @@ -0,0 +1,51 @@ +From 004738c5f4ce6817a4eabd10fc4865f1c9ea3972 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:47 +0100 +Subject: clk: qcom: gcc-ipq6018: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit cdbc6e2d8108bc47895e5a901cfcaf799b00ca8d ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Only compile tested. 
+ +Fixes: d9db07f088af ("clk: qcom: Add ipq6018 Global Clock Controller support") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-2-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/gcc-ipq6018.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c +index b366912cd6480..ef1e2ce4804d2 100644 +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -1554,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = { + + static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), ++ { } + }; + + static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = { +@@ -1734,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { + F(160000000, P_GPLL0, 5, 0, 0), + F(216000000, P_GPLL6, 5, 0, 0), + F(308570000, P_GPLL6, 3.5, 0, 0), ++ { } + }; + + static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-gcc-ipq8074-fix-terminating-of-frequency-ta.patch b/queue-6.8/clk-qcom-gcc-ipq8074-fix-terminating-of-frequency-ta.patch new file mode 100644 index 00000000000..5c62888ac3f --- /dev/null +++ b/queue-6.8/clk-qcom-gcc-ipq8074-fix-terminating-of-frequency-ta.patch @@ -0,0 +1,51 @@ +From 1ddc7ccfddef8e33f92a7c9a877f47fae4f5278a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:48 +0100 +Subject: clk: qcom: gcc-ipq8074: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit 1040ef5ed95d6fd2628bad387d78a61633e09429 ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Only compile tested. 
+ +Fixes: 9607f6224b39 ("clk: qcom: ipq8074: add PCIE, USB and SDCC clocks") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-3-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/gcc-ipq8074.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c +index b7faf12a511a1..7bc679871f324 100644 +--- a/drivers/clk/qcom/gcc-ipq8074.c ++++ b/drivers/clk/qcom/gcc-ipq8074.c +@@ -644,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { + + static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), ++ { } + }; + + static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = { +@@ -795,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(160000000, P_GPLL0, 5, 0, 0), + F(308570000, P_GPLL6, 3.5, 0, 0), ++ { } + }; + + static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-gcc-ipq9574-fix-terminating-of-frequency-ta.patch b/queue-6.8/clk-qcom-gcc-ipq9574-fix-terminating-of-frequency-ta.patch new file mode 100644 index 00000000000..8eb9470cad0 --- /dev/null +++ b/queue-6.8/clk-qcom-gcc-ipq9574-fix-terminating-of-frequency-ta.patch @@ -0,0 +1,43 @@ +From ffad125fe3f6e15472f2184e20a32a72ea0c956b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:49 +0100 +Subject: clk: qcom: gcc-ipq9574: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit bd2b6395671d823caa38d8e4d752de2448ae61e1 ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Only compile tested. 
+ +Fixes: d75b82cff488 ("clk: qcom: Add Global Clock Controller driver for IPQ9574") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-4-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/gcc-ipq9574.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c +index e8190108e1aef..0a3f846695b80 100644 +--- a/drivers/clk/qcom/gcc-ipq9574.c ++++ b/drivers/clk/qcom/gcc-ipq9574.c +@@ -2082,6 +2082,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = { + static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { + F(150000000, P_GPLL4, 8, 0, 0), + F(300000000, P_GPLL4, 4, 0, 0), ++ { } + }; + + static struct clk_rcg2 sdcc1_ice_core_clk_src = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-gcc-sdm845-add-soft-dependency-on-rpmhpd.patch b/queue-6.8/clk-qcom-gcc-sdm845-add-soft-dependency-on-rpmhpd.patch new file mode 100644 index 00000000000..4df7a2c0e9a --- /dev/null +++ b/queue-6.8/clk-qcom-gcc-sdm845-add-soft-dependency-on-rpmhpd.patch @@ -0,0 +1,40 @@ +From c5c2c91d677706ca0fc293c2417cd92ccffa2fa4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 23 Jan 2024 11:58:14 +0530 +Subject: clk: qcom: gcc-sdm845: Add soft dependency on rpmhpd + +From: Amit Pundir + +[ Upstream commit 1d9054e3a4fd36e2949e616f7360bdb81bcc1921 ] + +With the addition of RPMh power domain to the GCC node in +device tree, we noticed a significant delay in getting the +UFS driver probed on AOSP which futher led to mount failures +because Android do not support rootwait. So adding a soft +dependency on RPMh power domain which informs modprobe to +load rpmhpd module before gcc-sdm845. + +Cc: stable@vger.kernel.org # v5.4+ +Fixes: 4b6ea15c0a11 ("arm64: dts: qcom: sdm845: Add missing RPMh power domain to GCC") +Suggested-by: Manivannan Sadhasivam +Signed-off-by: Amit Pundir +Reviewed-by: Manivannan Sadhasivam +Link: https://lore.kernel.org/r/20240123062814.2555649-1-amit.pundir@linaro.org +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/gcc-sdm845.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c +index 725cd52d2398e..ea4c3bf4fb9bf 100644 +--- a/drivers/clk/qcom/gcc-sdm845.c ++++ b/drivers/clk/qcom/gcc-sdm845.c +@@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit); + MODULE_DESCRIPTION("QTI GCC SDM845 Driver"); + MODULE_LICENSE("GPL v2"); + MODULE_ALIAS("platform:gcc-sdm845"); ++MODULE_SOFTDEP("pre: rpmhpd"); +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-mmcc-apq8084-fix-terminating-of-frequency-t.patch b/queue-6.8/clk-qcom-mmcc-apq8084-fix-terminating-of-frequency-t.patch new file mode 100644 index 00000000000..ba59afbb51a --- /dev/null +++ b/queue-6.8/clk-qcom-mmcc-apq8084-fix-terminating-of-frequency-t.patch @@ -0,0 +1,51 @@ +From f3422c776baa656f72f08229fb45cd42017d0ab1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:51 +0100 +Subject: clk: qcom: mmcc-apq8084: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit a903cfd38d8dee7e754fb89fd1bebed99e28003d ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). 
+ +Only compile tested. + +Fixes: 2b46cd23a5a2 ("clk: qcom: Add APQ8084 Multimedia Clock Controller (MMCC) support") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-6-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/mmcc-apq8084.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c +index 02fc21208dd14..c89700ab93f9c 100644 +--- a/drivers/clk/qcom/mmcc-apq8084.c ++++ b/drivers/clk/qcom/mmcc-apq8084.c +@@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { + F(333430000, P_MMPLL1, 3.5, 0, 0), + F(400000000, P_MMPLL0, 2, 0, 0), + F(466800000, P_MMPLL1, 2.5, 0, 0), ++ { } + }; + + static struct clk_rcg2 mmss_axi_clk_src = { +@@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { + F(150000000, P_GPLL0, 4, 0, 0), + F(228570000, P_MMPLL0, 3.5, 0, 0), + F(320000000, P_MMPLL0, 2.5, 0, 0), ++ { } + }; + + static struct clk_rcg2 ocmemnoc_clk_src = { +-- +2.43.0 + diff --git a/queue-6.8/clk-qcom-mmcc-msm8974-fix-terminating-of-frequency-t.patch b/queue-6.8/clk-qcom-mmcc-msm8974-fix-terminating-of-frequency-t.patch new file mode 100644 index 00000000000..cb8feaf6edb --- /dev/null +++ b/queue-6.8/clk-qcom-mmcc-msm8974-fix-terminating-of-frequency-t.patch @@ -0,0 +1,51 @@ +From b1971dfdcaea163d1c5e8acf0ff240b1125ab221 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 19:07:52 +0100 +Subject: clk: qcom: mmcc-msm8974: fix terminating of frequency table arrays + +From: Gabor Juhos + +[ Upstream commit e2c02a85bf53ae86d79b5fccf0a75ac0b78e0c96 ] + +The frequency table arrays are supposed to be terminated with an +empty element. Add such entry to the end of the arrays where it +is missing in order to avoid possible out-of-bound access when +the table is traversed by functions like qcom_find_freq() or +qcom_find_freq_floor(). + +Only compile tested. 
+ +Fixes: d8b212014e69 ("clk: qcom: Add support for MSM8974's multimedia clock controller (MMCC)") +Signed-off-by: Gabor Juhos +Reviewed-by: Stephen Boyd +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240229-freq-table-terminator-v1-7-074334f0905c@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/clk/qcom/mmcc-msm8974.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c +index a31f6cf0c4e0c..36f460b78be2c 100644 +--- a/drivers/clk/qcom/mmcc-msm8974.c ++++ b/drivers/clk/qcom/mmcc-msm8974.c +@@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { + F(291750000, P_MMPLL1, 4, 0, 0), + F(400000000, P_MMPLL0, 2, 0, 0), + F(466800000, P_MMPLL1, 2.5, 0, 0), ++ { } + }; + + static struct clk_rcg2 mmss_axi_clk_src = { +@@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { + F(150000000, P_GPLL0, 4, 0, 0), + F(291750000, P_MMPLL1, 4, 0, 0), + F(400000000, P_MMPLL0, 2, 0, 0), ++ { } + }; + + static struct clk_rcg2 ocmemnoc_clk_src = { +-- +2.43.0 + diff --git a/queue-6.8/clocksource-drivers-timer-riscv-clear-timer-interrup.patch b/queue-6.8/clocksource-drivers-timer-riscv-clear-timer-interrup.patch new file mode 100644 index 00000000000..f324f04fd4d --- /dev/null +++ b/queue-6.8/clocksource-drivers-timer-riscv-clear-timer-interrup.patch @@ -0,0 +1,45 @@ +From 212ce198ef526ae5e52285b38adaf6d39a8973d1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 01:23:30 +0800 +Subject: clocksource/drivers/timer-riscv: Clear timer interrupt on timer + initialization + +From: Ley Foon Tan + +[ Upstream commit 8248ca30ef89f9cc74ace62ae1b9a22b5f16736c ] + +In the RISC-V specification, the stimecmp register doesn't have a default +value. To prevent the timer interrupt from being triggered during timer +initialization, clear the timer interrupt by writing stimecmp with a +maximum value. 
+ +Fixes: 9f7a8ff6391f ("RISC-V: Prefer sstc extension if available") +Cc: +Signed-off-by: Ley Foon Tan +Reviewed-by: Samuel Holland +Tested-by: Samuel Holland +Reviewed-by: Atish Patra +Signed-off-by: Daniel Lezcano +Link: https://lore.kernel.org/r/20240306172330.255844-1-leyfoon.tan@starfivetech.com +Signed-off-by: Sasha Levin +--- + drivers/clocksource/timer-riscv.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c +index e66dcbd665665..79bb9a98baa7b 100644 +--- a/drivers/clocksource/timer-riscv.c ++++ b/drivers/clocksource/timer-riscv.c +@@ -108,6 +108,9 @@ static int riscv_timer_starting_cpu(unsigned int cpu) + { + struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu); + ++ /* Clear timer interrupt */ ++ riscv_clock_event_stop(); ++ + ce->cpumask = cpumask_of(cpu); + ce->irq = riscv_clock_event_irq; + if (riscv_timer_cannot_wake_cpu) +-- +2.43.0 + diff --git a/queue-6.8/cpufreq-amd-pstate-fix-min_perf-assignment-in-amd_ps.patch b/queue-6.8/cpufreq-amd-pstate-fix-min_perf-assignment-in-amd_ps.patch new file mode 100644 index 00000000000..0fd34a29cf4 --- /dev/null +++ b/queue-6.8/cpufreq-amd-pstate-fix-min_perf-assignment-in-amd_ps.patch @@ -0,0 +1,41 @@ +From 5cc8faf31ce6b4c8801cd13d89029082f45a8ee8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Feb 2024 16:42:26 +0100 +Subject: cpufreq: amd-pstate: Fix min_perf assignment in + amd_pstate_adjust_perf() + +From: Tor Vic + +[ Upstream commit b26ffbf800ae3c8d01bdf90d9cd8a37e1606ff06 ] + +In the function amd_pstate_adjust_perf(), the 'min_perf' variable is set +to 'highest_perf' instead of 'lowest_perf'. + +Fixes: 1d215f0319c2 ("cpufreq: amd-pstate: Add fast switch function for AMD P-State") +Reported-by: Oleksandr Natalenko +Reviewed-by: Perry Yuan +Signed-off-by: Tor Vic +Reviewed-by: Mario Limonciello +Cc: 6.1+ # 6.1+ +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/cpufreq/amd-pstate.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c +index 1791d37fbc53c..07f3419954396 100644 +--- a/drivers/cpufreq/amd-pstate.c ++++ b/drivers/cpufreq/amd-pstate.c +@@ -570,7 +570,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu, + if (target_perf < capacity) + des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); + +- min_perf = READ_ONCE(cpudata->highest_perf); ++ min_perf = READ_ONCE(cpudata->lowest_perf); + if (_min_perf < capacity) + min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); + +-- +2.43.0 + diff --git a/queue-6.8/cpufreq-dt-always-allocate-zeroed-cpumask.patch b/queue-6.8/cpufreq-dt-always-allocate-zeroed-cpumask.patch new file mode 100644 index 00000000000..11260d2268f --- /dev/null +++ b/queue-6.8/cpufreq-dt-always-allocate-zeroed-cpumask.patch @@ -0,0 +1,46 @@ +From bb8938d4bd74cc76ce0f6e6c945215a2135026e7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 13:54:57 +0100 +Subject: cpufreq: dt: always allocate zeroed cpumask + +From: Marek Szyprowski + +[ Upstream commit d2399501c2c081eac703ca9597ceb83c7875a537 ] + +Commit 0499a78369ad ("ARM64: Dynamically allocate cpumasks and increase +supported CPUs to 512") changed the handling of cpumasks on ARM 64bit, +what resulted in the strange issues and warnings during cpufreq-dt +initialization on some big.LITTLE platforms. 
+ +This was caused by mixing OPPs between big and LITTLE cores, because +OPP-sharing information between big and LITTLE cores is computed on +cpumask, which in turn was not zeroed on allocation. Fix this by +switching to zalloc_cpumask_var() call. + +Fixes: dc279ac6e5b4 ("cpufreq: dt: Refactor initialization to handle probe deferral properly") +CC: stable@vger.kernel.org # v5.10+ +Signed-off-by: Marek Szyprowski +Reviewed-by: Christoph Lameter (Ampere) +Reviewed-by: Dhruva Gole +Signed-off-by: Viresh Kumar +Signed-off-by: Sasha Levin +--- + drivers/cpufreq/cpufreq-dt.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c +index 8bd6e5e8f121c..2d83bbc65dd0b 100644 +--- a/drivers/cpufreq/cpufreq-dt.c ++++ b/drivers/cpufreq/cpufreq-dt.c +@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) + if (!priv) + return -ENOMEM; + +- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) ++ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL)) + return -ENOMEM; + + cpumask_set_cpu(cpu, priv->cpus); +-- +2.43.0 + diff --git a/queue-6.8/cpufreq-limit-resolving-a-frequency-to-policy-min-ma.patch b/queue-6.8/cpufreq-limit-resolving-a-frequency-to-policy-min-ma.patch new file mode 100644 index 00000000000..dbd340e2481 --- /dev/null +++ b/queue-6.8/cpufreq-limit-resolving-a-frequency-to-policy-min-ma.patch @@ -0,0 +1,64 @@ +From c84af130412837f7a6c08f44d998dedb935c3e4b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 14:43:51 +0530 +Subject: cpufreq: Limit resolving a frequency to policy min/max + +From: Shivnandan Kumar + +[ Upstream commit d394abcb12bb1a6f309c1221fdb8e73594ecf1b4 ] + +Resolving a frequency to an efficient one should not transgress +policy->max (which can be set for thermal reason) and policy->min. + +Currently, there is possibility where scaling_cur_freq can exceed +scaling_max_freq when scaling_max_freq is an inefficient frequency. + +Add a check to ensure that resolving a frequency will respect +policy->min/max. + +Cc: All applicable +Fixes: 1f39fa0dccff ("cpufreq: Introducing CPUFREQ_RELATION_E") +Signed-off-by: Shivnandan Kumar +[ rjw: Whitespace adjustment, changelog edits ] +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + include/linux/cpufreq.h | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index afda5f24d3ddc..320fab7d2e940 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -1021,6 +1021,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + efficiencies); + } + ++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) ++{ ++ unsigned int freq; ++ ++ if (idx < 0) ++ return false; ++ ++ freq = policy->freq_table[idx].frequency; ++ ++ return freq == clamp_val(freq, policy->min, policy->max); ++} ++ + static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +@@ -1054,7 +1066,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + return 0; + } + +- if (idx < 0 && efficiencies) { ++ /* Limit frequency index to honor policy->min/max */ ++ if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { + efficiencies = false; + goto retry; + } +-- +2.43.0 + diff --git a/queue-6.8/crypto-qat-change-slas-cleanup-flow-at-shutdown.patch b/queue-6.8/crypto-qat-change-slas-cleanup-flow-at-shutdown.patch new file mode 100644 index 00000000000..bb989b4613a --- /dev/null +++ b/queue-6.8/crypto-qat-change-slas-cleanup-flow-at-shutdown.patch @@ -0,0 +1,77 @@ +From 9cfa110cf490e8ba896525f49a2454a162c93461 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Feb 2024 13:42:07 +0100 +Subject: crypto: qat - change SLAs cleanup flow at shutdown + +From: Damian Muszynski + +[ Upstream commit c2304e1a0b8051a60d4eb9c99a1c509d90380ae5 ] + +The implementation of the Rate Limiting (RL) feature includes the cleanup +of all SLAs during device shutdown. For each SLA, the firmware is notified +of the removal through an admin message, the data structures that take +into account the budgets are updated and the memory is freed. +However, this explicit cleanup is not necessary as (1) the device is +reset, and the firmware state is lost and (2) all RL data structures +are freed anyway. + +In addition, if the device is unresponsive, for example after a PCI +AER error is detected, the admin interface might not be available. +This might slow down the shutdown sequence and cause a timeout in +the recovery flows which in turn makes the driver believe that the +device is not recoverable. + +Fix by replacing the explicit SLAs removal with just a free of the +SLA data structures. 
+ +Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") +Cc: +Signed-off-by: Damian Muszynski +Reviewed-by: Giovanni Cabiddu +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + drivers/crypto/intel/qat/qat_common/adf_rl.c | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) + +diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c +index de1b214dba1f9..d4f2db3c53d8c 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c +@@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) + sla_type_arr[node_id] = NULL; + } + ++static void free_all_sla(struct adf_accel_dev *accel_dev) ++{ ++ struct adf_rl *rl_data = accel_dev->rate_limiting; ++ int sla_id; ++ ++ mutex_lock(&rl_data->rl_lock); ++ ++ for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) { ++ if (!rl_data->sla[sla_id]) ++ continue; ++ ++ kfree(rl_data->sla[sla_id]); ++ rl_data->sla[sla_id] = NULL; ++ } ++ ++ mutex_unlock(&rl_data->rl_lock); ++} ++ + /** + * add_update_sla() - handles the creation and the update of an SLA + * @accel_dev: pointer to acceleration device structure +@@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev) + return; + + adf_sysfs_rl_rm(accel_dev); +- adf_rl_remove_sla_all(accel_dev, true); ++ free_all_sla(accel_dev); + } + + void adf_rl_exit(struct adf_accel_dev *accel_dev) +-- +2.43.0 + diff --git a/queue-6.8/crypto-qat-resolve-race-condition-during-aer-recover.patch b/queue-6.8/crypto-qat-resolve-race-condition-during-aer-recover.patch new file mode 100644 index 00000000000..43d01083785 --- /dev/null +++ b/queue-6.8/crypto-qat-resolve-race-condition-during-aer-recover.patch @@ -0,0 +1,92 @@ +From 1e79f260230d40b763004a2001840c888107d3a4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 9 Feb 2024 13:43:42 +0100 +Subject: crypto: qat - resolve race condition during AER recovery + +From: Damian Muszynski + +[ Upstream commit 7d42e097607c4d246d99225bf2b195b6167a210c ] + +During the PCI AER system's error recovery process, the kernel driver +may encounter a race condition with freeing the reset_data structure's +memory. If the device restart will take more than 10 seconds the function +scheduling that restart will exit due to a timeout, and the reset_data +structure will be freed. However, this data structure is used for +completion notification after the restart is completed, which leads +to a UAF bug. + +This results in a KFENCE bug notice. + + BUG: KFENCE: use-after-free read in adf_device_reset_worker+0x38/0xa0 [intel_qat] + Use-after-free read at 0x00000000bc56fddf (in kfence-#142): + adf_device_reset_worker+0x38/0xa0 [intel_qat] + process_one_work+0x173/0x340 + +To resolve this race condition, the memory associated to the container +of the work_struct is freed on the worker if the timeout expired, +otherwise on the function that schedules the worker. +The timeout detection can be done by checking if the caller is +still waiting for completion or not by using completion_done() function. 
+ +Fixes: d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework") +Cc: +Signed-off-by: Damian Muszynski +Reviewed-by: Giovanni Cabiddu +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + drivers/crypto/intel/qat/qat_common/adf_aer.c | 22 ++++++++++++++----- + 1 file changed, 16 insertions(+), 6 deletions(-) + +diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c +index a39e70bd4b21b..621d14ea3b81a 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c +@@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work) + if (adf_dev_restart(accel_dev)) { + /* The device hanged and we can't restart it so stop here */ + dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); +- if (reset_data->mode == ADF_DEV_RESET_ASYNC) ++ if (reset_data->mode == ADF_DEV_RESET_ASYNC || ++ completion_done(&reset_data->compl)) + kfree(reset_data); + WARN(1, "QAT: device restart failed. Device is unusable\n"); + return; +@@ -100,11 +101,19 @@ static void adf_device_reset_worker(struct work_struct *work) + adf_dev_restarted_notify(accel_dev); + clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + +- /* The dev is back alive. Notify the caller if in sync mode */ +- if (reset_data->mode == ADF_DEV_RESET_SYNC) +- complete(&reset_data->compl); +- else ++ /* ++ * The dev is back alive. Notify the caller if in sync mode ++ * ++ * If device restart will take a more time than expected, ++ * the schedule_reset() function can timeout and exit. This can be ++ * detected by calling the completion_done() function. In this case ++ * the reset_data structure needs to be freed here. ++ */ ++ if (reset_data->mode == ADF_DEV_RESET_ASYNC || ++ completion_done(&reset_data->compl)) + kfree(reset_data); ++ else ++ complete(&reset_data->compl); + } + + static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, +@@ -137,8 +146,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, + dev_err(&GET_DEV(accel_dev), + "Reset device timeout expired\n"); + ret = -EFAULT; ++ } else { ++ kfree(reset_data); + } +- kfree(reset_data); + return ret; + } + return 0; +-- +2.43.0 + diff --git a/queue-6.8/cxl-trace-properly-initialize-cxl_poison-region-name.patch b/queue-6.8/cxl-trace-properly-initialize-cxl_poison-region-name.patch new file mode 100644 index 00000000000..d3f285be020 --- /dev/null +++ b/queue-6.8/cxl-trace-properly-initialize-cxl_poison-region-name.patch @@ -0,0 +1,89 @@ +From 6fc392d3e16071bcb375b29f45e86aeed805bb09 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 13:12:17 -0700 +Subject: cxl/trace: Properly initialize cxl_poison region name + +From: Alison Schofield + +[ Upstream commit 6c871260965255a1c142fb77ccee58b172d1690b ] + +The TP_STRUCT__entry that gets assigned the region name, or an +empty string if no region is present, is erroneously initialized +to the cxl_region pointer. It needs to be properly initialized +otherwise it's length is wrong and garbage chars can appear in +the kernel trace output: /sys/kernel/tracing/trace + +The bad initialization was due in part to a naming conflict with +the parameter: struct cxl_region *region. The field 'region' is +already exposed externally as the region name, so changing that +to something logical, like 'region_name' is not an option. Instead +rename the internal only struct cxl_region to the commonly used +'cxlr'. 
+ +Impact is that tooling depending on that trace data can miss +picking up a valid event when searching by region name. The +TP_printk() output, if enabled, does emit the correct region +names in the dmesg log. + +This was found during testing of the cxl-list option to report +media-errors for a region. + +Cc: Davidlohr Bueso +Cc: Jonathan Cameron +Cc: Dave Jiang +Cc: Vishal Verma +Cc: stable@vger.kernel.org +Fixes: ddf49d57b841 ("cxl/trace: Add TRACE support for CXL media-error records") +Signed-off-by: Alison Schofield +Reviewed-by: Ira Weiny +Acked-by: Dan Williams +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + drivers/cxl/core/trace.h | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h +index bdf117a33744b..e5f13260fc524 100644 +--- a/drivers/cxl/core/trace.h ++++ b/drivers/cxl/core/trace.h +@@ -646,18 +646,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); + + TRACE_EVENT(cxl_poison, + +- TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region, ++ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr, + const struct cxl_poison_record *record, u8 flags, + __le64 overflow_ts, enum cxl_poison_trace_type trace_type), + +- TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type), ++ TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type), + + TP_STRUCT__entry( + __string(memdev, dev_name(&cxlmd->dev)) + __string(host, dev_name(cxlmd->dev.parent)) + __field(u64, serial) + __field(u8, trace_type) +- __string(region, region) ++ __string(region, cxlr ? dev_name(&cxlr->dev) : "") + __field(u64, overflow_ts) + __field(u64, hpa) + __field(u64, dpa) +@@ -677,10 +677,10 @@ TRACE_EVENT(cxl_poison, + __entry->source = cxl_poison_record_source(record); + __entry->trace_type = trace_type; + __entry->flags = flags; +- if (region) { +- __assign_str(region, dev_name(®ion->dev)); +- memcpy(__entry->uuid, ®ion->params.uuid, 16); +- __entry->hpa = cxl_trace_hpa(region, cxlmd, ++ if (cxlr) { ++ __assign_str(region, dev_name(&cxlr->dev)); ++ memcpy(__entry->uuid, &cxlr->params.uuid, 16); ++ __entry->hpa = cxl_trace_hpa(cxlr, cxlmd, + __entry->dpa); + } else { + __assign_str(region, ""); +-- +2.43.0 + diff --git a/queue-6.8/debugfs-fix-wait-cancellation-handling-during-remove.patch b/queue-6.8/debugfs-fix-wait-cancellation-handling-during-remove.patch new file mode 100644 index 00000000000..77599fe528b --- /dev/null +++ b/queue-6.8/debugfs-fix-wait-cancellation-handling-during-remove.patch @@ -0,0 +1,73 @@ +From a02b46c46047114877f34ae93063144cc198bea5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 15:36:20 +0100 +Subject: debugfs: fix wait/cancellation handling during remove + +From: Johannes Berg + +[ Upstream commit 952c3fce297f12c7ff59380adb66b564e2bc9b64 ] + +Ben Greear further reports deadlocks during concurrent debugfs +remove while files are being accessed, even though the code in +question now uses debugfs cancellations. Turns out that despite +all the review on the locking, we missed completely that the +logic is wrong: if the refcount hits zero we can finish (and +need not wait for the completion), but if it doesn't we have +to trigger all the cancellations. As written, we can _never_ +get into the loop triggering the cancellations. Fix this, and +explain it better while at it. 
+ +Cc: stable@vger.kernel.org +Fixes: 8c88a474357e ("debugfs: add API to allow debugfs operations cancellation") +Reported-by: Ben Greear +Closes: https://lore.kernel.org/r/1c9fa9e5-09f1-0522-fdbc-dbcef4d255ca@candelatech.com +Tested-by: Madhan Sai +Signed-off-by: Johannes Berg +Link: https://lore.kernel.org/r/20240229153635.6bfab7eb34d3.I6c7aeff8c9d6628a8bc1ddcf332205a49d801f17@changeid +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + fs/debugfs/inode.c | 25 ++++++++++++++++++++----- + 1 file changed, 20 insertions(+), 5 deletions(-) + +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 034a617cb1a5e..a40da00654336 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -751,13 +751,28 @@ static void __debugfs_file_removed(struct dentry *dentry) + if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) + return; + +- /* if we hit zero, just wait for all to finish */ +- if (!refcount_dec_and_test(&fsd->active_users)) { +- wait_for_completion(&fsd->active_users_drained); ++ /* if this was the last reference, we're done */ ++ if (refcount_dec_and_test(&fsd->active_users)) + return; +- } + +- /* if we didn't hit zero, try to cancel any we can */ ++ /* ++ * If there's still a reference, the code that obtained it can ++ * be in different states: ++ * - The common case of not using cancellations, or already ++ * after debugfs_leave_cancellation(), where we just need ++ * to wait for debugfs_file_put() which signals the completion; ++ * - inside a cancellation section, i.e. between ++ * debugfs_enter_cancellation() and debugfs_leave_cancellation(), ++ * in which case we need to trigger the ->cancel() function, ++ * and then wait for debugfs_file_put() just like in the ++ * previous case; ++ * - before debugfs_enter_cancellation() (but obviously after ++ * debugfs_file_get()), in which case we may not see the ++ * cancellation in the list on the first round of the loop, ++ * but debugfs_enter_cancellation() signals the completion ++ * after adding it, so this code gets woken up to call the ++ * ->cancel() function. ++ */ + while (refcount_read(&fsd->active_users)) { + struct debugfs_cancellation *c; + +-- +2.43.0 + diff --git a/queue-6.8/dlm-fix-user-space-lkb-refcounting.patch b/queue-6.8/dlm-fix-user-space-lkb-refcounting.patch new file mode 100644 index 00000000000..694194f1784 --- /dev/null +++ b/queue-6.8/dlm-fix-user-space-lkb-refcounting.patch @@ -0,0 +1,66 @@ +From ed686e6b0c3c2fc91e2d38966b767fa326c3dcde Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 13:05:07 -0400 +Subject: dlm: fix user space lkb refcounting +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Alexander Aring + +[ Upstream commit 2ab3d705ca5d4f7ea345a21c3da41a447a549649 ] + +This patch fixes to check on the right return value if it was the last +callback. The rv variable got overwritten by the return of +copy_result_to_user(). Fixing it by introducing a second variable for +the return value and don't let rv being overwritten. 
+ +Cc: stable@vger.kernel.org +Fixes: 61bed0baa4db ("fs: dlm: use a non-static queue for callbacks") +Reported-by: Valentin Vidić +Closes: https://lore.kernel.org/gfs2/Ze4qSvzGJDt5yxC3@valentin-vidic.from.hr +Signed-off-by: Alexander Aring +Signed-off-by: David Teigland +Signed-off-by: Sasha Levin +--- + fs/dlm/user.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/fs/dlm/user.c b/fs/dlm/user.c +index 695e691b38b31..9f9b68448830e 100644 +--- a/fs/dlm/user.c ++++ b/fs/dlm/user.c +@@ -806,7 +806,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, + struct dlm_lkb *lkb; + DECLARE_WAITQUEUE(wait, current); + struct dlm_callback *cb; +- int rv, copy_lvb = 0; ++ int rv, ret, copy_lvb = 0; + int old_mode, new_mode; + + if (count == sizeof(struct dlm_device_version)) { +@@ -906,9 +906,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, + trace_dlm_ast(lkb->lkb_resource->res_ls, lkb); + } + +- rv = copy_result_to_user(lkb->lkb_ua, +- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), +- cb->flags, cb->mode, copy_lvb, buf, count); ++ ret = copy_result_to_user(lkb->lkb_ua, ++ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), ++ cb->flags, cb->mode, copy_lvb, buf, count); + + kref_put(&cb->ref, dlm_release_callback); + +@@ -916,7 +916,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, + if (rv == DLM_DEQUEUE_CALLBACK_LAST) + dlm_put_lkb(lkb); + +- return rv; ++ return ret; + } + + static __poll_t device_poll(struct file *file, poll_table *wait) +-- +2.43.0 + diff --git a/queue-6.8/dm-raid-add-a-new-helper-prepare_suspend-in-md_perso.patch b/queue-6.8/dm-raid-add-a-new-helper-prepare_suspend-in-md_perso.patch new file mode 100644 index 00000000000..5b97b50643e --- /dev/null +++ b/queue-6.8/dm-raid-add-a-new-helper-prepare_suspend-in-md_perso.patch @@ -0,0 +1,75 @@ +From ad4e5d3982daa4fb951bf98380959c3f978d69f8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:04 +0800 +Subject: dm-raid: add a new helper prepare_suspend() in md_personality + +From: Yu Kuai + +[ Upstream commit 5625ff8b72b0e5c13b0fc1fc1f198155af45f729 ] + +There are no functional changes for now, prepare to fix a deadlock for +dm-raid456. + +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-8-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/dm-raid.c | 18 ++++++++++++++++++ + drivers/md/md.h | 1 + + 2 files changed, 19 insertions(+) + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 8d38cdb221453..b8f5304ca00d1 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3803,6 +3803,23 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) + blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); + } + ++static void raid_presuspend(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ struct mddev *mddev = &rs->md; ++ ++ if (!reshape_interrupted(mddev)) ++ return; ++ ++ /* ++ * For raid456, if reshape is interrupted, IO across reshape position ++ * will never make progress, while caller will wait for IO to be done. ++ * Inform raid456 to handle those IO to prevent deadlock. 
++ */ ++ if (mddev->pers && mddev->pers->prepare_suspend) ++ mddev->pers->prepare_suspend(mddev); ++} ++ + static void raid_postsuspend(struct dm_target *ti) + { + struct raid_set *rs = ti->private; +@@ -4087,6 +4104,7 @@ static struct target_type raid_target = { + .message = raid_message, + .iterate_devices = raid_iterate_devices, + .io_hints = raid_io_hints, ++ .presuspend = raid_presuspend, + .postsuspend = raid_postsuspend, + .preresume = raid_preresume, + .resume = raid_resume, +diff --git a/drivers/md/md.h b/drivers/md/md.h +index ea0fd76c17e75..24261f9b676d5 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -649,6 +649,7 @@ struct md_personality + int (*start_reshape) (struct mddev *mddev); + void (*finish_reshape) (struct mddev *mddev); + void (*update_reshape_pos) (struct mddev *mddev); ++ void (*prepare_suspend) (struct mddev *mddev); + /* quiesce suspends or resumes internal processing. + * 1 - stop new actions and wait for action io to complete + * 0 - return to normal behaviour +-- +2.43.0 + diff --git a/queue-6.8/dm-raid-fix-lockdep-waring-in-pers-hot_add_disk.patch b/queue-6.8/dm-raid-fix-lockdep-waring-in-pers-hot_add_disk.patch new file mode 100644 index 00000000000..c8f138464d1 --- /dev/null +++ b/queue-6.8/dm-raid-fix-lockdep-waring-in-pers-hot_add_disk.patch @@ -0,0 +1,49 @@ +From 536e66694259aab8c1e869aa670b4b0d5805f925 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:06 +0800 +Subject: dm-raid: fix lockdep waring in "pers->hot_add_disk" + +From: Yu Kuai + +[ Upstream commit 95009ae904b1e9dca8db6f649f2d7c18a6e42c75 ] + +The lockdep assert is added by commit a448af25becf ("md/raid10: remove +rcu protection to access rdev from conf") in print_conf(). And I didn't +notice that dm-raid is calling "pers->hot_add_disk" without holding +'reconfig_mutex'. + +"pers->hot_add_disk" read and write many fields that is protected by +'reconfig_mutex', and raid_resume() already grab the lock in other +contex. Hence fix this problem by protecting "pers->host_add_disk" +with the lock. + +Fixes: 9092c02d9435 ("DM RAID: Add ability to restore transiently failed devices on resume") +Fixes: a448af25becf ("md/raid10: remove rcu protection to access rdev from conf") +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-10-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/dm-raid.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 063f1266ec462..d97355e9b9a6e 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -4091,7 +4091,9 @@ static void raid_resume(struct dm_target *ti) + * Take this opportunity to check whether any failed + * devices are reachable again. 
+ */ ++ mddev_lock_nointr(mddev); + attempt_restore_of_faulty_devices(rs); ++ mddev_unlock(mddev); + } + + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { +-- +2.43.0 + diff --git a/queue-6.8/dm-raid-really-frozen-sync_thread-during-suspend.patch b/queue-6.8/dm-raid-really-frozen-sync_thread-during-suspend.patch new file mode 100644 index 00000000000..7a84a794744 --- /dev/null +++ b/queue-6.8/dm-raid-really-frozen-sync_thread-during-suspend.patch @@ -0,0 +1,147 @@ +From fecac3d81622211812aeed9b97483293a4dbbb43 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:02 +0800 +Subject: dm-raid: really frozen sync_thread during suspend + +From: Yu Kuai + +[ Upstream commit 16c4770c75b1223998adbeb7286f9a15c65fba73 ] + +1) commit f52f5c71f3d4 ("md: fix stopping sync thread") remove + MD_RECOVERY_FROZEN from __md_stop_writes() and doesn't realize that + dm-raid relies on __md_stop_writes() to frozen sync_thread + indirectly. Fix this problem by adding MD_RECOVERY_FROZEN in + md_stop_writes(), and since stop_sync_thread() is only used for + dm-raid in this case, also move stop_sync_thread() to + md_stop_writes(). +2) The flag MD_RECOVERY_FROZEN doesn't mean that sync thread is frozen, + it only prevent new sync_thread to start, and it can't stop the + running sync thread; In order to frozen sync_thread, after seting the + flag, stop_sync_thread() should be used. +3) The flag MD_RECOVERY_FROZEN doesn't mean that writes are stopped, use + it as condition for md_stop_writes() in raid_postsuspend() doesn't + look correct. Consider that reentrant stop_sync_thread() do nothing, + always call md_stop_writes() in raid_postsuspend(). +4) raid_message can set/clear the flag MD_RECOVERY_FROZEN at anytime, + and if MD_RECOVERY_FROZEN is cleared while the array is suspended, + new sync_thread can start unexpected. Fix this by disallow + raid_message() to change sync_thread status during suspend. + +Note that after commit f52f5c71f3d4 ("md: fix stopping sync thread"), the +test shell/lvconvert-raid-reshape.sh start to hang in stop_sync_thread(), +and with previous fixes, the test won't hang there anymore, however, the +test will still fail and complain that ext4 is corrupted. And with this +patch, the test won't hang due to stop_sync_thread() or fail due to ext4 +is corrupted anymore. However, there is still a deadlock related to +dm-raid456 that will be fixed in following patches. + +Reported-by: Mikulas Patocka +Closes: https://lore.kernel.org/all/e5e8afe2-e9a8-49a2-5ab0-958d4065c55e@redhat.com/ +Fixes: 1af2048a3e87 ("dm raid: fix deadlock caused by premature md_stop_writes()") +Fixes: 9dbd1aa3a81c ("dm raid: add reshaping support to the target") +Fixes: f52f5c71f3d4 ("md: fix stopping sync thread") +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-6-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/dm-raid.c | 25 +++++++++++++++---------- + drivers/md/md.c | 3 ++- + 2 files changed, 17 insertions(+), 11 deletions(-) + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 13eb47b997f94..fff9336fee767 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3240,11 +3240,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) + rs->md.ro = 1; + rs->md.in_sync = 1; + +- /* Keep array frozen until resume. 
*/ +- set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); +- + /* Has to be held on running the array */ + mddev_suspend_and_lock_nointr(&rs->md); ++ ++ /* Keep array frozen until resume. */ ++ md_frozen_sync_thread(&rs->md); ++ + r = md_run(&rs->md); + rs->md.in_sync = 0; /* Assume already marked dirty */ + if (r) { +@@ -3722,6 +3723,9 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, + if (!mddev->pers || !mddev->pers->sync_request) + return -EINVAL; + ++ if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) ++ return -EBUSY; ++ + if (!strcasecmp(argv[0], "frozen")) + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + else +@@ -3796,10 +3800,11 @@ static void raid_postsuspend(struct dm_target *ti) + struct raid_set *rs = ti->private; + + if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { +- /* Writes have to be stopped before suspending to avoid deadlocks. */ +- if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery)) +- md_stop_writes(&rs->md); +- ++ /* ++ * sync_thread must be stopped during suspend, and writes have ++ * to be stopped before suspending to avoid deadlocks. ++ */ ++ md_stop_writes(&rs->md); + mddev_suspend(&rs->md, false); + } + } +@@ -4012,8 +4017,6 @@ static int raid_preresume(struct dm_target *ti) + } + + /* Check for any resize/reshape on @rs and adjust/initiate */ +- /* Be prepared for mddev_resume() in raid_resume() */ +- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) { + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + mddev->resync_min = mddev->recovery_cp; +@@ -4055,10 +4058,12 @@ static void raid_resume(struct dm_target *ti) + if (mddev->delta_disks < 0) + rs_set_capacity(rs); + ++ WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)); ++ WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + mddev_lock_nointr(mddev); +- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + mddev->ro = 0; + mddev->in_sync = 0; ++ md_unfrozen_sync_thread(mddev); + mddev_unlock_and_resume(mddev); + } + } +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 245ef8af8640a..ea68a6f8103bb 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6344,7 +6344,6 @@ static void md_clean(struct mddev *mddev) + + static void __md_stop_writes(struct mddev *mddev) + { +- stop_sync_thread(mddev, true, false); + del_timer_sync(&mddev->safemode_timer); + + if (mddev->pers && mddev->pers->quiesce) { +@@ -6369,6 +6368,8 @@ static void __md_stop_writes(struct mddev *mddev) + void md_stop_writes(struct mddev *mddev) + { + mddev_lock_nointr(mddev); ++ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ stop_sync_thread(mddev, true, false); + __md_stop_writes(mddev); + mddev_unlock(mddev); + } +-- +2.43.0 + diff --git a/queue-6.8/dm-raid456-md-raid456-fix-a-deadlock-for-dm-raid456-.patch b/queue-6.8/dm-raid456-md-raid456-fix-a-deadlock-for-dm-raid456-.patch new file mode 100644 index 00000000000..2bf962ad00c --- /dev/null +++ b/queue-6.8/dm-raid456-md-raid456-fix-a-deadlock-for-dm-raid456-.patch @@ -0,0 +1,338 @@ +From 73b67249eb539ee6af6f3c436d3eea7942f7c158 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:05 +0800 +Subject: dm-raid456, md/raid456: fix a deadlock for dm-raid456 while io + concurrent with reshape + +From: Yu Kuai + +[ Upstream commit 41425f96d7aa59bc865f60f5dda3d7697b555677 ] + +For raid456, if reshape is still in progress, then IO across reshape +position will wait for reshape to make progress. 
However, for dm-raid, +in following cases reshape will never make progress hence IO will hang: + +1) the array is read-only; +2) MD_RECOVERY_WAIT is set; +3) MD_RECOVERY_FROZEN is set; + +After commit c467e97f079f ("md/raid6: use valid sector values to determine +if an I/O should wait on the reshape") fix the problem that IO across +reshape position doesn't wait for reshape, the dm-raid test +shell/lvconvert-raid-reshape.sh start to hang: + +[root@fedora ~]# cat /proc/979/stack +[<0>] wait_woken+0x7d/0x90 +[<0>] raid5_make_request+0x929/0x1d70 [raid456] +[<0>] md_handle_request+0xc2/0x3b0 [md_mod] +[<0>] raid_map+0x2c/0x50 [dm_raid] +[<0>] __map_bio+0x251/0x380 [dm_mod] +[<0>] dm_submit_bio+0x1f0/0x760 [dm_mod] +[<0>] __submit_bio+0xc2/0x1c0 +[<0>] submit_bio_noacct_nocheck+0x17f/0x450 +[<0>] submit_bio_noacct+0x2bc/0x780 +[<0>] submit_bio+0x70/0xc0 +[<0>] mpage_readahead+0x169/0x1f0 +[<0>] blkdev_readahead+0x18/0x30 +[<0>] read_pages+0x7c/0x3b0 +[<0>] page_cache_ra_unbounded+0x1ab/0x280 +[<0>] force_page_cache_ra+0x9e/0x130 +[<0>] page_cache_sync_ra+0x3b/0x110 +[<0>] filemap_get_pages+0x143/0xa30 +[<0>] filemap_read+0xdc/0x4b0 +[<0>] blkdev_read_iter+0x75/0x200 +[<0>] vfs_read+0x272/0x460 +[<0>] ksys_read+0x7a/0x170 +[<0>] __x64_sys_read+0x1c/0x30 +[<0>] do_syscall_64+0xc6/0x230 +[<0>] entry_SYSCALL_64_after_hwframe+0x6c/0x74 + +This is because reshape can't make progress. + +For md/raid, the problem doesn't exist because register new sync_thread +doesn't rely on the IO to be done any more: + +1) If array is read-only, it can switch to read-write by ioctl/sysfs; +2) md/raid never set MD_RECOVERY_WAIT; +3) If MD_RECOVERY_FROZEN is set, mddev_suspend() doesn't hold + 'reconfig_mutex', hence it can be cleared and reshape can continue by + sysfs api 'sync_action'. + +However, I'm not sure yet how to avoid the problem in dm-raid yet. This +patch on the one hand make sure raid_message() can't change +sync_thread() through raid_message() after presuspend(), on the other +hand detect the above 3 cases before wait for IO do be done in +dm_suspend(), and let dm-raid requeue those IO. 
+ +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-9-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/dm-raid.c | 22 ++++++++++++++++++++-- + drivers/md/md.c | 24 ++++++++++++++++++++++-- + drivers/md/md.h | 3 ++- + drivers/md/raid5.c | 32 ++++++++++++++++++++++++++++++-- + 4 files changed, 74 insertions(+), 7 deletions(-) + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index b8f5304ca00d1..063f1266ec462 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -213,6 +213,7 @@ struct raid_dev { + #define RT_FLAG_RS_IN_SYNC 6 + #define RT_FLAG_RS_RESYNCING 7 + #define RT_FLAG_RS_GROW 8 ++#define RT_FLAG_RS_FROZEN 9 + + /* Array elements of 64 bit needed for rebuild/failed disk bits */ + #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) +@@ -3340,7 +3341,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio) + if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors)) + return DM_MAPIO_REQUEUE; + +- md_handle_request(mddev, bio); ++ if (unlikely(!md_handle_request(mddev, bio))) ++ return DM_MAPIO_REQUEUE; + + return DM_MAPIO_SUBMITTED; + } +@@ -3724,7 +3726,8 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, + if (!mddev->pers || !mddev->pers->sync_request) + return -EINVAL; + +- if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) ++ if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) || ++ test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags)) + return -EBUSY; + + if (!strcasecmp(argv[0], "frozen")) { +@@ -3808,6 +3811,12 @@ static void raid_presuspend(struct dm_target *ti) + struct raid_set *rs = ti->private; + struct mddev *mddev = &rs->md; + ++ /* ++ * From now on, disallow raid_message() to change sync_thread until ++ * resume, raid_postsuspend() is too late. 
++ */ ++ set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); ++ + if (!reshape_interrupted(mddev)) + return; + +@@ -3820,6 +3829,13 @@ static void raid_presuspend(struct dm_target *ti) + mddev->pers->prepare_suspend(mddev); + } + ++static void raid_presuspend_undo(struct dm_target *ti) ++{ ++ struct raid_set *rs = ti->private; ++ ++ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); ++} ++ + static void raid_postsuspend(struct dm_target *ti) + { + struct raid_set *rs = ti->private; +@@ -4085,6 +4101,7 @@ static void raid_resume(struct dm_target *ti) + + WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)); + WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); ++ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); + mddev_lock_nointr(mddev); + mddev->ro = 0; + mddev->in_sync = 0; +@@ -4105,6 +4122,7 @@ static struct target_type raid_target = { + .iterate_devices = raid_iterate_devices, + .io_hints = raid_io_hints, + .presuspend = raid_presuspend, ++ .presuspend_undo = raid_presuspend_undo, + .postsuspend = raid_postsuspend, + .preresume = raid_preresume, + .resume = raid_resume, +diff --git a/drivers/md/md.c b/drivers/md/md.c +index ea68a6f8103bb..f54012d684414 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -366,7 +366,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio) + return true; + } + +-void md_handle_request(struct mddev *mddev, struct bio *bio) ++bool md_handle_request(struct mddev *mddev, struct bio *bio) + { + check_suspended: + if (is_suspended(mddev, bio)) { +@@ -374,7 +374,7 @@ void md_handle_request(struct mddev *mddev, struct bio *bio) + /* Bail out if REQ_NOWAIT is set for the bio */ + if (bio->bi_opf & REQ_NOWAIT) { + bio_wouldblock_error(bio); +- return; ++ return true; + } + for (;;) { + prepare_to_wait(&mddev->sb_wait, &__wait, +@@ -390,10 +390,13 @@ void md_handle_request(struct mddev *mddev, struct bio *bio) + + if (!mddev->pers->make_request(mddev, bio)) { + percpu_ref_put(&mddev->active_io); ++ if (!mddev->gendisk && mddev->pers->prepare_suspend) ++ return false; + goto check_suspended; + } + + percpu_ref_put(&mddev->active_io); ++ return true; + } + EXPORT_SYMBOL(md_handle_request); + +@@ -8765,6 +8768,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio) + } + EXPORT_SYMBOL_GPL(md_account_bio); + ++void md_free_cloned_bio(struct bio *bio) ++{ ++ struct md_io_clone *md_io_clone = bio->bi_private; ++ struct bio *orig_bio = md_io_clone->orig_bio; ++ struct mddev *mddev = md_io_clone->mddev; ++ ++ if (bio->bi_status && !orig_bio->bi_status) ++ orig_bio->bi_status = bio->bi_status; ++ ++ if (md_io_clone->start_time) ++ bio_end_io_acct(orig_bio, md_io_clone->start_time); ++ ++ bio_put(bio); ++ percpu_ref_put(&mddev->active_io); ++} ++EXPORT_SYMBOL_GPL(md_free_cloned_bio); ++ + /* md_allow_write(mddev) + * Calling this ensures that the array is marked 'active' so that writes + * may proceed without blocking. 
It is important to call this before +diff --git a/drivers/md/md.h b/drivers/md/md.h +index 24261f9b676d5..375ad4a2df71d 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -783,6 +783,7 @@ extern void md_finish_reshape(struct mddev *mddev); + void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, + struct bio *bio, sector_t start, sector_t size); + void md_account_bio(struct mddev *mddev, struct bio **bio); ++void md_free_cloned_bio(struct bio *bio); + + extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); + extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, +@@ -811,7 +812,7 @@ extern void md_stop_writes(struct mddev *mddev); + extern int md_rdev_init(struct md_rdev *rdev); + extern void md_rdev_clear(struct md_rdev *rdev); + +-extern void md_handle_request(struct mddev *mddev, struct bio *bio); ++extern bool md_handle_request(struct mddev *mddev, struct bio *bio); + extern int mddev_suspend(struct mddev *mddev, bool interruptible); + extern void mddev_resume(struct mddev *mddev); + extern void md_idle_sync_thread(struct mddev *mddev); +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 4357673bee269..69452e4394db0 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -761,6 +761,7 @@ enum stripe_result { + STRIPE_RETRY, + STRIPE_SCHEDULE_AND_RETRY, + STRIPE_FAIL, ++ STRIPE_WAIT_RESHAPE, + }; + + struct stripe_request_ctx { +@@ -5947,7 +5948,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, + if (ahead_of_reshape(mddev, logical_sector, + conf->reshape_safe)) { + spin_unlock_irq(&conf->device_lock); +- return STRIPE_SCHEDULE_AND_RETRY; ++ ret = STRIPE_SCHEDULE_AND_RETRY; ++ goto out; + } + } + spin_unlock_irq(&conf->device_lock); +@@ -6026,6 +6028,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, + + out_release: + raid5_release_stripe(sh); ++out: ++ if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) { ++ bi->bi_status = BLK_STS_RESOURCE; ++ ret = STRIPE_WAIT_RESHAPE; ++ pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress"); ++ } + return ret; + } + +@@ -6147,7 +6155,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) + while (1) { + res = make_stripe_request(mddev, conf, &ctx, logical_sector, + bi); +- if (res == STRIPE_FAIL) ++ if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE) + break; + + if (res == STRIPE_RETRY) +@@ -6185,6 +6193,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) + + if (rw == WRITE) + md_write_end(mddev); ++ if (res == STRIPE_WAIT_RESHAPE) { ++ md_free_cloned_bio(bi); ++ return false; ++ } ++ + bio_endio(bi); + return true; + } +@@ -8923,6 +8936,18 @@ static int raid5_start(struct mddev *mddev) + return r5l_start(conf->log); + } + ++/* ++ * This is only used for dm-raid456, caller already frozen sync_thread, hence ++ * if rehsape is still in progress, io that is waiting for reshape can never be ++ * done now, hence wake up and handle those IO. 
++ */ ++static void raid5_prepare_suspend(struct mddev *mddev) ++{ ++ struct r5conf *conf = mddev->private; ++ ++ wake_up(&conf->wait_for_overlap); ++} ++ + static struct md_personality raid6_personality = + { + .name = "raid6", +@@ -8946,6 +8971,7 @@ static struct md_personality raid6_personality = + .quiesce = raid5_quiesce, + .takeover = raid6_takeover, + .change_consistency_policy = raid5_change_consistency_policy, ++ .prepare_suspend = raid5_prepare_suspend, + }; + static struct md_personality raid5_personality = + { +@@ -8970,6 +8996,7 @@ static struct md_personality raid5_personality = + .quiesce = raid5_quiesce, + .takeover = raid5_takeover, + .change_consistency_policy = raid5_change_consistency_policy, ++ .prepare_suspend = raid5_prepare_suspend, + }; + + static struct md_personality raid4_personality = +@@ -8995,6 +9022,7 @@ static struct md_personality raid4_personality = + .quiesce = raid5_quiesce, + .takeover = raid4_takeover, + .change_consistency_policy = raid5_change_consistency_policy, ++ .prepare_suspend = raid5_prepare_suspend, + }; + + static int __init raid5_init(void) +-- +2.43.0 + diff --git a/queue-6.8/dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch b/queue-6.8/dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch new file mode 100644 index 00000000000..862668989da --- /dev/null +++ b/queue-6.8/dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch @@ -0,0 +1,40 @@ +From a21c460bd74304aff94ac8ea5c11d7744e7af33d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Mar 2024 18:43:11 +0100 +Subject: dm snapshot: fix lockup in dm_exception_table_exit + +From: Mikulas Patocka + +[ Upstream commit 6e7132ed3c07bd8a6ce3db4bb307ef2852b322dc ] + +There was reported lockup when we exit a snapshot with many exceptions. +Fix this by adding "cond_resched" to the loop that frees the exceptions. + +Reported-by: John Pittman +Cc: stable@vger.kernel.org +Signed-off-by: Mikulas Patocka +Signed-off-by: Mike Snitzer +Signed-off-by: Sasha Levin +--- + drivers/md/dm-snap.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index bf7a574499a34..0ace06d1bee38 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et, + for (i = 0; i < size; i++) { + slot = et->table + i; + +- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) ++ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) { + kmem_cache_free(mem, ex); ++ cond_resched(); ++ } + } + + kvfree(et->table); +-- +2.43.0 + diff --git a/queue-6.8/docs-makefile-add-dependency-to-ynl_index-for-target.patch b/queue-6.8/docs-makefile-add-dependency-to-ynl_index-for-target.patch new file mode 100644 index 00000000000..cb8afdda10f --- /dev/null +++ b/queue-6.8/docs-makefile-add-dependency-to-ynl_index-for-target.patch @@ -0,0 +1,62 @@ +From 944504366b876d209544b3ad92660fad1dce2d3a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 13:23:00 +0900 +Subject: docs: Makefile: Add dependency to $(YNL_INDEX) for targets other than + htmldocs + +From: Akira Yokosawa + +[ Upstream commit a304fa1d10fcb974c117d391e5b4d34c2baa9a62 ] + +Commit f061c9f7d058 ("Documentation: Document each netlink family") +added recipes for YAML -> RST conversion. +Then commit 7da8bdbf8f5d ("docs: Makefile: Fix make cleandocs by +deleting generated .rst files") made sure those converted .rst files +are cleaned by "make cleandocs". 
+ +However, they took care of htmldocs build only. + +If one of other targets such as latexdocs or epubdocs is built +without building htmldocs, missing .rst files can cause additional +WARNINGs from sphinx-build as follow: + + ./Documentation/userspace-api/netlink/specs.rst:18: WARNING: undefined label: 'specs' + ./Documentation/userspace-api/netlink/netlink-raw.rst:64: WARNING: unknown document: '../../networking/netlink_spec/rt_link' + ./Documentation/userspace-api/netlink/netlink-raw.rst:64: WARNING: unknown document: '../../networking/netlink_spec/tc' + ./Documentation/userspace-api/netlink/index.rst:21: WARNING: undefined label: 'specs' + +Add dependency to $(YNL_INDEX) for other targets and allow any targets +to be built cleanly right after "make cleandocs". + +Signed-off-by: Akira Yokosawa +Cc: stable@vger.kernel.org # v6.7 +Cc: Thorsten Blum +Cc: Breno Leitao +Cc: Jakub Kicinski +Cc: "David S. Miller" +Reviwed-by: Breno Leitao +Signed-off-by: Jonathan Corbet +Message-ID: +Signed-off-by: Sasha Levin +--- + Documentation/Makefile | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/Documentation/Makefile b/Documentation/Makefile +index 3885bbe260eb2..99cb6cbccb135 100644 +--- a/Documentation/Makefile ++++ b/Documentation/Makefile +@@ -111,7 +111,9 @@ $(YNL_INDEX): $(YNL_RST_FILES) + $(YNL_RST_DIR)/%.rst: $(YNL_YAML_DIR)/%.yaml $(YNL_TOOL) + $(Q)$(YNL_TOOL) -i $< -o $@ + +-htmldocs: $(YNL_INDEX) ++htmldocs texinfodocs latexdocs epubdocs xmldocs: $(YNL_INDEX) ++ ++htmldocs: + @$(srctree)/scripts/sphinx-pre-install --version-check + @+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,html,$(var),,$(var))) + +-- +2.43.0 + diff --git a/queue-6.8/docs-restore-smart-quotes-for-quotes.patch b/queue-6.8/docs-restore-smart-quotes-for-quotes.patch new file mode 100644 index 00000000000..e8044bf2426 --- /dev/null +++ b/queue-6.8/docs-restore-smart-quotes-for-quotes.patch @@ -0,0 +1,57 @@ +From 51ea09a47a4f521ba5335c6f6f2aa8fa9a6d6705 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 25 Feb 2024 18:46:00 +0900 +Subject: docs: Restore "smart quotes" for quotes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Akira Yokosawa + +[ Upstream commit fe2562582bffe675721e77e00b3bf5bfa1d7aeab ] + +Commit eaae75754d81 ("docs: turn off "smart quotes" in the HTML build") +disabled conversion of quote marks along with that of dashes. +Despite the short summary, the change affects not only HTML build +but also other build targets including PDF. + +However, as "smart quotes" had been enabled for more than half a +decade already, quite a few readers of HTML pages are likely expecting +conversions of "foo" -> “foo” and 'bar' -> ‘bar’. + +Furthermore, in LaTeX typesetting convention, it is common to use +distinct marks for opening and closing quote marks. + +To satisfy such readers' expectation, restore conversion of quotes +only by setting smartquotes_action [1]. 
+ +Link: [1] https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-smartquotes_action +Cc: stable@vger.kernel.org # v6.4 +Signed-off-by: Akira Yokosawa +Signed-off-by: Jonathan Corbet +Link: https://lore.kernel.org/r/20240225094600.65628-1-akiyks@gmail.com +Signed-off-by: Sasha Levin +--- + Documentation/conf.py | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/Documentation/conf.py b/Documentation/conf.py +index da64c9fb7e072..d148f3e8dd572 100644 +--- a/Documentation/conf.py ++++ b/Documentation/conf.py +@@ -346,9 +346,9 @@ sys.stderr.write("Using %s theme\n" % html_theme) + html_static_path = ['sphinx-static'] + + # If true, Docutils "smart quotes" will be used to convert quotes and dashes +-# to typographically correct entities. This will convert "--" to "—", +-# which is not always what we want, so disable it. +-smartquotes = False ++# to typographically correct entities. However, conversion of "--" to "—" ++# is not always what we want, so enable only quotes. ++smartquotes_action = 'q' + + # Custom sidebar templates, maps document names to template names. + # Note that the RTD theme ignores this +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-add-a-dc_state-null-check-in-dc_stat.patch b/queue-6.8/drm-amd-display-add-a-dc_state-null-check-in-dc_stat.patch new file mode 100644 index 00000000000..d5087e43bc8 --- /dev/null +++ b/queue-6.8/drm-amd-display-add-a-dc_state-null-check-in-dc_stat.patch @@ -0,0 +1,42 @@ +From c40ba53500a7d0250b2baf391f6c22696644cb34 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 18:20:16 -0500 +Subject: drm/amd/display: Add a dc_state NULL check in dc_state_release + +From: Allen Pan + +[ Upstream commit 334b56cea5d9df5989be6cf1a5898114fa70ad98 ] + +[How] +Check wheather state is NULL before releasing it. + +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Charlene Liu +Acked-by: Alex Hung +Signed-off-by: Allen Pan +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/core/dc_state.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c +index 180ac47868c22..5cc7f8da209c5 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c +@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref) + + void dc_state_release(struct dc_state *state) + { +- kref_put(&state->refcount, dc_state_free); ++ if (state != NULL) ++ kref_put(&state->refcount, dc_state_free); + } + /* + * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-add-more-checks-for-exiting-idle-in-.patch b/queue-6.8/drm-amd-display-add-more-checks-for-exiting-idle-in-.patch new file mode 100644 index 00000000000..ef43caf20c8 --- /dev/null +++ b/queue-6.8/drm-amd-display-add-more-checks-for-exiting-idle-in-.patch @@ -0,0 +1,325 @@ +From 702454afdab7cc2357e638949030bbd19ea39fd7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 16 Jan 2024 09:52:58 -0500 +Subject: drm/amd/display: Add more checks for exiting idle in DC + +From: Nicholas Kazlauskas + +[ Upstream commit a9b1a4f684b32bcd33431b67acd6f4c275728380 ] + +[Why] +Any interface that touches registers needs to wake up the system. 
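+
+In this driver, "waking up" boils down to dropping idle optimizations
+while IPS is supported; the new helper quoted here from the diff below
+is exactly that check-and-wake:
+
+	void dc_exit_ips_for_hw_access(struct dc *dc)
+	{
+		if (dc->caps.ips_support)
+			dc_allow_idle_optimizations(dc, false);
+	}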
+ +[How] +Add a new interface dc_exit_ips_for_hw_access that wraps the check +for IPS support and insert it into the public DC interfaces that +touch registers. + +We don't re-enter, since we expect that the enter/exit to have been done +on the DM side. + +Cc: stable@vger.kernel.org # 6.1+ +Reviewed-by: Ovidiu Bunea +Acked-by: Hamza Mahfooz +Signed-off-by: Nicholas Kazlauskas +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/core/dc.c | 42 +++++++++++++++++++ + .../gpu/drm/amd/display/dc/core/dc_stream.c | 18 ++++++++ + .../gpu/drm/amd/display/dc/core/dc_surface.c | 2 + + drivers/gpu/drm/amd/display/dc/dc.h | 1 + + 4 files changed, 63 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 3b65f216048e1..3c3d613c5f00e 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -417,6 +417,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, + if (!memcmp(&stream->adjust, adjust, sizeof(*adjust))) + return true; + ++ dc_exit_ips_for_hw_access(dc); ++ + stream->adjust.v_total_max = adjust->v_total_max; + stream->adjust.v_total_mid = adjust->v_total_mid; + stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; +@@ -457,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, + + int i = 0; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + +@@ -487,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, + bool ret = false; + struct crtc_position position; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = + &dc->current_state->res_ctx.pipe_ctx[i]; +@@ -606,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, + if (pipe == NULL) + return false; + ++ dc_exit_ips_for_hw_access(dc); ++ + /* By default, capture the full frame */ + param.windowa_x_start = 0; + param.windowa_y_start = 0; +@@ -665,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, + struct pipe_ctx *pipe; + struct timing_generator *tg; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream == stream) +@@ -689,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + int i; + struct pipe_ctx *pipe_ctx; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { +@@ -724,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, + if (option > DITHER_OPTION_MAX) + return; + ++ dc_exit_ips_for_hw_access(stream->ctx->dc); ++ + stream->dither_option = option; + + memset(¶ms, 0, sizeof(params)); +@@ -748,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre + bool ret = false; + struct pipe_ctx *pipes; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { + pipes = &dc->current_state->res_ctx.pipe_ctx[i]; +@@ -765,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) + bool ret = false; + struct pipe_ctx *pipes; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { +@@ -791,6 +809,8 @@ 
void dc_stream_set_static_screen_params(struct dc *dc, + struct pipe_ctx *pipes_affected[MAX_PIPES]; + int num_pipes_affected = 0; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < num_streams; i++) { + struct dc_stream_state *stream = streams[i]; + +@@ -1817,6 +1837,8 @@ void dc_enable_stereo( + int i, j; + struct pipe_ctx *pipe; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + if (context != NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; +@@ -1836,6 +1858,8 @@ void dc_enable_stereo( + void dc_trigger_sync(struct dc *dc, struct dc_state *context) + { + if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { ++ dc_exit_ips_for_hw_access(dc); ++ + enable_timing_multisync(dc, context); + program_timing_sync(dc, context); + } +@@ -2097,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc, + if (!streams_changed(dc, streams, stream_count)) + return res; + ++ dc_exit_ips_for_hw_access(dc); ++ + DC_LOG_DC("%s: %d streams\n", __func__, stream_count); + + for (i = 0; i < stream_count; i++) { +@@ -3429,6 +3455,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + struct dc_stream_status *stream_status = NULL; ++ dc_exit_ips_for_hw_access(dc); ++ + dc_z10_restore(dc); + + top_pipe_to_program = resource_get_otg_master_for_stream( +@@ -3557,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc, + // dc->current_state anymore, so we have to cache it before we apply + // the new SubVP context + subvp_prev_use = false; ++ dc_exit_ips_for_hw_access(dc); ++ + dc_z10_restore(dc); + if (update_type == UPDATE_TYPE_FULL) + wait_for_outstanding_hw_updates(dc, context); +@@ -4437,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc, + bool is_plane_addition = 0; + bool is_fast_update_only; + ++ dc_exit_ips_for_hw_access(dc); ++ + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); + is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, + surface_count, stream_update, stream); +@@ -4557,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc, + int i, j; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + ++ dc_exit_ips_for_hw_access(dc); ++ + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); + stream_status = dc_stream_get_status(stream); + context = dc->current_state; +@@ -4741,6 +4775,8 @@ void dc_set_power_state( + case DC_ACPI_CM_POWER_STATE_D0: + dc_state_construct(dc, dc->current_state); + ++ dc_exit_ips_for_hw_access(dc); ++ + dc_z10_restore(dc); + + dc->hwss.init_hw(dc); +@@ -4882,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) + dc->idle_optimizations_allowed = allow; + } + ++void dc_exit_ips_for_hw_access(struct dc *dc) ++{ ++ if (dc->caps.ips_support) ++ dc_allow_idle_optimizations(dc, false); ++} ++ + bool dc_dmub_is_ips_idle_state(struct dc *dc) + { + if (dc->debug.disable_idle_power_optimizations) +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +index 54670e0b15189..51a970fcb5d05 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +@@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc, + return false; + } + ++ dc_exit_ips_for_hw_access(dc); ++ + wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; +@@ -493,6 +495,8 @@ bool 
dc_stream_fc_disable_writeback(struct dc *dc, + return false; + } + ++ dc_exit_ips_for_hw_access(dc); ++ + if (dwb->funcs->set_fc_enable) + dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); + +@@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc, + return false; + } + ++ dc_exit_ips_for_hw_access(dc); ++ + /* disable writeback */ + if (dc->hwss.disable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; +@@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc, + int num_dwb, + struct dc_writeback_info *wb_info) + { ++ dc_exit_ips_for_hw_access(dc); ++ + if (dc->hwss.mmhubbub_warmup) + return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); + else +@@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) + struct resource_context *res_ctx = + &dc->current_state->res_ctx; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; + +@@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + +@@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, + struct resource_context *res_ctx = + &dc->current_state->res_ctx; + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < MAX_PIPES; i++) { + struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; + +@@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) + if (i == MAX_PIPES) + return true; + ++ dc_exit_ips_for_hw_access(dc); ++ + return dc->hwss.dmdata_status_done(pipe); + } + +@@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, + + pipe_ctx->stream->dmdata_address = attr->address; + ++ dc_exit_ips_for_hw_access(dc); ++ + dc->hwss.program_dmdata_engine(pipe_ctx); + + if (hubp->funcs->dmdata_set_attributes != NULL && +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +index 19a2c7140ae84..19140fb65787c 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +@@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status( + break; + } + ++ dc_exit_ips_for_hw_access(dc); ++ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; +diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h +index 7aa9954ec8407..f1342314f7f43 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc.h ++++ b/drivers/gpu/drm/amd/display/dc/dc.h +@@ -2324,6 +2324,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_ + struct dc_cursor_attributes *cursor_attr); + + void dc_allow_idle_optimizations(struct dc *dc, bool allow); ++void dc_exit_ips_for_hw_access(struct dc *dc); + bool dc_dmub_is_ips_idle_state(struct dc *dc); + + /* set min and max memory clock to lowest and highest DPM level, respectively */ +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-allow-dirty-rects-to-be-sent-to-dmub.patch b/queue-6.8/drm-amd-display-allow-dirty-rects-to-be-sent-to-dmub.patch new file mode 100644 index 00000000000..58c1f79e9d9 --- /dev/null +++ b/queue-6.8/drm-amd-display-allow-dirty-rects-to-be-sent-to-dmub.patch @@ -0,0 +1,47 @@ +From 
de8ec6375480f4b63fb48026aae86777ee196400 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 9 Feb 2024 16:05:18 -0500
+Subject: drm/amd/display: Allow dirty rects to be sent to dmub when abm is
+ active
+
+From: Josip Pavic
+
+[ Upstream commit 7fb19d9510937121a1f285894cffd30bc96572e3 ]
+
+[WHY]
+It's beneficial for ABM to know when new frame data are available.
+
+[HOW]
+Add a new condition to allow dirty rects to be sent to DMUB when ABM is
+active. ABM will use this as a signal that a new frame has arrived.
+
+Cc: Mario Limonciello
+Cc: Alex Deucher
+Cc: stable@vger.kernel.org
+Reviewed-by: Anthony Koo
+Acked-by: Alex Hung
+Signed-off-by: Josip Pavic
+Tested-by: Daniel Wheeler
+Signed-off-by: Alex Deucher
+Signed-off-by: Sasha Levin
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 2c424e435962d..75b8a0fff48f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -3223,6 +3223,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
+ if (stream->link->replay_settings.config.replay_supported)
+ return true;
+
++ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
++ return true;
++
+ return false;
+ }
+
+-- 
+2.43.0
+
diff --git a/queue-6.8/drm-amd-display-amend-coasting-vtotal-for-replay-low.patch b/queue-6.8/drm-amd-display-amend-coasting-vtotal-for-replay-low.patch
new file mode 100644
index 00000000000..894a9946d5a
--- /dev/null
+++ b/queue-6.8/drm-amd-display-amend-coasting-vtotal-for-replay-low.patch
@@ -0,0 +1,159 @@
+From a7612f39973c9e4aab7954c6d039599a7285a47d Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 20 Feb 2024 17:08:39 +0800
+Subject: drm/amd/display: Amend coasting vtotal for replay low hz
+
+From: ChunTao Tso
+
+[ Upstream commit 8e054b0f1e71531762b8ded7f66c1b4af734671b ]
+
+[WHY]
+The original coasting vtotal is 2 bytes, and it needs to
+be amended to 4 bytes because of the low Hz case.
+
+[HOW]
+Amend coasting vtotal from 2 bytes to 4 bytes. 
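+
+As a rough illustration (numbers are assumed, not taken from this
+patch): coasting lowers the refresh rate by stretching V_TOTAL while
+the line timing stays put, so the value scales with the ratio of
+refresh rates. A timing of 2250 lines at 60 Hz would need
+2250 * 60 / 2 = 67500 lines to coast at 2 Hz, which no longer fits the
+65535 limit of a uint16_t; hence the move to uint32_t and the extra
+16-bit coasting_vtotal_high half of the DMUB command below.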
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Alvin Lee +Acked-by: Alex Hung +Signed-off-by: ChunTao Tso +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/dc_types.h | 4 ++-- + drivers/gpu/drm/amd/display/dc/inc/link.h | 4 ++-- + .../display/dc/link/protocols/link_edp_panel_control.c | 4 ++-- + .../display/dc/link/protocols/link_edp_panel_control.h | 4 ++-- + drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 8 ++++++++ + drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2 +- + drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 2 +- + 7 files changed, 18 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h +index 9900dda2eef5c..be2ac5c442a48 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_types.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h +@@ -1085,9 +1085,9 @@ struct replay_settings { + /* SMU optimization is enabled */ + bool replay_smu_opt_enable; + /* Current Coasting vtotal */ +- uint16_t coasting_vtotal; ++ uint32_t coasting_vtotal; + /* Coasting vtotal table */ +- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; ++ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; + /* Maximum link off frame count */ + enum replay_link_off_frame_count_level link_off_frame_count_level; + /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */ +diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h +index 26fe81f213da5..bf29fc58ea6a6 100644 +--- a/drivers/gpu/drm/amd/display/dc/inc/link.h ++++ b/drivers/gpu/drm/amd/display/dc/inc/link.h +@@ -285,12 +285,12 @@ struct link_service { + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); + bool (*edp_set_coasting_vtotal)( +- struct dc_link *link, uint16_t coasting_vtotal); ++ struct dc_link *link, uint32_t coasting_vtotal); + bool (*edp_replay_residency)(const struct dc_link *link, + unsigned int *residency, const bool is_start, + const bool is_alpm); + bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, +- const unsigned int *power_opts, uint16_t coasting_vtotal); ++ const unsigned int *power_opts, uint32_t coasting_vtotal); + + bool (*edp_wait_for_t12)(struct dc_link *link); + bool (*edp_is_ilr_optimization_required)(struct dc_link *link, +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +index f1489f4a40c12..d01b77fb9811a 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link, + return true; + } + +-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) ++bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) + { + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; +@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link, + } + + bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, +- const unsigned int *power_opts, uint16_t coasting_vtotal) ++ const unsigned int *power_opts, uint32_t coasting_vtotal) + { + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; +diff --git 
a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+index 34e521af7bb48..a158c6234d422 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
+ bool edp_send_replay_cmd(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data);
+-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
++bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
+ bool edp_replay_residency(const struct dc_link *link,
+ unsigned int *residency, const bool is_start, const bool is_alpm);
+ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+- const unsigned int *power_opts, uint16_t coasting_vtotal);
++ const unsigned int *power_opts, uint32_t coasting_vtotal);
+ bool edp_wait_for_t12(struct dc_link *link);
+ bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index e699731ee68e9..bb1f69a54c148 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -3133,6 +3133,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
++ /**
++ * 16-bit value dictated by the driver that holds the upper 16 bits of the coasting vtotal.
++ */
++ uint16_t coasting_vtotal_high;
++ /**
++ * Explicit padding to 4 byte boundary. 
++ */
++ uint8_t pad[2];
+ };
+
+ /**
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index e304e8435fb8f..2a3698fd2dc24 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
+
+ void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+- uint16_t vtotal)
++ uint32_t vtotal)
+ {
+ link->replay_settings.coasting_vtotal_table[type] = vtotal;
+ }
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+index bef4815e1703d..ff7e6f3cd6be2 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
+ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+ void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+- uint16_t vtotal);
++ uint32_t vtotal);
+ void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+ void calculate_replay_link_off_frame_count(struct dc_link *link,
+ uint16_t vtotal, uint16_t htotal);
+-- 
+2.43.0
+
diff --git a/queue-6.8/drm-amd-display-change-default-size-for-dummy-plane-.patch b/queue-6.8/drm-amd-display-change-default-size-for-dummy-plane-.patch
new file mode 100644
index 00000000000..409ad9b9403
--- /dev/null
+++ b/queue-6.8/drm-amd-display-change-default-size-for-dummy-plane-.patch
@@ -0,0 +1,72 @@
+From 6487e852e707e9cd1ea2349f65003cc774974f74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 13 Feb 2024 08:09:48 -0500
+Subject: drm/amd/display: Change default size for dummy plane in DML2
+
+From: Swapnil Patel
+
+[ Upstream commit 75eb8f7df65c5e6eb22a5aff8deb60ce0b65de1a ]
+
+[WHY & HOW]
+Currently, to map dc states into dml_display_cfg,
+we create a dummy plane if the stream doesn't have any planes
+attached to it. This dummy plane uses the max addressable width and height.
+This results in certain mode validations failing when they shouldn't. 
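+
+The open-coded comparisons in the hunk below behave like a simple clamp,
+i.e. (sketch only; the patch keeps the if/else form and the "// 4K max"
+comments):
+
+	width  = min(in->timing.h_addressable, 3840);
+	height = min(in->timing.v_addressable, 2160);
+
+Capping the dummy plane at 4K presumably keeps DML2 from validating
+against a worst-case plane far larger than anything the stream will
+actually present.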
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Chaitanya Dhere +Acked-by: Alex Hung +Signed-off-by: Swapnil Patel +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + .../display/dc/dml2/dml2_translation_helper.c | 18 +++++++++++++++--- + 1 file changed, 15 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +index 1ba6933d2b361..17a58f41fc6a8 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c ++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state + + static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) + { ++ dml_uint_t width, height; ++ ++ if (in->timing.h_addressable > 3840) ++ width = 3840; ++ else ++ width = in->timing.h_addressable; // 4K max ++ ++ if (in->timing.v_addressable > 2160) ++ height = 2160; ++ else ++ height = in->timing.v_addressable; // 4K max ++ + out->CursorBPP[location] = dml_cur_32bit; + out->CursorWidth[location] = 256; + + out->GPUVMMinPageSizeKBytes[location] = 256; + +- out->ViewportWidth[location] = in->timing.h_addressable; +- out->ViewportHeight[location] = in->timing.v_addressable; ++ out->ViewportWidth[location] = width; ++ out->ViewportHeight[location] = height; + out->ViewportStationary[location] = false; + out->ViewportWidthChroma[location] = 0; + out->ViewportHeightChroma[location] = 0; +@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned + out->HTapsChroma[location] = 0; + out->VTapsChroma[location] = 0; + out->SourceScan[location] = dml_rotation_0; +- out->ScalerRecoutWidth[location] = in->timing.h_addressable; ++ out->ScalerRecoutWidth[location] = width; + + out->LBBitPerPixel[location] = 57; + +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-fix-idle-check-for-shared-firmware-s.patch b/queue-6.8/drm-amd-display-fix-idle-check-for-shared-firmware-s.patch new file mode 100644 index 00000000000..49e320771b8 --- /dev/null +++ b/queue-6.8/drm-amd-display-fix-idle-check-for-shared-firmware-s.patch @@ -0,0 +1,63 @@ +From 030f9dedac1792627f73f817f8467c466932a749 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Feb 2024 12:27:31 -0500 +Subject: drm/amd/display: Fix idle check for shared firmware state + +From: Nicholas Kazlauskas + +[ Upstream commit 3d066f9547dd58329b526db44f42c487a7974703 ] + +[WHY] +We still had an instance of get_idle_state checking the PMFW scratch +register instead of the actual idle allow signal. + +[HOW] +Replace it with the SW state check for whether we had allowed idle +through notify_idle. 
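+
+Condensed from the hunk below (not a literal quote of the whole
+function): once the early-outs for disabled optimizations, missing IPS
+support and a missing DMUB service pass, the check reduces to
+
+	return dc->ctx->dmub_srv->idle_allowed;
+
+i.e. it trusts the driver-side flag maintained via notify_idle instead
+of decoding DMUB_IPS1_ALLOW_MASK/DMUB_IPS2_ALLOW_MASK out of a PMFW
+scratch register.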
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Duncan Ma +Acked-by: Alex Hung +Signed-off-by: Nicholas Kazlauskas +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/core/dc.c | 12 +++--------- + 1 file changed, 3 insertions(+), 9 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 75b8a0fff48f1..fdff99a1dff7a 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -4834,22 +4834,16 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) + + bool dc_dmub_is_ips_idle_state(struct dc *dc) + { +- uint32_t idle_state = 0; +- + if (dc->debug.disable_idle_power_optimizations) + return false; + + if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) + return false; + +- if (dc->hwss.get_idle_state) +- idle_state = dc->hwss.get_idle_state(dc); +- +- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) || +- !(idle_state & DMUB_IPS2_ALLOW_MASK)) +- return true; ++ if (!dc->ctx->dmub_srv) ++ return false; + +- return false; ++ return dc->ctx->dmub_srv->idle_allowed; + } + + /* set min and max memory clock to lowest and highest DPM level, respectively */ +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch b/queue-6.8/drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch new file mode 100644 index 00000000000..7311b317ab8 --- /dev/null +++ b/queue-6.8/drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch @@ -0,0 +1,59 @@ +From e96021808e02c80badb8a610c804677db74e6918 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 28 Jul 2023 08:35:07 -0400 +Subject: drm/amd/display: Fix noise issue on HDMI AV mute + +From: Leo Ma + +[ Upstream commit 69e3be6893a7e668660b05a966bead82bbddb01d ] + +[Why] +When mode switching is triggered there is momentary noise visible on +some HDMI TV or displays. + +[How] +Wait for 2 frames to make sure we have enough time to send out AV mute +and sink receives a full frame. 
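+
+For scale (illustrative figure, not from the patch): at 60 Hz one frame
+is 1/60 s = ~16.7 ms, so the VACTIVE/VBLANK waits added below cost
+roughly 2 x 16.7 ms = ~33 ms when asserting AV mute, and proportionally
+more at lower refresh rates.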
+Cc: Mario Limonciello
+Cc: Alex Deucher
+Cc: stable@vger.kernel.org
+Reviewed-by: Wenjing Liu
+Acked-by: Wayne Lin
+Signed-off-by: Leo Ma
+Tested-by: Daniel Wheeler
+Signed-off-by: Alex Deucher
+Signed-off-by: Sasha Levin
+---
+ .../gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+index c34c13e1e0a4e..55cf4c9e6aedf 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+@@ -663,10 +663,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+ if (pipe_ctx == NULL)
+ return;
+
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
++ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ pipe_ctx->stream_res.stream_enc,
+ enable);
++
++ /* Wait for two frames to make sure AV mute is sent out */
++ if (enable) {
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ }
++ }
+ }
+
+ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+-- 
+2.43.0
+
diff --git a/queue-6.8/drm-amd-display-implement-wait_for_odm_update_pendin.patch b/queue-6.8/drm-amd-display-implement-wait_for_odm_update_pendin.patch
new file mode 100644
index 00000000000..1c1bff1de23
--- /dev/null
+++ b/queue-6.8/drm-amd-display-implement-wait_for_odm_update_pendin.patch
@@ -0,0 +1,300 @@
+From 8b429b10c275d291b583dc663d633acad232f71c Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 23 Feb 2024 15:38:40 -0500
+Subject: drm/amd/display: Implement wait_for_odm_update_pending_complete
+
+From: Wenjing Liu
+
+[ Upstream commit 2d7f3d1a5866705be2393150e1ffdf67030ab88d ]
+
+[WHY]
+ODM update is double buffered. We need to wait for the ODM update to
+be completed before optimizing bandwidth or programming new updates.
+
+[HOW]
+Implement a wait_for_odm_update_pending_complete function that waits
+until:
+1. the ODM configuration update is no longer pending in the timing
+generator.
+2. no DPG pattern update is pending on any active OPP. 
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Alvin Lee +Acked-by: Alex Hung +Signed-off-by: Wenjing Liu +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/core/dc.c | 56 ++++++++++++++++++- + .../gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1 + + .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14 +++++ + .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2 + + .../drm/amd/display/dc/dcn201/dcn201_opp.c | 1 + + .../amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 4 +- + drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3 + + .../amd/display/dc/inc/hw/timing_generator.h | 1 + + .../amd/display/dc/optc/dcn10/dcn10_optc.h | 3 +- + .../amd/display/dc/optc/dcn32/dcn32_optc.c | 8 +++ + .../amd/display/dc/optc/dcn32/dcn32_optc.h | 1 + + 11 files changed, 90 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index fdff99a1dff7a..02e85b832a7d3 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -1282,6 +1282,54 @@ static void disable_vbios_mode_if_required( + } + } + ++/** ++ * wait_for_blank_complete - wait for all active OPPs to finish pending blank ++ * pattern updates ++ * ++ * @dc: [in] dc reference ++ * @context: [in] hardware context in use ++ */ ++static void wait_for_blank_complete(struct dc *dc, ++ struct dc_state *context) ++{ ++ struct pipe_ctx *opp_head; ++ struct dce_hwseq *hws = dc->hwseq; ++ int i; ++ ++ if (!hws->funcs.wait_for_blank_complete) ++ return; ++ ++ for (i = 0; i < MAX_PIPES; i++) { ++ opp_head = &context->res_ctx.pipe_ctx[i]; ++ ++ if (!resource_is_pipe_type(opp_head, OPP_HEAD) || ++ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) ++ continue; ++ ++ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); ++ } ++} ++ ++static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) ++{ ++ struct pipe_ctx *otg_master; ++ struct timing_generator *tg; ++ int i; ++ ++ for (i = 0; i < MAX_PIPES; i++) { ++ otg_master = &context->res_ctx.pipe_ctx[i]; ++ if (!resource_is_pipe_type(otg_master, OTG_MASTER) || ++ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) ++ continue; ++ tg = otg_master->stream_res.tg; ++ if (tg->funcs->wait_odm_doublebuffer_pending_clear) ++ tg->funcs->wait_odm_doublebuffer_pending_clear(tg); ++ } ++ ++ /* ODM update may require to reprogram blank pattern for each OPP */ ++ wait_for_blank_complete(dc, context); ++} ++ + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) + { + int i; +@@ -1969,6 +2017,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c + context->stream_count == 0) { + /* Must wait for no flips to be pending before doing optimize bw */ + wait_for_no_pipes_pending(dc, context); ++ /* ++ * optimized dispclk depends on ODM setup. Need to wait for ODM ++ * update pending complete before optimizing bandwidth. 
++ */ ++ wait_for_odm_update_pending_complete(dc, context); + /* pplib is notified if disp_num changed */ + dc->hwss.optimize_bandwidth(dc, context); + /* Need to do otg sync again as otg could be out of sync due to otg +@@ -3447,7 +3500,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, + top_pipe_to_program->stream->update_flags.raw = 0; + } + +-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) ++static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) + { + /* + * This function calls HWSS to wait for any potentially double buffered +@@ -3485,6 +3538,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state + } + } + } ++ wait_for_odm_update_pending_complete(dc, dc_context); + } + + static void commit_planes_for_stream(struct dc *dc, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +index 0dec57679269b..86bfed5dea2e2 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +@@ -377,6 +377,7 @@ static const struct opp_funcs dcn10_opp_funcs = { + .opp_set_disp_pattern_generator = NULL, + .opp_program_dpg_dimensions = NULL, + .dpg_is_blanked = NULL, ++ .dpg_is_pending = NULL, + .opp_destroy = opp1_destroy + }; + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +index 0784d01986610..fbf1b6370eb23 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) + (double_buffer_pending == 0); + } + ++bool opp2_dpg_is_pending(struct output_pixel_processor *opp) ++{ ++ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); ++ uint32_t double_buffer_pending; ++ uint32_t dpg_en; ++ ++ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); ++ ++ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); ++ ++ return (dpg_en == 1 && double_buffer_pending == 1); ++} ++ + void opp2_program_left_edge_extra_pixel ( + struct output_pixel_processor *opp, + bool count) +@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = { + .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, + .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, + .dpg_is_blanked = opp2_dpg_is_blanked, ++ .dpg_is_pending = opp2_dpg_is_pending, + .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, + .opp_destroy = opp1_destroy, + .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +index 3ab221bdd27dd..8f186abd558db 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions( + + bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); + ++bool opp2_dpg_is_pending(struct output_pixel_processor *opp); ++ + void opp2_dpg_set_blank_color( + struct output_pixel_processor *opp, + const struct tg_color *color); +diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +index 8e77db46a4090..6a71ba3dfc632 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs 
= { + .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, + .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, + .dpg_is_blanked = opp2_dpg_is_blanked, ++ .dpg_is_pending = opp2_dpg_is_pending, + .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, + .opp_destroy = opp1_destroy, + .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +index eb0480aee859d..c29c7eb017c37 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +@@ -2345,7 +2345,7 @@ bool dcn20_wait_for_blank_complete( + int counter; + + for (counter = 0; counter < 1000; counter++) { +- if (opp->funcs->dpg_is_blanked(opp)) ++ if (!opp->funcs->dpg_is_pending(opp)) + break; + + udelay(100); +@@ -2356,7 +2356,7 @@ bool dcn20_wait_for_blank_complete( + return false; + } + +- return true; ++ return opp->funcs->dpg_is_blanked(opp); + } + + bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +index 7617fabbd16ee..0717920812d86 100644 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +@@ -321,6 +321,9 @@ struct opp_funcs { + bool (*dpg_is_blanked)( + struct output_pixel_processor *opp); + ++ bool (*dpg_is_pending)(struct output_pixel_processor *opp); ++ ++ + void (*opp_dpg_set_blank_color)( + struct output_pixel_processor *opp, + const struct tg_color *color); +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +index 9a00a99317b29..cad3e5f148cf5 100644 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +@@ -333,6 +333,7 @@ struct timing_generator_funcs { + + void (*init_odm)(struct timing_generator *tg); + void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); ++ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); + }; + + #endif +diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +index ab81594a7fadc..6c2e84d3967fc 100644 +--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h ++++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +@@ -557,7 +557,8 @@ struct dcn_optc_registers { + type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ + type OTG_CRC_DATA_FORMAT;\ + type OTG_V_TOTAL_LAST_USED_BY_DRR;\ +- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; ++ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ ++ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; + + #define TG_REG_FIELD_LIST_DCN3_2(type) \ + type OTG_H_TIMING_DIV_MODE_MANUAL; +diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +index 8234935433254..f07a4c7e48bc2 100644 +--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c ++++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi + } + } + ++void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) ++{ ++ struct optc *optc1 = DCN10TG_FROM_TG(tg); ++ ++ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); ++} ++ + void 
optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) + { + struct optc *optc1 = DCN10TG_FROM_TG(optc); +@@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = { + .set_odm_bypass = optc32_set_odm_bypass, + .set_odm_combine = optc32_set_odm_combine, + .get_odm_combine_segments = optc32_get_odm_combine_segments, ++ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear, + .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, + .get_optc_source = optc2_get_optc_source, + .set_out_mux = optc3_set_out_mux, +diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +index 8ce3b178cab06..0c2c146955619 100644 +--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h ++++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man + void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); + void optc32_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing); ++void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); + + #endif /* __DC_OPTC_DCN32_H__ */ +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-init-dppclk-from-smu-on-dcn32.patch b/queue-6.8/drm-amd-display-init-dppclk-from-smu-on-dcn32.patch new file mode 100644 index 00000000000..c5ff9f6894c --- /dev/null +++ b/queue-6.8/drm-amd-display-init-dppclk-from-smu-on-dcn32.patch @@ -0,0 +1,156 @@ +From 884d3ec32e11e7938268ee18d5a25a44488ffae3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Feb 2024 13:21:20 -0500 +Subject: drm/amd/display: Init DPPCLK from SMU on dcn32 + +From: Dillon Varone + +[ Upstream commit 4f5b8d78ca43fcc695ba16c83ebfabbfe09506d6 ] + +[WHY & HOW] +DPPCLK ranges should be obtained from the SMU when available. 
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Chaitanya Dhere +Acked-by: Alex Hung +Signed-off-by: Dillon Varone +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + .../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 14 ++++++++++ + .../drm/amd/display/dc/dml2/dml2_wrapper.c | 28 +++++++++++++------ + .../drm/amd/display/dc/dml2/dml2_wrapper.h | 3 ++ + .../dc/resource/dcn32/dcn32_resource.c | 2 ++ + .../dc/resource/dcn321/dcn321_resource.c | 2 ++ + 5 files changed, 41 insertions(+), 8 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +index aadd07bc68c5d..bbdbc78161a00 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) + if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; + ++ /* DPPCLK */ ++ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK, ++ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz, ++ &num_entries_per_clk->num_dppclk_levels); ++ num_levels = num_entries_per_clk->num_dppclk_levels; ++ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK); ++ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x ++ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950) ++ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950; ++ + if (num_entries_per_clk->num_dcfclk_levels && + num_entries_per_clk->num_dtbclk_levels && + num_entries_per_clk->num_dispclk_levels) +@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) + = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz); + } + ++ for (i = 0; i < num_levels; i++) ++ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950) ++ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950; ++ + /* Get UCLK, update bounding box */ + clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); + +diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +index 2a58a7687bdb5..72cca367062e1 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c ++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void) + return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + } + +-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) ++static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) + { +- // Allocate Mode Lib Ctx +- *dml2 = dml2_allocate_memory(); +- +- if (!(*dml2)) +- return false; + + // Store config options + (*dml2)->config = *config; +@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options + initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); + + initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); ++} ++ ++bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) ++{ ++ // Allocate Mode Lib Ctx ++ *dml2 = dml2_allocate_memory(); ++ ++ 
if (!(*dml2)) ++ return false; ++ ++ dml2_init(in_dc, config, dml2); + +- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/ +- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init); + return true; + } + +@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2, + + return true; + } ++ ++void dml2_reinit(const struct dc *in_dc, ++ const struct dml2_configuration_options *config, ++ struct dml2_context **dml2) ++{ ++ ++ dml2_init(in_dc, config, dml2); ++} +diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +index ee0eb184eb6d7..cc662d682fd4d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h ++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2, + struct dml2_context *src_dml2); + bool dml2_create_copy(struct dml2_context **dst_dml2, + struct dml2_context *src_dml2); ++void dml2_reinit(const struct dc *in_dc, ++ const struct dml2_configuration_options *config, ++ struct dml2_context **dml2); + + /* + * dml2_validate - Determines if a display configuration is supported or not. +diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +index c4a4afd8d1a9d..9042378fa8dfb 100644 +--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +@@ -1931,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw + { + DC_FP_START(); + dcn32_update_bw_bounding_box_fpu(dc, bw_params); ++ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) ++ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + DC_FP_END(); + } + +diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +index 74412e5f03fef..f4dd6443a3551 100644 +--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b + { + DC_FP_START(); + dcn321_update_bw_bounding_box_fpu(dc, bw_params); ++ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) ++ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + DC_FP_END(); + } + +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-lock-all-enabled-otg-pipes-even-with.patch b/queue-6.8/drm-amd-display-lock-all-enabled-otg-pipes-even-with.patch new file mode 100644 index 00000000000..6ab1b86ffea --- /dev/null +++ b/queue-6.8/drm-amd-display-lock-all-enabled-otg-pipes-even-with.patch @@ -0,0 +1,93 @@ +From b45c2182509cd88bac42ea82e27dc05ab1fbfc95 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 15:17:39 -0500 +Subject: drm/amd/display: Lock all enabled otg pipes even with no planes + +From: Wenjing Liu + +[ Upstream commit 94040c2cbb1a872ff779da06bf034ccfee0f9cba ] + +[WHY] +On DCN32 we support dynamic ODM even when OTG is blanked. When ODM +configuration is dynamically changed and the OTG is on blank pattern, +we will need to reprogram OPP's test pattern based on new ODM +configuration. Therefore we need to lock the OTG pipe to avoid temporary +corruption when we are reprogramming OPP blank patterns. 
+ +[HOW] +Add a new interdependent update lock implementation to lock all enabled +OTG pipes even when there is no plane on the OTG for DCN32. + +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Alvin Lee +Acked-by: Alex Hung +Signed-off-by: Wenjing Liu +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + .../amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 23 +++++++++++++++++++ + .../amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 2 ++ + .../amd/display/dc/hwss/dcn32/dcn32_init.c | 2 +- + 3 files changed, 26 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +index b890db0bfc46b..c0b526cf17865 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +@@ -1785,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc, + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } + } ++ ++void dcn32_interdependent_update_lock(struct dc *dc, ++ struct dc_state *context, bool lock) ++{ ++ unsigned int i; ++ struct pipe_ctx *pipe; ++ struct timing_generator *tg; ++ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ pipe = &context->res_ctx.pipe_ctx[i]; ++ tg = pipe->stream_res.tg; ++ ++ if (!resource_is_pipe_type(pipe, OTG_MASTER) || ++ !tg->funcs->is_tg_enabled(tg) || ++ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) ++ continue; ++ ++ if (lock) ++ dc->hwss.pipe_control_lock(dc, pipe, true); ++ else ++ dc->hwss.pipe_control_lock(dc, pipe, false); ++ } ++} +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +index 069e20bc87c0a..f55c11fc56ec7 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, + void dcn32_prepare_bandwidth(struct dc *dc, + struct dc_state *context); + ++void dcn32_interdependent_update_lock(struct dc *dc, ++ struct dc_state *context, bool lock); + #endif /* __DC_HWSS_DCN32_H__ */ +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +index e8ac94a005b83..03253faeaeac6 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { + .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, +- .interdependent_update_lock = dcn10_lock_all_pipes, ++ .interdependent_update_lock = dcn32_interdependent_update_lock, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn32_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-override-min-required-dcfclk-in-dml1.patch b/queue-6.8/drm-amd-display-override-min-required-dcfclk-in-dml1.patch new file mode 100644 index 00000000000..b9d08d410de --- /dev/null +++ b/queue-6.8/drm-amd-display-override-min-required-dcfclk-in-dml1.patch @@ -0,0 +1,82 @@ +From c40abbc7b4894de0546bbcb86bd00df7a57a448a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Feb 2024 13:51:16 -0500 +Subject: drm/amd/display: Override min required DCFCLK in dml1_validate + +From: Sohaib 
Nadeem + +[ Upstream commit 26fbcb3da77efc77bd7327b7916338d773cca484 ] + +[WHY]: +Increasing min DCFCLK addresses underflow issues that occur when phantom +pipe is turned on for some Sub-Viewport configs + +[HOW]: +dcn32_override_min_req_dcfclk is added to override DCFCLK value in +dml1_validate when subviewport is being used. + +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Alvin Lee +Acked-by: Alex Hung +Signed-off-by: Sohaib Nadeem +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + .../gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 6 ++++++ + .../gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 1 + + .../gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 3 +++ + 3 files changed, 10 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +index 87760600e154d..f98def6c8c2d2 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc + pipe_cnt++; + } + } ++ ++void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context) ++{ ++ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ) ++ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ; ++} +diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +index 6f10052caeef0..c4a4afd8d1a9d 100644 +--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + dcn32_override_min_req_memclk(dc, context); ++ dcn32_override_min_req_dcfclk(dc, context); + + BW_VAL_TRACE_END_WATERMARKS(); + +diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +index 0c87b0fabba7d..2258c5c7212d8 100644 +--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h ++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +@@ -42,6 +42,7 @@ + #define SUBVP_ACTIVE_MARGIN_LIST_LEN 2 + #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 + #define DCN3_2_VMIN_DISPCLK_HZ 717000000 ++#define MIN_SUBVP_DCFCLK_KHZ 400000 + + #define TO_DCN32_RES_POOL(pool)\ + container_of(pool, struct dcn32_resource_pool, base) +@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int + + void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); + ++void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); ++ + /* definitions for run time init of reg offsets */ + + /* CLK SRC */ +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-remove-pixle-rate-limit-for-subvp.patch b/queue-6.8/drm-amd-display-remove-pixle-rate-limit-for-subvp.patch new file mode 100644 index 00000000000..998646e9742 --- /dev/null +++ b/queue-6.8/drm-amd-display-remove-pixle-rate-limit-for-subvp.patch @@ -0,0 +1,39 @@ +From 44aeff260a3fc2fbbc45f8db742e287818b23282 Mon Sep 17 00:00:00 2001 +From: 
Sasha Levin +Date: Fri, 9 Feb 2024 10:40:36 -0500 +Subject: drm/amd/display: Remove pixle rate limit for subvp + +From: Alvin Lee + +[ Upstream commit 340383c734f8a4e1663d26356b35fd8050851168 ] + +Subvp bugs related to 8K60 have been fixed, so remove the limit that +blocks 8K60 timings from enabling SubVP. + +Reviewed-by: Nevenko Stupar +Reviewed-by: Chaitanya Dhere +Acked-by: Rodrigo Siqueira +Tested-by: Daniel Wheeler +Signed-off-by: Alvin Lee +Signed-off-by: Alex Deucher +Stable-dep-of: cf8c498694a4 ("drm/amd/display: Revert Remove pixle rate limit for subvp") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +index a0a65e0991041..b49e1dc9d8ba5 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +@@ -623,7 +623,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, + * - Not TMZ surface + */ + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && +- !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && + (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && + (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-return-the-correct-hdcp-error-code.patch b/queue-6.8/drm-amd-display-return-the-correct-hdcp-error-code.patch new file mode 100644 index 00000000000..022ead5ead1 --- /dev/null +++ b/queue-6.8/drm-amd-display-return-the-correct-hdcp-error-code.patch @@ -0,0 +1,42 @@ +From b29f50928a92752fe04eb1c1b6f70c47cbb64a60 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Feb 2024 13:29:51 -0700 +Subject: drm/amd/display: Return the correct HDCP error code + +From: Rodrigo Siqueira + +[ Upstream commit e64b3f55e458ce7e2087a0051f47edabf74545e7 ] + +[WHY & HOW] +If the display is null when creating an HDCP session, return a proper +error code. 
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Acked-by: Alex Hung +Signed-off-by: Rodrigo Siqueira +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +index 8c137d7c032e1..7c9805705fd38 100644 +--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c ++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + ++ if (!display) ++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; ++ + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; + + if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-revert-remove-pixle-rate-limit-for-s.patch b/queue-6.8/drm-amd-display-revert-remove-pixle-rate-limit-for-s.patch new file mode 100644 index 00000000000..3cefcea4c33 --- /dev/null +++ b/queue-6.8/drm-amd-display-revert-remove-pixle-rate-limit-for-s.patch @@ -0,0 +1,48 @@ +From 8e3e1c55f77838ae3e3875a5c228ed8d3ea368f7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 4 Mar 2024 11:20:27 -0500 +Subject: drm/amd/display: Revert Remove pixle rate limit for subvp + +From: Wenjing Liu + +[ Upstream commit cf8c498694a443e28dc1222f3ab94677114a4724 ] + +This reverts commit 340383c734f8 ("drm/amd/display: Remove pixle rate +limit for subvp") + +[why] +The original commit causes a regression when subvp is applied +on ODM required 8k60hz timing. The display shows black screen +on boot. The issue can be recovered with hotplug. It also causes +MPO to fail. We will temprarily revert this commit and investigate +the root cause further. 
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Chaitanya Dhere +Reviewed-by: Martin Leung +Acked-by: Wayne Lin +Signed-off-by: Wenjing Liu +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +index b49e1dc9d8ba5..a0a65e0991041 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, + * - Not TMZ surface + */ + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && ++ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && + (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && + (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-unify-optimize_required-flags-and-vr.patch b/queue-6.8/drm-amd-display-unify-optimize_required-flags-and-vr.patch new file mode 100644 index 00000000000..dab72dcac36 --- /dev/null +++ b/queue-6.8/drm-amd-display-unify-optimize_required-flags-and-vr.patch @@ -0,0 +1,168 @@ +From 7c6f58944623a471a430cb68b475ed5841225758 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 30 Nov 2023 18:54:48 -0500 +Subject: drm/amd/display: Unify optimize_required flags and VRR adjustments + +From: Aric Cyr + +[ Upstream commit dcbf438d48341dc60e08e3df92120a4aeb097c84 ] + +[why] +There is only a single call to dc_post_update_surfaces_to_stream +so there is no need to have two flags to control it. Unifying +this to a single flag allows dc_stream_adjust_vmin_vmax to skip +actual programming when there is no change required. + +[how] +Remove wm_optimze_required flag and set only optimize_required in its +place. Then in dc_stream_adjust_vmin_vmax, check that the stream timing +range matches the requested one and skip programming if they are equal. + +Reviewed-by: Alvin Lee +Acked-by: Tom Chung +Signed-off-by: Aric Cyr +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Stable-dep-of: a9b1a4f684b3 ("drm/amd/display: Add more checks for exiting idle in DC") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/dc/core/dc.c | 14 +++++--------- + drivers/gpu/drm/amd/display/dc/dc.h | 1 - + drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 -- + .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 2 +- + .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 8 ++++---- + 5 files changed, 10 insertions(+), 17 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 02e85b832a7d3..3b65f216048e1 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -411,9 +411,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, + * avoid conflicting with firmware updates. 
+ */ + if (dc->ctx->dce_version > DCE_VERSION_MAX) +- if (dc->optimized_required || dc->wm_optimized_required) ++ if (dc->optimized_required) + return false; + ++ if (!memcmp(&stream->adjust, adjust, sizeof(*adjust))) ++ return true; ++ + stream->adjust.v_total_max = adjust->v_total_max; + stream->adjust.v_total_mid = adjust->v_total_mid; + stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; +@@ -2280,7 +2283,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) + } + + dc->optimized_required = false; +- dc->wm_optimized_required = false; + } + + bool dc_set_generic_gpio_for_stereo(bool enable, +@@ -2703,8 +2705,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( + } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { + dc->optimized_required = true; + } +- +- dc->optimized_required |= dc->wm_optimized_required; + } + + return type; +@@ -2912,9 +2912,6 @@ static void copy_stream_update_to_stream(struct dc *dc, + if (update->vrr_active_fixed) + stream->vrr_active_fixed = *update->vrr_active_fixed; + +- if (update->crtc_timing_adjust) +- stream->adjust = *update->crtc_timing_adjust; +- + if (update->dpms_off) + stream->dpms_off = *update->dpms_off; + +@@ -4350,8 +4347,7 @@ static bool full_update_required(struct dc *dc, + stream_update->mst_bw_update || + stream_update->func_shaper || + stream_update->lut3d_func || +- stream_update->pending_test_pattern || +- stream_update->crtc_timing_adjust)) ++ stream_update->pending_test_pattern)) + return true; + + if (stream) { +diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h +index c9317ea0258ea..7aa9954ec8407 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc.h ++++ b/drivers/gpu/drm/amd/display/dc/dc.h +@@ -1037,7 +1037,6 @@ struct dc { + + /* Require to optimize clocks and bandwidth for added/removed planes */ + bool optimized_required; +- bool wm_optimized_required; + bool idle_optimizations_allowed; + bool enable_c20_dtm_b0; + +diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h +index ee10941caa598..a23eebd9933b7 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h +@@ -139,7 +139,6 @@ union stream_update_flags { + uint32_t wb_update:1; + uint32_t dsc_changed : 1; + uint32_t mst_bw : 1; +- uint32_t crtc_timing_adjust : 1; + uint32_t fams_changed : 1; + } bits; + +@@ -326,7 +325,6 @@ struct dc_stream_update { + struct dc_3dlut *lut3d_func; + + struct test_pattern *pending_test_pattern; +- struct dc_crtc_timing_adjust *crtc_timing_adjust; + }; + + bool dc_is_stream_unchanged( +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +index f614bc2806d86..c45f84aa320ef 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +@@ -3079,7 +3079,7 @@ void dcn10_prepare_bandwidth( + context, + false); + +- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, ++ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + true); +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +index c29c7eb017c37..868a086c72a2c 100644 +--- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +@@ -2160,10 +2160,10 @@ void dcn20_prepare_bandwidth( + } + + /* program dchubbub watermarks: +- * For assigning wm_optimized_required, use |= operator since we don't want ++ * For assigning optimized_required, use |= operator since we don't want + * to clear the value if the optimize has not happened yet + */ +- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, ++ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + false); +@@ -2176,10 +2176,10 @@ void dcn20_prepare_bandwidth( + if (hubbub->funcs->program_compbuf_size) { + if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) { + compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; +- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); ++ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); + } else { + compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; +- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); ++ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); + } + + hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-display-update-odm-when-odm-combine-is-chang.patch b/queue-6.8/drm-amd-display-update-odm-when-odm-combine-is-chang.patch new file mode 100644 index 00000000000..a1a60432410 --- /dev/null +++ b/queue-6.8/drm-amd-display-update-odm-when-odm-combine-is-chang.patch @@ -0,0 +1,154 @@ +From a96792a1b2bfd9bed21a62c70f74d7c0b8e75149 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Feb 2024 16:55:04 -0500 +Subject: drm/amd/display: Update odm when ODM combine is changed on an otg + master pipe with no plane + +From: Wenjing Liu + +[ Upstream commit 86e9523fb0efce27095d3086473c739cce720d01 ] + +[WHY] +When committing an update with ODM combine change when the plane is +removing or already removed, we fail to detect odm change in pipe +update flags. This has caused mismatch between new dc state and the +actual hardware state, because we missed odm programming. + +[HOW] +- Detect odm change even for otg master pipe without a plane. +- Update odm config before calling program pipes for pipe with planes. + +The commit also updates blank pattern programming when odm is changed +without plane. This is because number of OPP is changed when ODM +combine is changed. Blank pattern is per OPP so we will need to +reprogram OPP based on the new pipe topology. 
+ +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Reviewed-by: Dillon Varone +Acked-by: Alex Hung +Signed-off-by: Wenjing Liu +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + .../amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 41 ++++++++++--------- + .../amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 7 ++++ + 2 files changed, 28 insertions(+), 20 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +index 931ac8ed7069d..eb0480aee859d 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +@@ -1392,6 +1392,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, + return; + } + ++ if (resource_is_pipe_type(new_pipe, OTG_MASTER) && ++ resource_is_odm_topology_changed(new_pipe, old_pipe)) ++ /* Detect odm changes */ ++ new_pipe->update_flags.bits.odm = 1; ++ + /* Exit on unchanged, unused pipe */ + if (!old_pipe->plane_state && !new_pipe->plane_state) + return; +@@ -1445,10 +1450,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, + + /* Detect top pipe only changes */ + if (resource_is_pipe_type(new_pipe, OTG_MASTER)) { +- /* Detect odm changes */ +- if (resource_is_odm_topology_changed(new_pipe, old_pipe)) +- new_pipe->update_flags.bits.odm = 1; +- + /* Detect global sync changes */ + if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset + || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start +@@ -1891,19 +1892,20 @@ void dcn20_program_front_end_for_ctx( + DC_LOGGER_INIT(dc->ctx->logger); + unsigned int prev_hubp_count = 0; + unsigned int hubp_count = 0; ++ struct pipe_ctx *pipe; + + if (resource_is_pipe_topology_changed(dc->current_state, context)) + resource_log_pipe_topology_update(dc, context); + + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { +- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; ++ pipe = &context->res_ctx.pipe_ctx[i]; + +- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { +- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); ++ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { ++ ASSERT(!pipe->plane_state->triplebuffer_flips); + /*turn off triple buffer for full update*/ + dc->hwss.program_triplebuffer( +- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); ++ dc, pipe, pipe->plane_state->triplebuffer_flips); + } + } + } +@@ -1978,12 +1980,22 @@ void dcn20_program_front_end_for_ctx( + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); + } + ++ /* update ODM for blanked OTG master pipes */ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ pipe = &context->res_ctx.pipe_ctx[i]; ++ if (resource_is_pipe_type(pipe, OTG_MASTER) && ++ !resource_is_pipe_type(pipe, DPP_PIPE) && ++ pipe->update_flags.bits.odm && ++ hws->funcs.update_odm) ++ hws->funcs.update_odm(dc, context, pipe); ++ } ++ + /* + * Program all updated pipes, order matters for mpcc setup. 
Start with + * top pipe and program all pipes that follow in order + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { +- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; ++ pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe) { + while (pipe) { +@@ -2022,17 +2034,6 @@ void dcn20_program_front_end_for_ctx( + context->stream_status[0].plane_count > 1) { + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); + } +- +- /* when dynamic ODM is active, pipes must be reconfigured when all planes are +- * disabled, as some transitions will leave software and hardware state +- * mismatched. +- */ +- if (dc->debug.enable_single_display_2to1_odm_policy && +- pipe->stream && +- pipe->update_flags.bits.disable && +- !pipe->prev_odm_pipe && +- hws->funcs.update_odm) +- hws->funcs.update_odm(dc, context, pipe); + } + } + +diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +index aa36d7a56ca8c..b890db0bfc46b 100644 +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * + dsc->funcs->dsc_disconnect(dsc); + } + } ++ ++ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE)) ++ /* ++ * blank pattern is generated by OPP, reprogram blank pattern ++ * due to OPP count change ++ */ ++ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true); + } + + unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) +-- +2.43.0 + diff --git a/queue-6.8/drm-amdgpu-amdgpu_ttm_gart_bind-set-gtt-bound-flag.patch b/queue-6.8/drm-amdgpu-amdgpu_ttm_gart_bind-set-gtt-bound-flag.patch new file mode 100644 index 00000000000..93cd0b7ff53 --- /dev/null +++ b/queue-6.8/drm-amdgpu-amdgpu_ttm_gart_bind-set-gtt-bound-flag.patch @@ -0,0 +1,42 @@ +From c0343127c6106790ca17491b6fe7db4d0297aca7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Mar 2024 18:07:34 -0400 +Subject: drm/amdgpu: amdgpu_ttm_gart_bind set gtt bound flag +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Philip Yang + +[ Upstream commit 6c6064cbe58b43533e3451ad6a8ba9736c109ac3 ] + +Otherwise after the GTT bo is released, the GTT and gart space is freed +but amdgpu_ttm_backend_unbind will not clear the gart page table entry +and leave valid mapping entry pointing to the stale system page. Then +if GPU access the gart address mistakely, it will read undefined value +instead page fault, harder to debug and reproduce the real issue. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Philip Yang +Reviewed-by: Christian König +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +index 75c9fd2c6c2a1..b0ed10f4de609 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +@@ -869,6 +869,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, + amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + gtt->ttm.dma_address, flags); + } ++ gtt->bound = true; + } + + /* +-- +2.43.0 + diff --git a/queue-6.8/drm-amdgpu-display-address-kdoc-for-is_psr_su-in-fil.patch b/queue-6.8/drm-amdgpu-display-address-kdoc-for-is_psr_su-in-fil.patch new file mode 100644 index 00000000000..28117d549c1 --- /dev/null +++ b/queue-6.8/drm-amdgpu-display-address-kdoc-for-is_psr_su-in-fil.patch @@ -0,0 +1,51 @@ +From f4424aca85ed5fa75ec1ec821b5eaf6af21bddc9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Feb 2024 18:25:40 +0530 +Subject: drm/amdgpu/display: Address kdoc for 'is_psr_su' in + 'fill_dc_dirty_rects' + +From: Srinivasan Shanmugam + +[ Upstream commit 3651306ae4c7f3f54caa9feb826a93cc69ccebbf ] + +The is_psr_su parameter is a boolean flag indicating whether the Panel +Self Refresh Selective Update (PSR SU) feature is enabled which is a +power-saving feature that allows only the updated regions of the screen +to be refreshed, reducing the amount of data that needs to be sent to +the display. + +Fixes the below with gcc W=1: +drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:5257: warning: Function parameter or member 'is_psr_su' not described in 'fill_dc_dirty_rects' + +Fixes: d16df040c8da ("drm/amdgpu: make damage clips support configurable") +Cc: stable@vger.kernel.org +Cc: Hamza Mahfooz +Cc: Mario Limonciello +Cc: Rodrigo Siqueira +Cc: Aurabindo Pillai +Signed-off-by: Srinivasan Shanmugam +Reviewed-by: Rodrigo Siqueira +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 8083e7156afcf..f3065ffa23785 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -5199,6 +5199,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, + * @new_plane_state: New state of @plane + * @crtc_state: New state of CRTC connected to the @plane + * @flip_addrs: DC flip tracking struct, which also tracts dirty rects ++ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled. ++ * If PSR SU is enabled and damage clips are available, only the regions of the screen ++ * that have changed will be updated. If PSR SU is not enabled, ++ * or if damage clips are not available, the entire screen will be updated. 
+ * @dirty_regions_changed: dirty regions changed + * + * For PSR SU, DC informs the DMUB uController of dirty rectangle regions +-- +2.43.0 + diff --git a/queue-6.8/drm-amdgpu-pm-check-the-validity-of-overdiver-power-.patch b/queue-6.8/drm-amdgpu-pm-check-the-validity-of-overdiver-power-.patch new file mode 100644 index 00000000000..ca9a1c0419c --- /dev/null +++ b/queue-6.8/drm-amdgpu-pm-check-the-validity-of-overdiver-power-.patch @@ -0,0 +1,186 @@ +From ca917267ba603473e37577610f2f5b884dd08572 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Mar 2024 15:23:34 +0800 +Subject: drm/amdgpu/pm: Check the validity of overdiver power limit + +From: Ma Jun + +[ Upstream commit e17718251addb31e1771fd28735ec410e6ca650a ] + +Check the validity of overdriver power limit before using it. + +Fixes: 7968e9748fbb ("drm/amdgpu/pm: Fix the power1_min_cap value") +Signed-off-by: Ma Jun +Suggested-by: Lazar Lijo +Suggested-by: Alex Deucher +Reviewed-by: Alex Deucher +Signed-off-by: Alex Deucher +Cc: stable@vger.kernel.org +Signed-off-by: Sasha Levin +--- + .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 11 +++++---- + .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 9 ++++---- + .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 23 +++++++++++-------- + .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 10 ++++---- + .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 10 ++++---- + 5 files changed, 37 insertions(+), 26 deletions(-) + +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +index a406372e79d86..40ba7227cca58 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +@@ -1285,6 +1285,7 @@ static int arcturus_get_power_limit(struct smu_context *smu, + { + struct smu_11_0_powerplay_table *powerplay_table = + (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; ++ struct smu_11_0_overdrive_table *od_settings = smu->od_settings; + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + +@@ -1304,12 +1305,14 @@ static int arcturus_get_power_limit(struct smu_context *smu, + *default_power_limit = power_limit; + + if (powerplay_table) { +- if (smu->od_enabled) ++ if (smu->od_enabled && ++ od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); +- else ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } else if (od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) { + od_percent_upper = 0; +- +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +index 65bba5fc2335e..836b1df799286 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +@@ -2358,12 +2358,13 @@ static int navi10_get_power_limit(struct smu_context *smu, + + if (powerplay_table) { + if (smu->od_enabled && +- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) ++ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + 
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); +- else ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + od_percent_upper = 0; +- +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +index 395718b48131b..1f18b61884f3f 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s + return throttler_status; + } + ++static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table, ++ enum SMU_11_0_7_ODFEATURE_CAP cap) ++{ ++ return od_table->cap[cap]; ++} ++ + static int sienna_cichlid_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, +@@ -625,6 +631,7 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, + { + struct smu_11_0_7_powerplay_table *powerplay_table = + (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table; ++ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + uint16_t *table_member; + +@@ -641,12 +648,14 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, + *default_power_limit = power_limit; + + if (powerplay_table) { +- if (smu->od_enabled) ++ if (smu->od_enabled && ++ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); +- else ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) { + od_percent_upper = 0; +- +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", +@@ -1252,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu, + return dpm_desc->SnapToDiscrete == 0; + } + +-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table, +- enum SMU_11_0_7_ODFEATURE_CAP cap) +-{ +- return od_table->cap[cap]; +-} +- + static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table, + enum SMU_11_0_7_ODSETTING_ID setting, + uint32_t *min, uint32_t *max) +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +index 7873f024d4294..9c03296f92cdd 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +@@ 
-2370,12 +2370,14 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, + *default_power_limit = power_limit; + + if (powerplay_table) { +- if (smu->od_enabled) ++ if (smu->od_enabled && ++ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); +- else ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; +- +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +index 8abf0a772e6b0..7318964f1f148 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +@@ -2334,12 +2334,14 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, + *default_power_limit = power_limit; + + if (powerplay_table) { +- if (smu->od_enabled) ++ if (smu->od_enabled && ++ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); +- else ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; +- +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", +-- +2.43.0 + diff --git a/queue-6.8/drm-amdgpu-pm-fix-null-pointer-dereference-when-get-.patch b/queue-6.8/drm-amdgpu-pm-fix-null-pointer-dereference-when-get-.patch new file mode 100644 index 00000000000..c59e34e5ca7 --- /dev/null +++ b/queue-6.8/drm-amdgpu-pm-fix-null-pointer-dereference-when-get-.patch @@ -0,0 +1,200 @@ +From 5a893a052afe0f00808031f1dc92965efa4339b0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Mar 2024 14:38:34 +0800 +Subject: drm/amdgpu/pm: Fix NULL pointer dereference when get power limit + +From: Ma Jun + +[ Upstream commit 08ae9ef829b8055c2fdc8cfee37510c1f4721a07 ] + +Because powerplay_table initialization is skipped under +sriov case, We check and set default lower and upper OD +value if powerplay_table is NULL. 
+ +Fixes: 7968e9748fbb ("drm/amdgpu/pm: Fix the power1_min_cap value") +Signed-off-by: Ma Jun +Reported-by: Yin Zhenguo +Suggested-by: Lazar Lijo +Suggested-by: Alex Deucher +Reviewed-by: Alex Deucher +Signed-off-by: Alex Deucher +Cc: stable@vger.kernel.org +Signed-off-by: Sasha Levin +--- + .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 14 ++++++++------ + drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 16 +++++++++------- + .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 14 ++++++++------ + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 14 ++++++++------ + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 14 ++++++++------ + 5 files changed, 41 insertions(+), 31 deletions(-) + +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +index 1d96eb274d72d..a406372e79d86 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +@@ -1286,7 +1286,7 @@ static int arcturus_get_power_limit(struct smu_context *smu, + struct smu_11_0_powerplay_table *powerplay_table = + (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; + PPTable_t *pptable = smu->smu_table.driver_pptable; +- uint32_t power_limit, od_percent_upper, od_percent_lower; ++ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + + if (smu_v11_0_get_current_power_limit(smu, &power_limit)) { + /* the last hope to figure out the ppt limit */ +@@ -1303,12 +1303,14 @@ static int arcturus_get_power_limit(struct smu_context *smu, + if (default_power_limit) + *default_power_limit = power_limit; + +- if (smu->od_enabled) +- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); +- else +- od_percent_upper = 0; ++ if (powerplay_table) { ++ if (smu->od_enabled) ++ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ else ++ od_percent_upper = 0; + +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +index ed189a3878ebe..65bba5fc2335e 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu, + (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; + struct smu_11_0_overdrive_table *od_settings = smu->od_settings; + PPTable_t *pptable = smu->smu_table.driver_pptable; +- uint32_t power_limit, od_percent_upper, od_percent_lower; ++ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + + if (smu_v11_0_get_current_power_limit(smu, &power_limit)) { + /* the last hope to figure out the ppt limit */ +@@ -2356,13 +2356,15 @@ static int navi10_get_power_limit(struct smu_context *smu, + if (default_power_limit) + *default_power_limit = power_limit; + +- if (smu->od_enabled && +- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) +- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); +- else +- od_percent_upper = 0; ++ if 
(powerplay_table) { ++ if (smu->od_enabled && ++ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) ++ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ else ++ od_percent_upper = 0; + +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); ++ } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +index e2ad2b972ab0b..395718b48131b 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +@@ -625,7 +625,7 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, + { + struct smu_11_0_7_powerplay_table *powerplay_table = + (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table; +- uint32_t power_limit, od_percent_upper, od_percent_lower; ++ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + uint16_t *table_member; + + GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member); +@@ -640,12 +640,14 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, + if (default_power_limit) + *default_power_limit = power_limit; + +- if (smu->od_enabled) +- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); +- else +- od_percent_upper = 0; ++ if (powerplay_table) { ++ if (smu->od_enabled) ++ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ else ++ od_percent_upper = 0; + +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); ++ } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +index 9b80f18ea6c35..7873f024d4294 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, + (struct smu_13_0_0_powerplay_table *)table_context->power_play_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; +- uint32_t power_limit, od_percent_upper, od_percent_lower; ++ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + + if (smu_v13_0_get_current_power_limit(smu, &power_limit)) +@@ -2369,12 +2369,14 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, + if (default_power_limit) + *default_power_limit = power_limit; + +- if (smu->od_enabled) +- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); +- else +- od_percent_upper = 0; ++ if (powerplay_table) { ++ if (smu->od_enabled) ++ od_percent_upper = 
le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ else ++ od_percent_upper = 0; + +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); ++ } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +index 3dc7b60cb0754..8abf0a772e6b0 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, + (struct smu_13_0_7_powerplay_table *)table_context->power_play_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; +- uint32_t power_limit, od_percent_upper, od_percent_lower; ++ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; + uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + + if (smu_v13_0_get_current_power_limit(smu, &power_limit)) +@@ -2333,12 +2333,14 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, + if (default_power_limit) + *default_power_limit = power_limit; + +- if (smu->od_enabled) +- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); +- else +- od_percent_upper = 0; ++ if (powerplay_table) { ++ if (smu->od_enabled) ++ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ else ++ od_percent_upper = 0; + +- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); ++ } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); +-- +2.43.0 + diff --git a/queue-6.8/drm-bridge-add-edid_read-hook-and-drm_bridge_edid_re.patch b/queue-6.8/drm-bridge-add-edid_read-hook-and-drm_bridge_edid_re.patch new file mode 100644 index 00000000000..d0875f0d711 --- /dev/null +++ b/queue-6.8/drm-bridge-add-edid_read-hook-and-drm_bridge_edid_re.patch @@ -0,0 +1,150 @@ +From 7f9446bb1fa3683771d84dff48c4cf105228afeb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 23 Jan 2024 21:37:07 +0200 +Subject: drm/bridge: add ->edid_read hook and drm_bridge_edid_read() + +From: Jani Nikula + +[ Upstream commit d807ad80d811ba0c22adfd871e2a46491f80d6e2 ] + +Add new struct drm_edid based ->edid_read hook and +drm_bridge_edid_read() function to call the hook. 
+ +v2: Include drm/drm_edid.h + +Signed-off-by: Jani Nikula +Reviewed-by: Neil Armstrong +Link: https://patchwork.freedesktop.org/patch/msgid/9d08d22eaffcb9c59a2b677e45d7e61fc689bc2f.1706038510.git.jani.nikula@intel.com +Stable-dep-of: 171b711b26cc ("drm/bridge: lt8912b: do not return negative values from .get_modes()") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/drm_bridge.c | 46 +++++++++++++++++++++++++++++++++++- + include/drm/drm_bridge.h | 33 ++++++++++++++++++++++++++ + 2 files changed, 78 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c +index cee3188adf3d5..4f6f8c662d3fe 100644 +--- a/drivers/gpu/drm/drm_bridge.c ++++ b/drivers/gpu/drm/drm_bridge.c +@@ -27,8 +27,9 @@ + #include + + #include +-#include + #include ++#include ++#include + #include + #include + #include +@@ -1206,6 +1207,47 @@ int drm_bridge_get_modes(struct drm_bridge *bridge, + } + EXPORT_SYMBOL_GPL(drm_bridge_get_modes); + ++/** ++ * drm_bridge_edid_read - read the EDID data of the connected display ++ * @bridge: bridge control structure ++ * @connector: the connector to read EDID for ++ * ++ * If the bridge supports output EDID retrieval, as reported by the ++ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get ++ * the EDID and return it. Otherwise return NULL. ++ * ++ * If &drm_bridge_funcs.edid_read is not set, fall back to using ++ * drm_bridge_get_edid() and wrapping it in struct drm_edid. ++ * ++ * RETURNS: ++ * The retrieved EDID on success, or NULL otherwise. ++ */ ++const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, ++ struct drm_connector *connector) ++{ ++ if (!(bridge->ops & DRM_BRIDGE_OP_EDID)) ++ return NULL; ++ ++ /* Transitional: Fall back to ->get_edid. */ ++ if (!bridge->funcs->edid_read) { ++ const struct drm_edid *drm_edid; ++ struct edid *edid; ++ ++ edid = drm_bridge_get_edid(bridge, connector); ++ if (!edid) ++ return NULL; ++ ++ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); ++ ++ kfree(edid); ++ ++ return drm_edid; ++ } ++ ++ return bridge->funcs->edid_read(bridge, connector); ++} ++EXPORT_SYMBOL_GPL(drm_bridge_edid_read); ++ + /** + * drm_bridge_get_edid - get the EDID data of the connected display + * @bridge: bridge control structure +@@ -1215,6 +1257,8 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes); + * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to + * get the EDID and return it. Otherwise return NULL. + * ++ * Deprecated. Prefer using drm_bridge_edid_read(). ++ * + * RETURNS: + * The retrieved EDID on success, or NULL otherwise. + */ +diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h +index e39da5807ba71..b7aed3ead705b 100644 +--- a/include/drm/drm_bridge.h ++++ b/include/drm/drm_bridge.h +@@ -557,6 +557,37 @@ struct drm_bridge_funcs { + int (*get_modes)(struct drm_bridge *bridge, + struct drm_connector *connector); + ++ /** ++ * @edid_read: ++ * ++ * Read the EDID data of the connected display. ++ * ++ * The @edid_read callback is the preferred way of reporting mode ++ * information for a display connected to the bridge output. Bridges ++ * that support reading EDID shall implement this callback and leave ++ * the @get_modes callback unimplemented. ++ * ++ * The caller of this operation shall first verify the output ++ * connection status and refrain from reading EDID from a disconnected ++ * output. ++ * ++ * This callback is optional. 
Bridges that implement it shall set the ++ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. ++ * ++ * The connector parameter shall be used for the sole purpose of EDID ++ * retrieval, and shall not be stored internally by bridge drivers for ++ * future usage. ++ * ++ * RETURNS: ++ * ++ * An edid structure newly allocated with drm_edid_alloc() or returned ++ * from drm_edid_read() family of functions on success, or NULL ++ * otherwise. The caller is responsible for freeing the returned edid ++ * structure with drm_edid_free(). ++ */ ++ const struct drm_edid *(*edid_read)(struct drm_bridge *bridge, ++ struct drm_connector *connector); ++ + /** + * @get_edid: + * +@@ -888,6 +919,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); + int drm_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector); ++const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, ++ struct drm_connector *connector); + struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector); + void drm_bridge_hpd_enable(struct drm_bridge *bridge, +-- +2.43.0 + diff --git a/queue-6.8/drm-bridge-lt8912b-clear-the-edid-property-on-failur.patch b/queue-6.8/drm-bridge-lt8912b-clear-the-edid-property-on-failur.patch new file mode 100644 index 00000000000..28961cebfb9 --- /dev/null +++ b/queue-6.8/drm-bridge-lt8912b-clear-the-edid-property-on-failur.patch @@ -0,0 +1,44 @@ +From 43b1e1ccc23c611b8ff0f88e85e370e933d1bf58 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 23 Jan 2024 21:37:11 +0200 +Subject: drm/bridge: lt8912b: clear the EDID property on failures + +From: Jani Nikula + +[ Upstream commit 29e032296da5d6294378ffa8bad8e976c5aadbf5 ] + +If EDID read fails, clear the EDID property. 
+ +Cc: Adrien Grassein +Cc: Andrzej Hajda +Cc: Neil Armstrong +Cc: Robert Foss +Cc: Laurent Pinchart +Cc: Jonas Karlman +Cc: Jernej Skrabec +Signed-off-by: Jani Nikula +Reviewed-by: Neil Armstrong +Link: https://patchwork.freedesktop.org/patch/msgid/2080adaadf4bba3d85f58c42c065caf9aad9a4ef.1706038510.git.jani.nikula@intel.com +Stable-dep-of: 171b711b26cc ("drm/bridge: lt8912b: do not return negative values from .get_modes()") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/lontium-lt8912b.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c +index 4dc748d5d1ee0..9c0ffc1c6fac4 100644 +--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c ++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c +@@ -447,8 +447,8 @@ static int lt8912_connector_get_modes(struct drm_connector *connector) + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector); ++ drm_edid_connector_update(connector, drm_edid); + if (drm_edid) { +- drm_edid_connector_update(connector, drm_edid); + num = drm_edid_connector_add_modes(connector); + } else { + return ret; +-- +2.43.0 + diff --git a/queue-6.8/drm-bridge-lt8912b-do-not-return-negative-values-fro.patch b/queue-6.8/drm-bridge-lt8912b-do-not-return-negative-values-fro.patch new file mode 100644 index 00000000000..24e6649ae6d --- /dev/null +++ b/queue-6.8/drm-bridge-lt8912b-do-not-return-negative-values-fro.patch @@ -0,0 +1,60 @@ +From 3e2f94529f627a3883ccbfb40d84cecdb2f2af35 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:42 +0200 +Subject: drm/bridge: lt8912b: do not return negative values from .get_modes() + +From: Jani Nikula + +[ Upstream commit 171b711b26cce208bb628526b1b368aeec7b6fa4 ] + +The .get_modes() hooks aren't supposed to return negative error +codes. Return 0 for no modes, whatever the reason. 
+ +Cc: Adrien Grassein +Cc: stable@vger.kernel.org +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/dcdddcbcb64b6f6cdc55022ee50c10dee8ddbc3d.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/lontium-lt8912b.c | 16 +++++++--------- + 1 file changed, 7 insertions(+), 9 deletions(-) + +diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c +index 9c0ffc1c6fac4..97d4af3d13653 100644 +--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c ++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c +@@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector, + static int lt8912_connector_get_modes(struct drm_connector *connector) + { + const struct drm_edid *drm_edid; +- int ret = -1; +- int num = 0; + struct lt8912 *lt = connector_to_lt8912(connector); + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; ++ int ret, num; + + drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector); + drm_edid_connector_update(connector, drm_edid); +- if (drm_edid) { +- num = drm_edid_connector_add_modes(connector); +- } else { +- return ret; +- } ++ if (!drm_edid) ++ return 0; ++ ++ num = drm_edid_connector_add_modes(connector); + + ret = drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); +- if (ret) +- num = ret; ++ if (ret < 0) ++ num = 0; + + drm_edid_free(drm_edid); + return num; +-- +2.43.0 + diff --git a/queue-6.8/drm-bridge-lt8912b-use-drm_bridge_edid_read.patch b/queue-6.8/drm-bridge-lt8912b-use-drm_bridge_edid_read.patch new file mode 100644 index 00000000000..91dbaf7c511 --- /dev/null +++ b/queue-6.8/drm-bridge-lt8912b-use-drm_bridge_edid_read.patch @@ -0,0 +1,65 @@ +From ce8da8a13c7f86b51b0c6ef228edac067a64297f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 23 Jan 2024 21:37:10 +0200 +Subject: drm/bridge: lt8912b: use drm_bridge_edid_read() + +From: Jani Nikula + +[ Upstream commit 60d1fe1a7f302cc1151b155ac2d134db59bb1420 ] + +Prefer using the struct drm_edid based functions. 
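+
+On the bridge side, a hypothetical .edid_read hook (my_* names are
+placeholders; the bridge is assumed to have filled in bridge->ddc and
+to set DRM_BRIDGE_OP_EDID in its ops) is little more than a call into
+the struct drm_edid readers:
+
+  static const struct drm_edid *
+  my_bridge_edid_read(struct drm_bridge *bridge,
+                      struct drm_connector *connector)
+  {
+          /* read the EDID over the bridge's DDC i2c adapter */
+          return drm_edid_read_ddc(connector, bridge->ddc);
+  }
+
+  static const struct drm_bridge_funcs my_bridge_funcs = {
+          .edid_read = my_bridge_edid_read,
+  };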
+ +cc: Adrien Grassein +Cc: Andrzej Hajda +Cc: Neil Armstrong +Cc: Robert Foss +Cc: Laurent Pinchart +Cc: Jonas Karlman +Cc: Jernej Skrabec +Signed-off-by: Jani Nikula +Reviewed-by: Neil Armstrong +Link: https://patchwork.freedesktop.org/patch/msgid/32c9b52fe6fa7cbad6bfd0ff00041876977e02ea.1706038510.git.jani.nikula@intel.com +Stable-dep-of: 171b711b26cc ("drm/bridge: lt8912b: do not return negative values from .get_modes()") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/bridge/lontium-lt8912b.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c +index 273157428c827..4dc748d5d1ee0 100644 +--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c ++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c +@@ -440,16 +440,16 @@ lt8912_connector_mode_valid(struct drm_connector *connector, + + static int lt8912_connector_get_modes(struct drm_connector *connector) + { +- struct edid *edid; ++ const struct drm_edid *drm_edid; + int ret = -1; + int num = 0; + struct lt8912 *lt = connector_to_lt8912(connector); + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + +- edid = drm_bridge_get_edid(lt->hdmi_port, connector); +- if (edid) { +- drm_connector_update_edid_property(connector, edid); +- num = drm_add_edid_modes(connector, edid); ++ drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector); ++ if (drm_edid) { ++ drm_edid_connector_update(connector, drm_edid); ++ num = drm_edid_connector_add_modes(connector); + } else { + return ret; + } +@@ -459,7 +459,7 @@ static int lt8912_connector_get_modes(struct drm_connector *connector) + if (ret) + num = ret; + +- kfree(edid); ++ drm_edid_free(drm_edid); + return num; + } + +-- +2.43.0 + diff --git a/queue-6.8/drm-etnaviv-restore-some-id-values.patch b/queue-6.8/drm-etnaviv-restore-some-id-values.patch new file mode 100644 index 00000000000..9e3731055fa --- /dev/null +++ b/queue-6.8/drm-etnaviv-restore-some-id-values.patch @@ -0,0 +1,75 @@ +From 89505e8ab69be995be128d107cf78e05e9026bc3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 14:28:11 +0100 +Subject: drm/etnaviv: Restore some id values + +From: Christian Gmeiner + +[ Upstream commit b735ee173f84d5d0d0733c53946a83c12d770d05 ] + +The hwdb selection logic as a feature that allows it to mark some fields +as 'don't care'. If we match with such a field we memcpy(..) +the current etnaviv_chip_identity into ident. + +This step can overwrite some id values read from the GPU with the +'don't care' value. + +Fix this issue by restoring the affected values after the memcpy(..). + +As this is crucial for user space to know when this feature works as +expected increment the minor version too. 
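+
+The effect is easy to reproduce outside the driver. The stand-alone toy
+below (not etnaviv code; struct and values are invented) shows how a
+wildcard hwdb entry clobbers the ids read from the hardware once it is
+memcpy'd over them, and how saving and restoring those fields keeps the
+probed values:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct chip_id { unsigned int model, product_id, customer_id, eco_id; };
+
+  /* hwdb-style entry: ~0u means "don't care" for that field */
+  static const struct chip_id entry = { 0x7000, ~0u, ~0u, ~0u };
+
+  int main(void)
+  {
+          /* values as read back from the hardware */
+          struct chip_id ident = { 0x7000, 0x45, 0x30, 0x3 };
+          unsigned int product_id = ident.product_id;
+          unsigned int customer_id = ident.customer_id;
+          unsigned int eco_id = ident.eco_id;
+
+          memcpy(&ident, &entry, sizeof(ident)); /* wildcards are now ~0u */
+
+          /* restore what the wholesale copy just overwrote */
+          ident.product_id = product_id;
+          ident.customer_id = customer_id;
+          ident.eco_id = eco_id;
+
+          printf("product=0x%x customer=0x%x eco=0x%x\n",
+                 ident.product_id, ident.customer_id, ident.eco_id);
+          return 0;
+  }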
+ +Fixes: 4078a1186dd3 ("drm/etnaviv: update hwdb selection logic") +Cc: stable@vger.kernel.org +Signed-off-by: Christian Gmeiner +Reviewed-by: Tomeu Vizoso +Signed-off-by: Lucas Stach +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 +- + drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 9 +++++++++ + 2 files changed, 10 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c +index 6228ce6032482..9a2965741dab3 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c +@@ -494,7 +494,7 @@ static const struct drm_driver etnaviv_drm_driver = { + .desc = "etnaviv DRM", + .date = "20151214", + .major = 1, +- .minor = 3, ++ .minor = 4, + }; + + /* +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +index 67201242438be..8665f2658d51b 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +@@ -265,6 +265,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { + bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) + { + struct etnaviv_chip_identity *ident = &gpu->identity; ++ const u32 product_id = ident->product_id; ++ const u32 customer_id = ident->customer_id; ++ const u32 eco_id = ident->eco_id; + int i; + + for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) { +@@ -278,6 +281,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) + etnaviv_chip_identities[i].eco_id == ~0U)) { + memcpy(ident, &etnaviv_chip_identities[i], + sizeof(*ident)); ++ ++ /* Restore some id values as ~0U aka 'don't care' might been used. */ ++ ident->product_id = product_id; ++ ident->customer_id = customer_id; ++ ident->eco_id = eco_id; ++ + return true; + } + } +-- +2.43.0 + diff --git a/queue-6.8/drm-exynos-do-not-return-negative-values-from-.get_m.patch b/queue-6.8/drm-exynos-do-not-return-negative-values-from-.get_m.patch new file mode 100644 index 00000000000..26c314902bc --- /dev/null +++ b/queue-6.8/drm-exynos-do-not-return-negative-values-from-.get_m.patch @@ -0,0 +1,67 @@ +From 5d1ffbaa856b47e6cea8d88b71386ab417cab21b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:41 +0200 +Subject: drm/exynos: do not return negative values from .get_modes() + +From: Jani Nikula + +[ Upstream commit 13d5b040363c7ec0ac29c2de9cf661a24a8aa531 ] + +The .get_modes() hooks aren't supposed to return negative error +codes. Return 0 for no modes, whatever the reason. 
+ +Cc: Inki Dae +Cc: Seung-Woo Kim +Cc: Kyungmin Park +Cc: stable@vger.kernel.org +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/d8665f620d9c252aa7d5a4811ff6b16e773903a2.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4 ++-- + drivers/gpu/drm/exynos/exynos_hdmi.c | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c +index 00382f28748ac..f5bbba9ad2252 100644 +--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c ++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c +@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector) + */ + if (!ctx->raw_edid) { + DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n"); +- return -EFAULT; ++ return 0; + } + + edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; + edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); + if (!edid) { + DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n"); +- return -ENOMEM; ++ return 0; + } + + drm_connector_update_edid_property(connector, edid); +diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c +index 43bed6cbaaea0..b1d02dec3774d 100644 +--- a/drivers/gpu/drm/exynos/exynos_hdmi.c ++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c +@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector) + int ret; + + if (!hdata->ddc_adpt) +- return -ENODEV; ++ return 0; + + edid = drm_get_edid(connector, hdata->ddc_adpt); + if (!edid) +- return -ENODEV; ++ return 0; + + hdata->dvi_mode = !connector->display_info.is_hdmi; + DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", +-- +2.43.0 + diff --git a/queue-6.8/drm-i915-add-missing-to-__assign_str-macros-in-trace.patch b/queue-6.8/drm-i915-add-missing-to-__assign_str-macros-in-trace.patch new file mode 100644 index 00000000000..4fc851b749a --- /dev/null +++ b/queue-6.8/drm-i915-add-missing-to-__assign_str-macros-in-trace.patch @@ -0,0 +1,65 @@ +From 9447c7aa2d4810e52b944cb90ecec919e92bbda3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 22 Feb 2024 13:30:57 -0500 +Subject: drm/i915: Add missing ; to __assign_str() macros in tracepoint code +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Steven Rostedt (Google) + +[ Upstream commit 0df4c388a1e310400a6e90fb10b286e2673756f0 ] + +I'm working on improving the __assign_str() and __string() macros to be +more efficient, and removed some unneeded semicolons. This triggered a bug +in the build as some of the __assign_str() macros in intel_display_trace +was missing a terminating semicolon. 
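+
+For context, a minimal, purely hypothetical tracepoint (the usual
+TRACE_SYSTEM/define_trace.h boilerplate is omitted) shows where those
+semicolons live: every statement inside TP_fast_assign(), including
+each __assign_str(), needs its own terminating ';', while the entries
+in TP_STRUCT__entry() take none:
+
+  TRACE_EVENT(my_plane_event,
+          TP_PROTO(struct drm_plane *plane, u32 frame),
+          TP_ARGS(plane, frame),
+
+          TP_STRUCT__entry(
+                  __string(name, plane->base.name)
+                  __field(u32, frame)
+          ),
+
+          TP_fast_assign(
+                  __assign_str(name, plane->base.name);  /* note the ';' */
+                  __entry->frame = frame;
+          ),
+
+          TP_printk("%s frame=%u", __get_str(name), __entry->frame)
+  );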
+ +Link: https://lore.kernel.org/linux-trace-kernel/20240222133057.2af72a19@gandalf.local.home + +Cc: Daniel Vetter +Cc: David Airlie +Cc: stable@vger.kernel.org +Fixes: 2ceea5d88048b ("drm/i915: Print plane name in fbc tracepoints") +Reviewed-by: Ville Syrjälä +Acked-by: Rodrigo Vivi +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/intel_display_trace.h | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h +index 99bdb833591ce..7862e7cefe027 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_trace.h ++++ b/drivers/gpu/drm/i915/display/intel_display_trace.h +@@ -411,7 +411,7 @@ TRACE_EVENT(intel_fbc_activate, + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __assign_str(dev, __dev_name_kms(plane)); +- __assign_str(name, plane->base.name) ++ __assign_str(name, plane->base.name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); +@@ -438,7 +438,7 @@ TRACE_EVENT(intel_fbc_deactivate, + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __assign_str(dev, __dev_name_kms(plane)); +- __assign_str(name, plane->base.name) ++ __assign_str(name, plane->base.name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); +@@ -465,7 +465,7 @@ TRACE_EVENT(intel_fbc_nuke, + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __assign_str(dev, __dev_name_kms(plane)); +- __assign_str(name, plane->base.name) ++ __assign_str(name, plane->base.name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); +-- +2.43.0 + diff --git a/queue-6.8/drm-imx-ipuv3-do-not-return-negative-values-from-.ge.patch b/queue-6.8/drm-imx-ipuv3-do-not-return-negative-values-from-.ge.patch new file mode 100644 index 00000000000..dcdb16156db --- /dev/null +++ b/queue-6.8/drm-imx-ipuv3-do-not-return-negative-values-from-.ge.patch @@ -0,0 +1,47 @@ +From 4c342ce0541d55cfbe7a51f05c758e356d9f15f4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:43 +0200 +Subject: drm/imx/ipuv3: do not return negative values from .get_modes() + +From: Jani Nikula + +[ Upstream commit c2da9ada64962fcd2e6395ed9987b9874ea032d3 ] + +The .get_modes() hooks aren't supposed to return negative error +codes. Return 0 for no modes, whatever the reason. 
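+
+A stand-alone toy (not kernel code) shows why the callers care: the
+return value is treated as a mode count and added to whatever was
+already counted, so a leaked errno silently skews the total instead of
+surfacing as an error, while clamping to 0 keeps the count sane:
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  static int good_hook(void)  { return 3; }       /* three modes    */
+  static int buggy_hook(void) { return -ENODEV; } /* leaks an errno */
+
+  int main(void)
+  {
+          int count = good_hook();
+          int ret = buggy_hook();
+
+          if (ret < 0)    /* clamp, as the probe helper now does */
+                  ret = 0;
+          count += ret;
+
+          printf("total modes: %d\n", count); /* 3; unclamped: -16 */
+          return 0;
+  }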
+ +Cc: Philipp Zabel +Cc: stable@vger.kernel.org +Acked-by: Philipp Zabel +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/311f6eec96d47949b16a670529f4d89fcd97aefa.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c +index 70349739dd89b..55dedd73f528c 100644 +--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c ++++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c +@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) + int ret; + + if (!mode) +- return -EINVAL; ++ return 0; + + ret = of_get_drm_display_mode(np, &imxpd->mode, + &imxpd->bus_flags, + OF_USE_NATIVE_MODE); + if (ret) { + drm_mode_destroy(connector->dev, mode); +- return ret; ++ return 0; + } + + drm_mode_copy(mode, &imxpd->mode); +-- +2.43.0 + diff --git a/queue-6.8/drm-panel-do-not-return-negative-error-codes-from-dr.patch b/queue-6.8/drm-panel-do-not-return-negative-error-codes-from-dr.patch new file mode 100644 index 00000000000..6a664870526 --- /dev/null +++ b/queue-6.8/drm-panel-do-not-return-negative-error-codes-from-dr.patch @@ -0,0 +1,74 @@ +From 8c3c9de9be10dadf371ebd40fdafccf6f4df6e85 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:40 +0200 +Subject: drm/panel: do not return negative error codes from + drm_panel_get_modes() + +From: Jani Nikula + +[ Upstream commit fc4e97726530241d96dd7db72eb65979217422c9 ] + +None of the callers of drm_panel_get_modes() expect it to return +negative error codes. Either they propagate the return value in their +struct drm_connector_helper_funcs .get_modes() hook (which is also not +supposed to return negative codes), or add it to other counts leading to +bogus values. + +On the other hand, many of the struct drm_panel_funcs .get_modes() hooks +do return negative error codes, so handle them gracefully instead of +propagating further. + +Return 0 for no modes, whatever the reason. + +Cc: Neil Armstrong +Cc: Jessica Zhang +Cc: Sam Ravnborg +Cc: stable@vger.kernel.org +Reviewed-by: Neil Armstrong +Reviewed-by: Jessica Zhang +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/79f559b72d8c493940417304e222a4b04dfa19c4.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/drm_panel.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c +index e814020bbcd3b..cfbe020de54e0 100644 +--- a/drivers/gpu/drm/drm_panel.c ++++ b/drivers/gpu/drm/drm_panel.c +@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable); + * The modes probed from the panel are automatically added to the connector + * that the panel is attached to. + * +- * Return: The number of modes available from the panel on success or a +- * negative error code on failure. ++ * Return: The number of modes available from the panel on success, or 0 on ++ * failure (no modes). 
+ */ + int drm_panel_get_modes(struct drm_panel *panel, + struct drm_connector *connector) + { + if (!panel) +- return -EINVAL; ++ return 0; + +- if (panel->funcs && panel->funcs->get_modes) +- return panel->funcs->get_modes(panel, connector); ++ if (panel->funcs && panel->funcs->get_modes) { ++ int num; + +- return -EOPNOTSUPP; ++ num = panel->funcs->get_modes(panel, connector); ++ if (num > 0) ++ return num; ++ } ++ ++ return 0; + } + EXPORT_SYMBOL(drm_panel_get_modes); + +-- +2.43.0 + diff --git a/queue-6.8/drm-probe-helper-warn-about-negative-.get_modes.patch b/queue-6.8/drm-probe-helper-warn-about-negative-.get_modes.patch new file mode 100644 index 00000000000..9acdeea5419 --- /dev/null +++ b/queue-6.8/drm-probe-helper-warn-about-negative-.get_modes.patch @@ -0,0 +1,61 @@ +From 6f9f5ffed661a3299b7883f12f6c0a92b6dccfa5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:39 +0200 +Subject: drm/probe-helper: warn about negative .get_modes() + +From: Jani Nikula + +[ Upstream commit 7af03e688792293ba33149fb8df619a8dff90e80 ] + +The .get_modes() callback is supposed to return the number of modes, +never a negative error code. If a negative value is returned, it'll just +be interpreted as a negative count, and added to previous calculations. + +Document the rules, but handle the negative values gracefully with an +error message. + +Cc: stable@vger.kernel.org +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/50208c866facc33226a3c77b82bb96aeef8ef310.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/drm_probe_helper.c | 7 +++++++ + include/drm/drm_modeset_helper_vtables.h | 3 ++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c +index 23b4e9a3361d8..744cde9db246b 100644 +--- a/drivers/gpu/drm/drm_probe_helper.c ++++ b/drivers/gpu/drm/drm_probe_helper.c +@@ -419,6 +419,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector) + + count = connector_funcs->get_modes(connector); + ++ /* The .get_modes() callback should not return negative values. */ ++ if (count < 0) { ++ drm_err(connector->dev, ".get_modes() returned %pe\n", ++ ERR_PTR(count)); ++ count = 0; ++ } ++ + /* + * Fallback for when DDC probe failed in drm_get_edid() and thus skipped + * override/firmware EDID. +diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h +index 881b03e4dc288..9ed42469540eb 100644 +--- a/include/drm/drm_modeset_helper_vtables.h ++++ b/include/drm/drm_modeset_helper_vtables.h +@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs { + * + * RETURNS: + * +- * The number of modes added by calling drm_mode_probed_add(). ++ * The number of modes added by calling drm_mode_probed_add(). Return 0 ++ * on failures (no modes) instead of negative error codes. 
+ */ + int (*get_modes)(struct drm_connector *connector); + +-- +2.43.0 + diff --git a/queue-6.8/drm-ttm-make-sure-the-mapped-tt-pages-are-decrypted-.patch b/queue-6.8/drm-ttm-make-sure-the-mapped-tt-pages-are-decrypted-.patch new file mode 100644 index 00000000000..14903b99e1e --- /dev/null +++ b/queue-6.8/drm-ttm-make-sure-the-mapped-tt-pages-are-decrypted-.patch @@ -0,0 +1,163 @@ +From 61976d376727ad54e38807cb2960cb52d7fe739d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 5 Jan 2024 08:51:05 -0500 +Subject: drm/ttm: Make sure the mapped tt pages are decrypted when needed +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Zack Rusin + +[ Upstream commit 71ce046327cfd3aef3f93d1c44e091395eb03f8f ] + +Some drivers require the mapped tt pages to be decrypted. In an ideal +world this would have been handled by the dma layer, but the TTM page +fault handling would have to be rewritten to able to do that. + +A side-effect of the TTM page fault handling is using a dma allocation +per order (via ttm_pool_alloc_page) which makes it impossible to just +trivially use dma_mmap_attrs. As a result ttm has to be very careful +about trying to make its pgprot for the mapped tt pages match what +the dma layer thinks it is. At the ttm layer it's possible to +deduce the requirement to have tt pages decrypted by checking +whether coherent dma allocations have been requested and the system +is running with confidential computing technologies. + +This approach isn't ideal but keeping TTM matching DMAs expectations +for the page properties is in general fragile, unfortunately proper +fix would require a rewrite of TTM's page fault handling. + +Fixes vmwgfx with SEV enabled. + +v2: Explicitly include cc_platform.h +v3: Use CC_ATTR_GUEST_MEM_ENCRYPT instead of CC_ATTR_MEM_ENCRYPT to +limit the scope to guests and log when memory decryption is enabled. + +Signed-off-by: Zack Rusin +Fixes: 3bf3710e3718 ("drm/ttm: Add a generic TTM memcpy move for page-based iomem") +Reviewed-by: Thomas Hellström +Acked-by: Christian König +Cc: Huang Rui +Cc: dri-devel@lists.freedesktop.org +Cc: linux-kernel@vger.kernel.org +Cc: # v5.14+ +Link: https://patchwork.freedesktop.org/patch/msgid/20230926040359.3040017-1-zack@kde.org +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/ttm/ttm_bo_util.c | 13 +++++++++++-- + drivers/gpu/drm/ttm/ttm_tt.c | 13 +++++++++++++ + include/drm/ttm/ttm_tt.h | 9 ++++++++- + 3 files changed, 32 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c +index fd9fd3d15101c..0b3f4267130c4 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c +@@ -294,7 +294,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, + enum ttm_caching caching; + + man = ttm_manager_type(bo->bdev, res->mem_type); +- caching = man->use_tt ? 
bo->ttm->caching : res->bus.caching; ++ if (man->use_tt) { ++ caching = bo->ttm->caching; ++ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED) ++ tmp = pgprot_decrypted(tmp); ++ } else { ++ caching = res->bus.caching; ++ } + + return ttm_prot_from_caching(caching, tmp); + } +@@ -337,6 +343,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, + .no_wait_gpu = false + }; + struct ttm_tt *ttm = bo->ttm; ++ struct ttm_resource_manager *man = ++ ttm_manager_type(bo->bdev, bo->resource->mem_type); + pgprot_t prot; + int ret; + +@@ -346,7 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, + if (ret) + return ret; + +- if (num_pages == 1 && ttm->caching == ttm_cached) { ++ if (num_pages == 1 && ttm->caching == ttm_cached && ++ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) { + /* + * We're mapping a single page, and the desired + * page protection is consistent with the bo. +diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c +index e0a77671edd6c..43eaffa7faae3 100644 +--- a/drivers/gpu/drm/ttm/ttm_tt.c ++++ b/drivers/gpu/drm/ttm/ttm_tt.c +@@ -31,11 +31,14 @@ + + #define pr_fmt(fmt) "[TTM] " fmt + ++#include + #include + #include + #include + #include + #include ++#include ++#include + #include + #include + +@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated; + int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) + { + struct ttm_device *bdev = bo->bdev; ++ struct drm_device *ddev = bo->base.dev; + uint32_t page_flags = 0; + + dma_resv_assert_held(bo->base.resv); +@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) + pr_err("Illegal buffer object type\n"); + return -EINVAL; + } ++ /* ++ * When using dma_alloc_coherent with memory encryption the ++ * mapped TT pages need to be decrypted or otherwise the drivers ++ * will end up sending encrypted mem to the gpu. ++ */ ++ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { ++ page_flags |= TTM_TT_FLAG_DECRYPTED; ++ drm_info(ddev, "TT memory decryption enabled."); ++ } + + bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags); + if (unlikely(bo->ttm == NULL)) +diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h +index a4eff85b1f449..2b9d856ff388d 100644 +--- a/include/drm/ttm/ttm_tt.h ++++ b/include/drm/ttm/ttm_tt.h +@@ -79,6 +79,12 @@ struct ttm_tt { + * page_flags = TTM_TT_FLAG_EXTERNAL | + * TTM_TT_FLAG_EXTERNAL_MAPPABLE; + * ++ * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as ++ * not encrypted. The framework will try to match what the dma layer ++ * is doing, but note that it is a little fragile because ttm page ++ * fault handling abuses the DMA api a bit and dma_map_attrs can't be ++ * used to assure pgprot always matches. ++ * + * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is + * set by TTM after ttm_tt_populate() has successfully returned, and is + * then unset when TTM calls ttm_tt_unpopulate(). +@@ -87,8 +93,9 @@ struct ttm_tt { + #define TTM_TT_FLAG_ZERO_ALLOC BIT(1) + #define TTM_TT_FLAG_EXTERNAL BIT(2) + #define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3) ++#define TTM_TT_FLAG_DECRYPTED BIT(4) + +-#define TTM_TT_FLAG_PRIV_POPULATED BIT(4) ++#define TTM_TT_FLAG_PRIV_POPULATED BIT(5) + uint32_t page_flags; + /** @num_pages: Number of pages in the page array. 
*/ + uint32_t num_pages; +-- +2.43.0 + diff --git a/queue-6.8/drm-vc4-hdmi-do-not-return-negative-values-from-.get.patch b/queue-6.8/drm-vc4-hdmi-do-not-return-negative-values-from-.get.patch new file mode 100644 index 00000000000..6d388cdc003 --- /dev/null +++ b/queue-6.8/drm-vc4-hdmi-do-not-return-negative-values-from-.get.patch @@ -0,0 +1,39 @@ +From 519b7cf8051acc2c82c433b5a6f463be7a2adfc4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 18:03:44 +0200 +Subject: drm/vc4: hdmi: do not return negative values from .get_modes() + +From: Jani Nikula + +[ Upstream commit abf493988e380f25242c1023275c68bd3579c9ce ] + +The .get_modes() hooks aren't supposed to return negative error +codes. Return 0 for no modes, whatever the reason. + +Cc: Maxime Ripard +Cc: stable@vger.kernel.org +Acked-by: Maxime Ripard +Acked-by: Thomas Zimmermann +Link: https://patchwork.freedesktop.org/patch/msgid/dcda6d4003e2c6192987916b35c7304732800e08.1709913674.git.jani.nikula@intel.com +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c +index f05e2c95a60d7..f827f26543641 100644 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c +@@ -508,7 +508,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) + edid = drm_get_edid(connector, vc4_hdmi->ddc); + cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); + if (!edid) +- return -ENODEV; ++ return 0; + + drm_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); +-- +2.43.0 + diff --git a/queue-6.8/drm-vmwgfx-fix-possible-null-pointer-derefence-with-.patch b/queue-6.8/drm-vmwgfx-fix-possible-null-pointer-derefence-with-.patch new file mode 100644 index 00000000000..c82b84c2777 --- /dev/null +++ b/queue-6.8/drm-vmwgfx-fix-possible-null-pointer-derefence-with-.patch @@ -0,0 +1,103 @@ +From 321032ef0d2e34caddb906632c46a1a57eeecb22 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 10 Jan 2024 15:03:05 -0500 +Subject: drm/vmwgfx: Fix possible null pointer derefence with invalid contexts + +From: Zack Rusin + +[ Upstream commit 517621b7060096e48e42f545fa6646fc00252eac ] + +vmw_context_cotable can return either an error or a null pointer and its +usage sometimes went unchecked. Subsequent code would then try to access +either a null pointer or an error value. + +The invalid dereferences were only possible with malformed userspace +apps which never properly initialized the rendering contexts. + +Check the results of vmw_context_cotable to fix the invalid derefs. + +Thanks: +ziming zhang(@ezrak1e) from Ant Group Light-Year Security Lab +who was the first person to discover it. +Niels De Graef who reported it and helped to track down the poc. 
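+
+The fix boils down to treating NULL and ERR_PTR() results the same way.
+A stand-alone sketch with simplified userspace stand-ins for the
+<linux/err.h> helpers (not the real kernel definitions) illustrates the
+"res ? PTR_ERR(res) : -EINVAL" idiom the fix uses:
+
+  #include <errno.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define MAX_ERRNO 4095
+  static void *ERR_PTR(long err) { return (void *)err; }
+  static long PTR_ERR(const void *p) { return (long)p; }
+  static int IS_ERR_OR_NULL(const void *p)
+  {
+          return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
+  }
+
+  static int notify(void *res)
+  {
+          if (IS_ERR_OR_NULL(res))
+                  return res ? PTR_ERR(res) : -EINVAL;
+          return 0; /* would go on to use the object here */
+  }
+
+  int main(void)
+  {
+          int obj;
+
+          printf("%d\n", notify(&obj));             /* 0: valid pointer   */
+          printf("%d\n", notify(NULL));             /* -22: -EINVAL       */
+          printf("%d\n", notify(ERR_PTR(-ENOMEM))); /* -12: encoded error */
+          return 0;
+  }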
+ +Fixes: 9c079b8ce8bf ("drm/vmwgfx: Adapt execbuf to the new validation api") +Cc: # v4.20+ +Reported-by: Niels De Graef +Signed-off-by: Zack Rusin +Cc: Martin Krastev +Cc: Maaz Mombasawala +Cc: Ian Forbes +Cc: Broadcom internal kernel review list +Cc: dri-devel@lists.freedesktop.org +Reviewed-by: Maaz Mombasawala +Reviewed-by: Martin Krastev +Link: https://patchwork.freedesktop.org/patch/msgid/20240110200305.94086-1-zack.rusin@broadcom.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index 36987ef3fc300..5fef0b31c1179 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, + vmw_res_type(ctx) == vmw_res_dx_context) { + for (i = 0; i < cotable_max; ++i) { + res = vmw_context_cotable(ctx, i); +- if (IS_ERR(res)) ++ if (IS_ERR_OR_NULL(res)) + continue; + + ret = vmw_execbuf_res_val_add(sw_context, res, +@@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, + return -EINVAL; + + cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); ++ if (IS_ERR_OR_NULL(cotable_res)) ++ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL; + ret = vmw_cotable_notify(cotable_res, cmd->body.queryId); + + return ret; +@@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, + return ret; + + res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); ++ if (IS_ERR_OR_NULL(res)) ++ return res ? PTR_ERR(res) : -EINVAL; + ret = vmw_cotable_notify(res, cmd->defined_id); + if (unlikely(ret != 0)) + return ret; +@@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, + + so_type = vmw_so_cmd_to_type(header->id); + res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); +- if (IS_ERR(res)) +- return PTR_ERR(res); ++ if (IS_ERR_OR_NULL(res)) ++ return res ? PTR_ERR(res) : -EINVAL; + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cotable_notify(res, cmd->defined_id); + +@@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, + return -EINVAL; + + res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); ++ if (IS_ERR_OR_NULL(res)) ++ return res ? PTR_ERR(res) : -EINVAL; + ret = vmw_cotable_notify(res, cmd->body.shaderId); + if (ret) + return ret; +@@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, + } + + res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT); ++ if (IS_ERR_OR_NULL(res)) ++ return res ? 
PTR_ERR(res) : -EINVAL; + ret = vmw_cotable_notify(res, cmd->body.soid); + if (ret) + return ret; +-- +2.43.0 + diff --git a/queue-6.8/drm-vmwgfx-fix-the-lifetime-of-the-bo-cursor-memory.patch b/queue-6.8/drm-vmwgfx-fix-the-lifetime-of-the-bo-cursor-memory.patch new file mode 100644 index 00000000000..62755552dd1 --- /dev/null +++ b/queue-6.8/drm-vmwgfx-fix-the-lifetime-of-the-bo-cursor-memory.patch @@ -0,0 +1,75 @@ +From 5228a49b7909b63068345811947ebd83394a048b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 26 Jan 2024 15:08:04 -0500 +Subject: drm/vmwgfx: Fix the lifetime of the bo cursor memory + +From: Zack Rusin + +[ Upstream commit 9a9e8a7159ca09af9b1a300a6c8e8b6ff7501c76 ] + +The cleanup can be dispatched while the atomic update is still active, +which means that the memory acquired in the atomic update needs to +not be invalidated by the cleanup. The buffer objects in vmw_plane_state +instead of using the builtin map_and_cache were trying to handle +the lifetime of the mapped memory themselves, leading to crashes. + +Use the map_and_cache instead of trying to manage the lifetime of the +buffer objects held by the vmw_plane_state. + +Fixes kernel oops'es in IGT's kms_cursor_legacy forked-bo. + +Signed-off-by: Zack Rusin +Fixes: bb6780aa5a1d ("drm/vmwgfx: Diff cursors when using cmds") +Cc: # v6.2+ +Reviewed-by: Martin Krastev +Link: https://patchwork.freedesktop.org/patch/msgid/20240126200804.732454-6-zack.rusin@broadcom.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 13 +------------ + 1 file changed, 1 insertion(+), 12 deletions(-) + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index c6f7946889d00..65be9e4a8992a 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -184,13 +184,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h) + */ + static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) + { +- bool is_iomem; + if (vps->surf) { + if (vps->surf_mapped) + return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); + return vps->surf->snooper.image; + } else if (vps->bo) +- return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem); ++ return vmw_bo_map_and_cache(vps->bo); + return NULL; + } + +@@ -652,22 +651,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, + { + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); +- bool is_iomem; + + if (vps->surf_mapped) { + vmw_bo_unmap(vps->surf->res.guest_memory_bo); + vps->surf_mapped = false; + } + +- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) { +- const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL); +- +- if (likely(ret == 0)) { +- ttm_bo_kunmap(&vps->bo->map); +- ttm_bo_unreserve(&vps->bo->tbo); +- } +- } +- + vmw_du_cursor_plane_unmap_cm(vps); + vmw_du_put_cursor_mob(vcp, vps); + +-- +2.43.0 + diff --git a/queue-6.8/drm-vmwgfx-unmap-the-surface-before-resetting-it-on-.patch b/queue-6.8/drm-vmwgfx-unmap-the-surface-before-resetting-it-on-.patch new file mode 100644 index 00000000000..432d1711ca4 --- /dev/null +++ b/queue-6.8/drm-vmwgfx-unmap-the-surface-before-resetting-it-on-.patch @@ -0,0 +1,127 @@ +From 747a00873a2f30b3e830c3bf72549acadf2c7c25 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 24 Dec 2023 00:25:40 -0500 +Subject: drm/vmwgfx: Unmap the surface before resetting it on a plane state + +From: Zack Rusin + +[ Upstream commit 27571c64f1855881753e6f33c3186573afbab7ba 
] + +Switch to a new plane state requires unreferencing of all held surfaces. +In the work required for mob cursors the mapped surfaces started being +cached but the variable indicating whether the surface is currently +mapped was not being reset. This leads to crashes as the duplicated +state, incorrectly, indicates the that surface is mapped even when +no surface is present. That's because after unreferencing the surface +it's perfectly possible for the plane to be backed by a bo instead of a +surface. + +Reset the surface mapped flag when unreferencing the plane state surface +to fix null derefs in cleanup. Fixes crashes in KDE KWin 6.0 on Wayland: + +Oops: 0000 [#1] PREEMPT SMP PTI +CPU: 4 PID: 2533 Comm: kwin_wayland Not tainted 6.7.0-rc3-vmwgfx #2 +Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020 +RIP: 0010:vmw_du_cursor_plane_cleanup_fb+0x124/0x140 [vmwgfx] +Code: 00 00 00 75 3a 48 83 c4 10 5b 5d c3 cc cc cc cc 48 8b b3 a8 00 00 00 48 c7 c7 99 90 43 c0 e8 93 c5 db ca 48 8b 83 a8 00 00 00 <48> 8b 78 28 e8 e3 f> +RSP: 0018:ffffb6b98216fa80 EFLAGS: 00010246 +RAX: 0000000000000000 RBX: ffff969d84cdcb00 RCX: 0000000000000027 +RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff969e75f21600 +RBP: ffff969d4143dc50 R08: 0000000000000000 R09: ffffb6b98216f920 +R10: 0000000000000003 R11: ffff969e7feb3b10 R12: 0000000000000000 +R13: 0000000000000000 R14: 000000000000027b R15: ffff969d49c9fc00 +FS: 00007f1e8f1b4180(0000) GS:ffff969e75f00000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 0000000000000028 CR3: 0000000104006004 CR4: 00000000003706f0 +Call Trace: + + ? __die+0x23/0x70 + ? page_fault_oops+0x171/0x4e0 + ? exc_page_fault+0x7f/0x180 + ? asm_exc_page_fault+0x26/0x30 + ? vmw_du_cursor_plane_cleanup_fb+0x124/0x140 [vmwgfx] + drm_atomic_helper_cleanup_planes+0x9b/0xc0 + commit_tail+0xd1/0x130 + drm_atomic_helper_commit+0x11a/0x140 + drm_atomic_commit+0x97/0xd0 + ? __pfx___drm_printfn_info+0x10/0x10 + drm_atomic_helper_update_plane+0xf5/0x160 + drm_mode_cursor_universal+0x10e/0x270 + drm_mode_cursor_common+0x102/0x230 + ? __pfx_drm_mode_cursor2_ioctl+0x10/0x10 + drm_ioctl_kernel+0xb2/0x110 + drm_ioctl+0x26d/0x4b0 + ? __pfx_drm_mode_cursor2_ioctl+0x10/0x10 + ? __pfx_drm_ioctl+0x10/0x10 + vmw_generic_ioctl+0xa4/0x110 [vmwgfx] + __x64_sys_ioctl+0x94/0xd0 + do_syscall_64+0x61/0xe0 + ? __x64_sys_ioctl+0xaf/0xd0 + ? syscall_exit_to_user_mode+0x2b/0x40 + ? do_syscall_64+0x70/0xe0 + ? __x64_sys_ioctl+0xaf/0xd0 + ? syscall_exit_to_user_mode+0x2b/0x40 + ? do_syscall_64+0x70/0xe0 + ? 
exc_page_fault+0x7f/0x180 + entry_SYSCALL_64_after_hwframe+0x6e/0x76 +RIP: 0033:0x7f1e93f279ed +Code: 04 25 28 00 00 00 48 89 45 c8 31 c0 48 8d 45 10 c7 45 b0 10 00 00 00 48 89 45 b8 48 8d 45 d0 48 89 45 c0 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff f> +RSP: 002b:00007ffca0faf600 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 +RAX: ffffffffffffffda RBX: 000055db876ed2c0 RCX: 00007f1e93f279ed +RDX: 00007ffca0faf6c0 RSI: 00000000c02464bb RDI: 0000000000000015 +RBP: 00007ffca0faf650 R08: 000055db87184010 R09: 0000000000000007 +R10: 000055db886471a0 R11: 0000000000000246 R12: 00007ffca0faf6c0 +R13: 00000000c02464bb R14: 0000000000000015 R15: 00007ffca0faf790 + +Modules linked in: snd_seq_dummy snd_hrtimer nf_conntrack_netbios_ns nf_conntrack_broadcast nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_ine> +CR2: 0000000000000028 +---[ end trace 0000000000000000 ]--- +RIP: 0010:vmw_du_cursor_plane_cleanup_fb+0x124/0x140 [vmwgfx] +Code: 00 00 00 75 3a 48 83 c4 10 5b 5d c3 cc cc cc cc 48 8b b3 a8 00 00 00 48 c7 c7 99 90 43 c0 e8 93 c5 db ca 48 8b 83 a8 00 00 00 <48> 8b 78 28 e8 e3 f> +RSP: 0018:ffffb6b98216fa80 EFLAGS: 00010246 +RAX: 0000000000000000 RBX: ffff969d84cdcb00 RCX: 0000000000000027 +RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff969e75f21600 +RBP: ffff969d4143dc50 R08: 0000000000000000 R09: ffffb6b98216f920 +R10: 0000000000000003 R11: ffff969e7feb3b10 R12: 0000000000000000 +R13: 0000000000000000 R14: 000000000000027b R15: ffff969d49c9fc00 +FS: 00007f1e8f1b4180(0000) GS:ffff969e75f00000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 0000000000000028 CR3: 0000000104006004 CR4: 00000000003706f0 + +Signed-off-by: Zack Rusin +Fixes: 485d98d472d5 ("drm/vmwgfx: Add support for CursorMob and CursorBypass 4") +Reported-by: Stefan Hoffmeister +Closes: https://gitlab.freedesktop.org/drm/misc/-/issues/34 +Cc: Martin Krastev +Cc: Maaz Mombasawala +Cc: Ian Forbes +Cc: Broadcom internal kernel review list +Cc: dri-devel@lists.freedesktop.org +Cc: # v5.19+ +Acked-by: Javier Martinez Canillas +Reviewed-by: Maaz Mombasawala +Reviewed-by: Martin Krastev +Link: https://patchwork.freedesktop.org/patch/msgid/20231224052540.605040-1-zack.rusin@broadcom.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index b6f40781b907a..c6f7946889d00 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -703,6 +703,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, + int ret = 0; + + if (vps->surf) { ++ if (vps->surf_mapped) { ++ vmw_bo_unmap(vps->surf->res.guest_memory_bo); ++ vps->surf_mapped = false; ++ } + vmw_surface_unreference(&vps->surf); + vps->surf = NULL; + } +-- +2.43.0 + diff --git a/queue-6.8/ext4-correct-best-extent-lstart-adjustment-logic.patch b/queue-6.8/ext4-correct-best-extent-lstart-adjustment-logic.patch new file mode 100644 index 00000000000..bf216ea8ba8 --- /dev/null +++ b/queue-6.8/ext4-correct-best-extent-lstart-adjustment-logic.patch @@ -0,0 +1,95 @@ +From 1a5921f4f2af6f4daf5ae2161a8fa1c87c36c136 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Feb 2024 22:18:45 +0800 +Subject: ext4: correct best extent lstart adjustment logic + +From: Baokun Li + +[ Upstream commit 4fbf8bc733d14bceb16dda46a3f5e19c6a9621c5 ] + +When yangerkun review commit 93cdf49f6eca ("ext4: Fix best extent lstart +adjustment logic in ext4_mb_new_inode_pa()"), it 
was found that the best +extent did not completely cover the original request after adjusting the +best extent lstart in ext4_mb_new_inode_pa() as follows: + + original request: 2/10(8) + normalized request: 0/64(64) + best extent: 0/9(9) + +When we check if best ex can be kept at start of goal, ac_o_ex.fe_logical +is 2 less than the adjusted best extent logical end 9, so we think the +adjustment is done. But obviously 0/9(9) doesn't cover 2/10(8), so we +should determine here if the original request logical end is less than or +equal to the adjusted best extent logical end. + +In addition, add a comment stating when adjusted best_ex will not cover +the original request, and remove the duplicate assertion because adjusting +lstart makes no change to b_ex.fe_len. + +Link: https://lore.kernel.org/r/3630fa7f-b432-7afd-5f79-781bc3b2c5ea@huawei.com +Fixes: 93cdf49f6eca ("ext4: Fix best extent lstart adjustment logic in ext4_mb_new_inode_pa()") +Cc: +Signed-off-by: yangerkun +Signed-off-by: Baokun Li +Reviewed-by: Jan Kara +Reviewed-by: Ojaswin Mujoo +Link: https://lore.kernel.org/r/20240201141845.1879253-1-libaokun1@huawei.com +Signed-off-by: Theodore Ts'o +Signed-off-by: Sasha Levin +--- + fs/ext4/mballoc.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index e4f7cf9d89c45..70836e25418ab 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -5169,10 +5169,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + .fe_len = ac->ac_orig_goal_len, + }; + loff_t orig_goal_end = extent_logical_end(sbi, &ex); ++ loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); + +- /* we can't allocate as much as normalizer wants. +- * so, found space must get proper lstart +- * to cover original request */ ++ /* ++ * We can't allocate as much as normalizer wants, so we try ++ * to get proper lstart to cover the original request, except ++ * when the goal doesn't cover the original request as below: ++ * ++ * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048 ++ * best_ex:0/200(200) -> adjusted: 1848/2048(200) ++ */ + BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); + BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); + +@@ -5184,7 +5190,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + * 1. Check if best ex can be kept at end of goal (before + * cr_best_avail trimmed it) and still cover original start + * 2. Else, check if best ex can be kept at start of goal and +- * still cover original start ++ * still cover original end + * 3. Else, keep the best ex at start of original request. 
+ */ + ex.fe_len = ac->ac_b_ex.fe_len; +@@ -5194,7 +5200,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + goto adjust_bex; + + ex.fe_logical = ac->ac_g_ex.fe_logical; +- if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex)) ++ if (o_ex_end <= extent_logical_end(sbi, &ex)) + goto adjust_bex; + + ex.fe_logical = ac->ac_o_ex.fe_logical; +@@ -5202,7 +5208,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + ac->ac_b_ex.fe_logical = ex.fe_logical; + + BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); +- BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); + BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); + } + +-- +2.43.0 + diff --git a/queue-6.8/ext4-fix-corruption-during-on-line-resize.patch b/queue-6.8/ext4-fix-corruption-during-on-line-resize.patch new file mode 100644 index 00000000000..96ccef192f6 --- /dev/null +++ b/queue-6.8/ext4-fix-corruption-during-on-line-resize.patch @@ -0,0 +1,79 @@ +From c0da0ef91bc67aa29420f402f92a50008be5671d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Feb 2024 15:50:09 +0000 +Subject: ext4: fix corruption during on-line resize + +From: Maximilian Heyne + +[ Upstream commit a6b3bfe176e8a5b05ec4447404e412c2a3fc92cc ] + +We observed a corruption during on-line resize of a file system that is +larger than 16 TiB with 4k block size. With having more then 2^32 blocks +resize_inode is turned off by default by mke2fs. The issue can be +reproduced on a smaller file system for convenience by explicitly +turning off resize_inode. An on-line resize across an 8 GiB boundary (the +size of a meta block group in this setup) then leads to a corruption: + + dev=/dev/ # should be >= 16 GiB + mkdir -p /corruption + /sbin/mke2fs -t ext4 -b 4096 -O ^resize_inode $dev $((2 * 2**21 - 2**15)) + mount -t ext4 $dev /corruption + + dd if=/dev/zero bs=4096 of=/corruption/test count=$((2*2**21 - 4*2**15)) + sha1sum /corruption/test + # 79d2658b39dcfd77274e435b0934028adafaab11 /corruption/test + + /sbin/resize2fs $dev $((2*2**21)) + # drop page cache to force reload the block from disk + echo 1 > /proc/sys/vm/drop_caches + + sha1sum /corruption/test + # 3c2abc63cbf1a94c9e6977e0fbd72cd832c4d5c3 /corruption/test + +2^21 = 2^15*2^6 equals 8 GiB whereof 2^15 is the number of blocks per +block group and 2^6 are the number of block groups that make a meta +block group. + +The last checksum might be different depending on how the file is laid +out across the physical blocks. The actual corruption occurs at physical +block 63*2^15 = 2064384 which would be the location of the backup of the +meta block group's block descriptor. During the on-line resize the file +system will be converted to meta_bg starting at s_first_meta_bg which is +2 in the example - meaning all block groups after 16 GiB. However, in +ext4_flex_group_add we might add block groups that are not part of the +first meta block group yet. In the reproducer we achieved this by +substracting the size of a whole block group from the point where the +meta block group would start. This must be considered when updating the +backup block group descriptors to follow the non-meta_bg layout. The fix +is to add a test whether the group to add is already part of the meta +block group or not. 
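+
+The reproducer's numbers can be sanity-checked with a few lines of
+arithmetic (a stand-alone sketch, not ext4 code): with 2^15 blocks per
+group and 64 descriptors per descriptor block, the descriptor block of
+the group added by the resize still lies below s_first_meta_bg = 2, so
+its backups must keep the old, non-meta_bg layout, which is exactly the
+condition the fix adds:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* layout from the reproducer: 4k blocks, 2^15 blocks per
+           * group, 2^6 = 64 group descriptors per descriptor block */
+          const unsigned long blocks_per_group = 1UL << 15;
+          const unsigned long desc_per_block = 64;
+          const unsigned long s_first_meta_bg = 2; /* from the report */
+
+          /* first group added when growing from 2*2^21 - 2^15 blocks */
+          unsigned long group =
+                  (2UL * (1UL << 21) - (1UL << 15)) / blocks_per_group;
+          unsigned long gdb_num = group / desc_per_block;
+
+          printf("new group %lu sits in descriptor block %lu\n",
+                 group, gdb_num);
+          printf("meta_bg backups? %s\n",
+                 gdb_num >= s_first_meta_bg ? "yes" : "no (old layout)");
+          return 0;
+  }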
+ +Fixes: 01f795f9e0d67 ("ext4: add online resizing support for meta_bg and 64-bit file systems") +Cc: +Signed-off-by: Maximilian Heyne +Tested-by: Srivathsa Dara +Reviewed-by: Srivathsa Dara +Link: https://lore.kernel.org/r/20240215155009.94493-1-mheyne@amazon.de +Signed-off-by: Theodore Ts'o +Signed-off-by: Sasha Levin +--- + fs/ext4/resize.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 4d4a5a32e310d..3c0d12382e060 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1602,7 +1602,8 @@ static int ext4_flex_group_add(struct super_block *sb, + int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); + int gdb_num_end = ((group + flex_gd->count - 1) / + EXT4_DESC_PER_BLOCK(sb)); +- int meta_bg = ext4_has_feature_meta_bg(sb); ++ int meta_bg = ext4_has_feature_meta_bg(sb) && ++ gdb_num >= le32_to_cpu(es->s_first_meta_bg); + sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr - + ext4_group_first_block_no(sb, 0); + +-- +2.43.0 + diff --git a/queue-6.8/f2fs-mark-inode-dirty-for-fi_atomic_committed-flag.patch b/queue-6.8/f2fs-mark-inode-dirty-for-fi_atomic_committed-flag.patch new file mode 100644 index 00000000000..78544020fd8 --- /dev/null +++ b/queue-6.8/f2fs-mark-inode-dirty-for-fi_atomic_committed-flag.patch @@ -0,0 +1,57 @@ +From 57d7b4c7d564a004aab610b031239e60d596f561 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 20:26:19 +0900 +Subject: f2fs: mark inode dirty for FI_ATOMIC_COMMITTED flag + +From: Sunmin Jeong + +[ Upstream commit 4bf78322346f6320313683dc9464e5423423ad5c ] + +In f2fs_update_inode, i_size of the atomic file isn't updated until +FI_ATOMIC_COMMITTED flag is set. When committing atomic write right +after the writeback of the inode, i_size of the raw inode will not be +updated. It can cause the atomicity corruption due to a mismatch between +old file size and new data. 
+ +To prevent the problem, let's mark inode dirty for FI_ATOMIC_COMMITTED + +Atomic write thread Writeback thread + __writeback_single_inode + write_inode + f2fs_update_inode + - skip i_size update + f2fs_ioc_commit_atomic_write + f2fs_commit_atomic_write + set_inode_flag(inode, FI_ATOMIC_COMMITTED) + f2fs_do_sync_file + f2fs_fsync_node_pages + - skip f2fs_update_inode since the inode is clean + +Fixes: 3db1de0e582c ("f2fs: change the current atomic write way") +Cc: stable@vger.kernel.org #v5.19+ +Reviewed-by: Sungjong Seo +Reviewed-by: Yeongjin Gil +Signed-off-by: Sunmin Jeong +Reviewed-by: Daeho Jeong +Reviewed-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Sasha Levin +--- + fs/f2fs/f2fs.h | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 5a6c35d70c7ad..6610ff6d7e6cc 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -3032,6 +3032,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, + case FI_INLINE_DOTS: + case FI_PIN_FILE: + case FI_COMPRESS_RELEASED: ++ case FI_ATOMIC_COMMITTED: + f2fs_mark_inode_dirty_sync(inode, true); + } + } +-- +2.43.0 + diff --git a/queue-6.8/f2fs-truncate-page-cache-before-clearing-flags-when-.patch b/queue-6.8/f2fs-truncate-page-cache-before-clearing-flags-when-.patch new file mode 100644 index 00000000000..b61362b399a --- /dev/null +++ b/queue-6.8/f2fs-truncate-page-cache-before-clearing-flags-when-.patch @@ -0,0 +1,64 @@ +From 8722e469c369dc90e0b890076b91a6456a2c45cf Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 20:26:20 +0900 +Subject: f2fs: truncate page cache before clearing flags when aborting atomic + write + +From: Sunmin Jeong + +[ Upstream commit 74b0ebcbdde4c7fe23c979e4cfc2fdbf349c39a3 ] + +In f2fs_do_write_data_page, FI_ATOMIC_FILE flag selects the target inode +between the original inode and COW inode. When aborting atomic write and +writeback occur simultaneously, invalid data can be written to original +inode if the FI_ATOMIC_FILE flag is cleared meanwhile. 
+ +To prevent the problem, let's truncate all pages before clearing the flag + +Atomic write thread Writeback thread + f2fs_abort_atomic_write + clear_inode_flag(inode, FI_ATOMIC_FILE) + __writeback_single_inode + do_writepages + f2fs_do_write_data_page + - use dn of original inode + truncate_inode_pages_final + +Fixes: 3db1de0e582c ("f2fs: change the current atomic write way") +Cc: stable@vger.kernel.org #v5.19+ +Reviewed-by: Sungjong Seo +Reviewed-by: Yeongjin Gil +Signed-off-by: Sunmin Jeong +Reviewed-by: Daeho Jeong +Reviewed-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Sasha Levin +--- + fs/f2fs/segment.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index 60531f50f37fe..6d5774093452f 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -192,6 +192,9 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) + if (!f2fs_is_atomic_file(inode)) + return; + ++ if (clean) ++ truncate_inode_pages_final(inode->i_mapping); ++ + release_atomic_write_cnt(inode); + clear_inode_flag(inode, FI_ATOMIC_COMMITTED); + clear_inode_flag(inode, FI_ATOMIC_REPLACE); +@@ -201,7 +204,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) + F2FS_I(inode)->atomic_write_task = NULL; + + if (clean) { +- truncate_inode_pages_final(inode->i_mapping); + f2fs_i_size_write(inode, fi->original_i_size); + fi->original_i_size = 0; + } +-- +2.43.0 + diff --git a/queue-6.8/fat-fix-uninitialized-field-in-nostale-filehandles.patch b/queue-6.8/fat-fix-uninitialized-field-in-nostale-filehandles.patch new file mode 100644 index 00000000000..3c5591b1874 --- /dev/null +++ b/queue-6.8/fat-fix-uninitialized-field-in-nostale-filehandles.patch @@ -0,0 +1,49 @@ +From a6a4eef0761c1d4933cbda28c593de0e346c7e99 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Feb 2024 13:26:26 +0100 +Subject: fat: fix uninitialized field in nostale filehandles + +From: Jan Kara + +[ Upstream commit fde2497d2bc3a063d8af88b258dbadc86bd7b57c ] + +When fat_encode_fh_nostale() encodes file handle without a parent it +stores only first 10 bytes of the file handle. However the length of the +file handle must be a multiple of 4 so the file handle is actually 12 +bytes long and the last two bytes remain uninitialized. This is not +great at we potentially leak uninitialized information with the handle +to userspace. Properly initialize the full handle length. 
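+
+The arithmetic behind the leak is simple enough to show stand-alone
+(this is not the fat driver's code, just the rounding): handle lengths
+are counted in 32-bit words, so the 10 payload bytes are handed to
+userspace as 12, and the 2 trailing bytes have to be set explicitly:
+
+  #include <stdio.h>
+
+  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+  int main(void)
+  {
+          unsigned int payload = 10;             /* bytes used       */
+          unsigned int words = DIV_ROUND_UP(payload, 4);
+          unsigned int copied = words * 4;       /* bytes handed out */
+
+          printf("%u payload bytes -> %u words -> %u bytes copied\n",
+                 payload, words, copied);
+          printf("%u trailing byte(s) must be zeroed\n",
+                 copied - payload);
+          return 0;
+  }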
+ +Link: https://lkml.kernel.org/r/20240205122626.13701-1-jack@suse.cz +Reported-by: syzbot+3ce5dea5b1539ff36769@syzkaller.appspotmail.com +Fixes: ea3983ace6b7 ("fat: restructure export_operations") +Signed-off-by: Jan Kara +Acked-by: OGAWA Hirofumi +Cc: Amir Goldstein +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + fs/fat/nfs.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c +index c52e63e10d35c..509eea96a457d 100644 +--- a/fs/fat/nfs.c ++++ b/fs/fat/nfs.c +@@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp, + fid->parent_i_gen = parent->i_generation; + type = FILEID_FAT_WITH_PARENT; + *lenp = FAT_FID_SIZE_WITH_PARENT; ++ } else { ++ /* ++ * We need to initialize this field because the fh is actually ++ * 12 bytes long ++ */ ++ fid->parent_i_pos_hi = 0; + } + + return type; +-- +2.43.0 + diff --git a/queue-6.8/fuse-don-t-unhash-root.patch b/queue-6.8/fuse-don-t-unhash-root.patch new file mode 100644 index 00000000000..400ddced531 --- /dev/null +++ b/queue-6.8/fuse-don-t-unhash-root.patch @@ -0,0 +1,54 @@ +From 657951a65589b9a361352affa82d50f2af956559 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Feb 2024 16:50:49 +0100 +Subject: fuse: don't unhash root + +From: Miklos Szeredi + +[ Upstream commit b1fe686a765e6c0d71811d825b5a1585a202b777 ] + +The root inode is assumed to be always hashed. Do not unhash the root +inode even if it is marked BAD. + +Fixes: 5d069dbe8aaf ("fuse: fix bad inode") +Cc: # v5.11 +Signed-off-by: Miklos Szeredi +Signed-off-by: Sasha Levin +--- + fs/fuse/fuse_i.h | 1 - + fs/fuse/inode.c | 7 +++++-- + 2 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index bcbe344888627..20924e799c9f5 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -940,7 +940,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation, + + static inline void fuse_make_bad(struct inode *inode) + { +- remove_inode_hash(inode); + set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); + } + +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index 516ea2979a90f..f73865751c732 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -469,8 +469,11 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, + } else if (fuse_stale_inode(inode, generation, attr)) { + /* nodeid was reused, any I/O on the old inode should fail */ + fuse_make_bad(inode); +- iput(inode); +- goto retry; ++ if (inode != d_inode(sb->s_root)) { ++ remove_inode_hash(inode); ++ iput(inode); ++ goto retry; ++ } + } + fi = get_fuse_inode(inode); + spin_lock(&fi->lock); +-- +2.43.0 + diff --git a/queue-6.8/fuse-fix-root-lookup-with-nonzero-generation.patch b/queue-6.8/fuse-fix-root-lookup-with-nonzero-generation.patch new file mode 100644 index 00000000000..a961dc5936e --- /dev/null +++ b/queue-6.8/fuse-fix-root-lookup-with-nonzero-generation.patch @@ -0,0 +1,48 @@ +From b3ffa2f2fa3226b313f8dd8ac8e648b8417ed923 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Feb 2024 16:50:49 +0100 +Subject: fuse: fix root lookup with nonzero generation + +From: Miklos Szeredi + +[ Upstream commit 68ca1b49e430f6534d0774a94147a823e3b8b26e ] + +The root inode has a fixed nodeid and generation (1, 0). + +Prior to the commit 15db16837a35 ("fuse: fix illegal access to inode with +reused nodeid") generation number on lookup was ignored. After this commit +lookup with the wrong generation number resulted in the inode being +unhashed. 
This is correct for non-root inodes, but replacing the root +inode is wrong and results in weird behavior. + +Fix by reverting to the old behavior if ignoring the generation for the +root inode, but issuing a warning in dmesg. + +Reported-by: Antonio SJ Musumeci +Closes: https://lore.kernel.org/all/CAOQ4uxhek5ytdN8Yz2tNEOg5ea4NkBb4nk0FGPjPk_9nz-VG3g@mail.gmail.com/ +Fixes: 15db16837a35 ("fuse: fix illegal access to inode with reused nodeid") +Cc: # v5.14 +Signed-off-by: Miklos Szeredi +Signed-off-by: Sasha Levin +--- + fs/fuse/dir.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index d3bc463d9da76..9307bb4393b8f 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -391,6 +391,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name + err = -EIO; + if (fuse_invalid_attr(&outarg->attr)) + goto out_put_forget; ++ if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) { ++ pr_warn_once("root generation should be zero\n"); ++ outarg->generation = 0; ++ } + + *inode = fuse_iget(sb, outarg->nodeid, outarg->generation, + &outarg->attr, ATTR_TIMEOUT(outarg), +-- +2.43.0 + diff --git a/queue-6.8/fuse-fix-vm_mayshare-and-direct_io_allow_mmap.patch b/queue-6.8/fuse-fix-vm_mayshare-and-direct_io_allow_mmap.patch new file mode 100644 index 00000000000..4f099dc9747 --- /dev/null +++ b/queue-6.8/fuse-fix-vm_mayshare-and-direct_io_allow_mmap.patch @@ -0,0 +1,65 @@ +From 2a30a0975895e66259482267dabd286416bf52d4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Dec 2023 14:33:23 +0100 +Subject: fuse: fix VM_MAYSHARE and direct_io_allow_mmap + +From: Bernd Schubert + +[ Upstream commit 9511176bbaee0ac60ecc84e7b01cf5972a59ea17 ] + +There were multiple issues with direct_io_allow_mmap: + + - fuse_link_write_file() was missing, resulting in warnings in + fuse_write_file_get() and EIO from msync() + + - "vma->vm_ops = &fuse_file_vm_ops" was not set, but especially + fuse_page_mkwrite is needed. + +The semantics of invalidate_inode_pages2() is so far not clearly defined in +fuse_file_mmap. It dates back to commit 3121bfe76311 ("fuse: fix +"direct_io" private mmap") Though, as direct_io_allow_mmap is a new +feature, that was for MAP_PRIVATE only. As invalidate_inode_pages2() is +calling into fuse_launder_folio() and writes out dirty pages, it should be +safe to call invalidate_inode_pages2 for MAP_PRIVATE and MAP_SHARED as +well. + +Cc: Hao Xu +Cc: stable@vger.kernel.org +Fixes: e78662e818f9 ("fuse: add a new fuse init flag to relax restrictions in no cache mode") +Signed-off-by: Bernd Schubert +Reviewed-by: Amir Goldstein +Signed-off-by: Miklos Szeredi +Signed-off-by: Sasha Levin +--- + fs/fuse/file.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 148a71b8b4d0e..b9cff9b6ca1b8 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -2468,7 +2468,8 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) + return fuse_dax_mmap(file, vma); + + if (ff->open_flags & FOPEN_DIRECT_IO) { +- /* Can't provide the coherency needed for MAP_SHARED ++ /* ++ * Can't provide the coherency needed for MAP_SHARED + * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set. 
+ */ + if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap) +@@ -2476,7 +2477,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) + + invalidate_inode_pages2(file->f_mapping); + +- return generic_file_mmap(file, vma); ++ if (!(vma->vm_flags & VM_MAYSHARE)) { ++ /* MAP_PRIVATE */ ++ return generic_file_mmap(file, vma); ++ } + } + + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) +-- +2.43.0 + diff --git a/queue-6.8/fuse-replace-remaining-make_bad_inode-with-fuse_make.patch b/queue-6.8/fuse-replace-remaining-make_bad_inode-with-fuse_make.patch new file mode 100644 index 00000000000..19f97f5413f --- /dev/null +++ b/queue-6.8/fuse-replace-remaining-make_bad_inode-with-fuse_make.patch @@ -0,0 +1,35 @@ +From ea799d0fcd03f754f5f86a54f47616c12f90f23d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Feb 2024 16:50:49 +0100 +Subject: fuse: replace remaining make_bad_inode() with fuse_make_bad() + +From: Miklos Szeredi + +[ Upstream commit 82e081aebe4d9c26e196c8260005cc4762b57a5d ] + +fuse_do_statx() was added with the wrong helper. + +Fixes: d3045530bdd2 ("fuse: implement statx") +Cc: # v6.6 +Signed-off-by: Miklos Szeredi +Signed-off-by: Sasha Levin +--- + fs/fuse/dir.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index d19cbf34c6341..d3bc463d9da76 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1210,7 +1210,7 @@ static int fuse_do_statx(struct inode *inode, struct file *file, + if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) || + ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) || + inode_wrong_type(inode, sx->mode)))) { +- make_bad_inode(inode); ++ fuse_make_bad(inode); + return -EIO; + } + +-- +2.43.0 + diff --git a/queue-6.8/hwmon-amc6821-add-of_match-table.patch b/queue-6.8/hwmon-amc6821-add-of_match-table.patch new file mode 100644 index 00000000000..63f887ac2ff --- /dev/null +++ b/queue-6.8/hwmon-amc6821-add-of_match-table.patch @@ -0,0 +1,56 @@ +From 6c72609f81fa3dc82f7407c3137a3056b2715980 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 12:06:58 +0100 +Subject: hwmon: (amc6821) add of_match table + +From: Josua Mayer + +[ Upstream commit 3f003fda98a7a8d5f399057d92e6ed56b468657c ] + +Add of_match table for "ti,amc6821" compatible string. +This fixes automatic driver loading by userspace when using device-tree, +and if built as a module like major linux distributions do. + +While devices probe just fine with i2c_device_id table, userspace can't +match the "ti,amc6821" compatible string from dt with the plain +"amc6821" device id. As a result, the kernel module can not be loaded. 
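+
+For readers unfamiliar with the mechanism, the fix boils down to exporting a
+second device-ID table so the OF modalias can be matched. A minimal,
+hypothetical i2c driver skeleton showing the pattern (names shortened, probe
+body omitted; this is a sketch, not the actual amc6821 code):
+
+        #include <linux/i2c.h>
+        #include <linux/mod_devicetable.h>
+        #include <linux/module.h>
+        #include <linux/of.h>
+
+        /* Matches legacy/board-file instantiation by the plain "amc6821" name. */
+        static const struct i2c_device_id amc_i2c_ids[] = {
+                { "amc6821", 0 },
+                { }
+        };
+        MODULE_DEVICE_TABLE(i2c, amc_i2c_ids);
+
+        /*
+         * Matches device-tree instantiation by compatible string and, via the
+         * generated modalias, lets userspace autoload the module.
+         */
+        static const struct of_device_id amc_of_ids[] = {
+                { .compatible = "ti,amc6821" },
+                { }
+        };
+        MODULE_DEVICE_TABLE(of, amc_of_ids);
+
+        static struct i2c_driver amc_driver = {
+                .driver = {
+                        .name = "amc6821",
+                        .of_match_table = of_match_ptr(amc_of_ids),
+                },
+                .id_table = amc_i2c_ids,
+                /* .probe omitted in this sketch */
+        };
+        module_i2c_driver(amc_driver);
+
+        MODULE_LICENSE("GPL");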
+ +Cc: stable@vger.kernel.org +Signed-off-by: Josua Mayer +Link: https://lore.kernel.org/r/20240307-amc6821-of-match-v1-1-5f40464a3110@solid-run.com +[groeck: Cleaned up patch description] +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/amc6821.c | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c +index 2a7a4b6b00942..9b02b304c2f5d 100644 +--- a/drivers/hwmon/amc6821.c ++++ b/drivers/hwmon/amc6821.c +@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = { + + MODULE_DEVICE_TABLE(i2c, amc6821_id); + ++static const struct of_device_id __maybe_unused amc6821_of_match[] = { ++ { ++ .compatible = "ti,amc6821", ++ .data = (void *)amc6821, ++ }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(of, amc6821_of_match); ++ + static struct i2c_driver amc6821_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "amc6821", ++ .of_match_table = of_match_ptr(amc6821_of_match), + }, + .probe = amc6821_probe, + .id_table = amc6821_id, +-- +2.43.0 + diff --git a/queue-6.8/iio-adc-rockchip_saradc-fix-bitmask-for-channels-on-.patch b/queue-6.8/iio-adc-rockchip_saradc-fix-bitmask-for-channels-on-.patch new file mode 100644 index 00000000000..6e34cc10c7a --- /dev/null +++ b/queue-6.8/iio-adc-rockchip_saradc-fix-bitmask-for-channels-on-.patch @@ -0,0 +1,42 @@ +From a50a2bb5e16586a1c2cef719482b247cd9c66d7a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 13:45:21 +0100 +Subject: iio: adc: rockchip_saradc: fix bitmask for channels on SARADCv2 + +From: Quentin Schulz + +[ Upstream commit b0a4546df24a4f8c59b2d05ae141bd70ceccc386 ] + +The SARADCv2 on RK3588 (the only SoC currently supported that has an +SARADCv2) selects the channel through the channel_sel bitfield which is +the 4 lowest bits, therefore the mask should be GENMASK(3, 0) and not +GENMASK(15, 0). + +Fixes: 757953f8ec69 ("iio: adc: rockchip_saradc: Add support for RK3588") +Signed-off-by: Quentin Schulz +Reviewed-by: Heiko Stuebner +Reviewed-by: Andy Shevchenko +Link: https://lore.kernel.org/r/20240223-saradcv2-chan-mask-v1-1-84b06a0f623a@theobroma-systems.com +Cc: +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/adc/rockchip_saradc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c +index dd94667a623bd..2da8d6f3241a1 100644 +--- a/drivers/iio/adc/rockchip_saradc.c ++++ b/drivers/iio/adc/rockchip_saradc.c +@@ -52,7 +52,7 @@ + #define SARADC2_START BIT(4) + #define SARADC2_SINGLE_MODE BIT(5) + +-#define SARADC2_CONV_CHANNELS GENMASK(15, 0) ++#define SARADC2_CONV_CHANNELS GENMASK(3, 0) + + struct rockchip_saradc; + +-- +2.43.0 + diff --git a/queue-6.8/iio-adc-rockchip_saradc-use-mask-for-write_enable-bi.patch b/queue-6.8/iio-adc-rockchip_saradc-use-mask-for-write_enable-bi.patch new file mode 100644 index 00000000000..5cea98650b8 --- /dev/null +++ b/queue-6.8/iio-adc-rockchip_saradc-use-mask-for-write_enable-bi.patch @@ -0,0 +1,63 @@ +From b8f0c3fcc9db7faa6eb08cbc6820cdfe26a2e3dd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 13:45:22 +0100 +Subject: iio: adc: rockchip_saradc: use mask for write_enable bitfield + +From: Quentin Schulz + +[ Upstream commit 5b4e4b72034f85f7a0cdd147d3d729c5a22c8764 ] + +Some of the registers on the SARADCv2 have bits write protected except +if another bit is set. 
This is usually done by having the lowest 16 bits +store the data to write and the highest 16 bits specify which of the 16 +lowest bits should have their value written to the hardware block. + +The write_enable mask for the channel selection was incorrect because it +was just the value shifted by 16 bits, which means it would only ever +write bits and never clear them. So e.g. if someone starts a conversion +on channel 5, the lowest 4 bits would be 0x5, then starts a conversion +on channel 0, it would still be 5. + +Instead of shifting the value by 16 as the mask, let's use the OR'ing of +the appropriate masks shifted by 16. + +Note that this is not an issue currently because the only SARADCv2 +currently supported has a reset defined in its Device Tree, that reset +resets the SARADC controller before starting a conversion on a channel. +However, this reset is handled as optional by the probe function and +thus proper masking should be used in the event an SARADCv2 without a +reset ever makes it upstream. + +Fixes: 757953f8ec69 ("iio: adc: rockchip_saradc: Add support for RK3588") +Signed-off-by: Quentin Schulz +Reviewed-by: Heiko Stuebner +Link: https://lore.kernel.org/r/20240223-saradcv2-chan-mask-v1-2-84b06a0f623a@theobroma-systems.com +Cc: +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/adc/rockchip_saradc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c +index 2da8d6f3241a1..1c0042fbbb548 100644 +--- a/drivers/iio/adc/rockchip_saradc.c ++++ b/drivers/iio/adc/rockchip_saradc.c +@@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn) + writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC); + writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC); + val = FIELD_PREP(SARADC2_EN_END_INT, 1); +- val |= val << 16; ++ val |= SARADC2_EN_END_INT << 16; + writel_relaxed(val, info->regs + SARADC2_END_INT_EN); + val = FIELD_PREP(SARADC2_START, 1) | + FIELD_PREP(SARADC2_SINGLE_MODE, 1) | + FIELD_PREP(SARADC2_CONV_CHANNELS, chn); +- val |= val << 16; ++ val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16; + writel(val, info->regs + SARADC2_CONV_CON); + } + +-- +2.43.0 + diff --git a/queue-6.8/io_uring-clean-rings-on-no_mmap-alloc-fail.patch b/queue-6.8/io_uring-clean-rings-on-no_mmap-alloc-fail.patch new file mode 100644 index 00000000000..54d9c52833d --- /dev/null +++ b/queue-6.8/io_uring-clean-rings-on-no_mmap-alloc-fail.patch @@ -0,0 +1,49 @@ +From c5c1020214aec6127b91ecfe4a505bc98720abe4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 14:56:27 +0000 +Subject: io_uring: clean rings on NO_MMAP alloc fail + +From: Pavel Begunkov + +[ Upstream commit cef59d1ea7170ec753182302645a0191c8aa3382 ] + +We make a few cancellation judgements based on ctx->rings, so let's +zero it afer deallocation for IORING_SETUP_NO_MMAP just like it's +done with the mmap case. Likely, it's not a real problem, but zeroing +is safer and better tested. 
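+
+The reasoning above is the classic free-then-NULL pattern: any later "is this
+still allocated?" test must see a NULL pointer rather than a dangling one. A
+tiny generic sketch in plain C (invented struct, not the io_uring code itself):
+
+        #include <stdlib.h>
+
+        struct ctx {
+                void *rings;
+        };
+
+        static void rings_free(struct ctx *ctx)
+        {
+                free(ctx->rings);
+                /* Later checks such as "if (ctx->rings)" now fail safely
+                 * instead of acting on freed memory. */
+                ctx->rings = NULL;
+        }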
+ +Cc: stable@vger.kernel.org +Fixes: 03d89a2de25bbc ("io_uring: support for user allocated memory for rings/sqes") +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/9ff6cdf91429b8a51699c210e1f6af6ea3f8bdcf.1710255382.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/io_uring.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index 9f938874c5e13..adf944bb5a2fe 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -2776,14 +2776,15 @@ static void io_rings_free(struct io_ring_ctx *ctx) + if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { + io_mem_free(ctx->rings); + io_mem_free(ctx->sq_sqes); +- ctx->rings = NULL; +- ctx->sq_sqes = NULL; + } else { + io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); + ctx->n_ring_pages = 0; + io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages); + ctx->n_sqe_pages = 0; + } ++ ++ ctx->rings = NULL; ++ ctx->sq_sqes = NULL; + } + + void *io_mem_alloc(size_t size) +-- +2.43.0 + diff --git a/queue-6.8/io_uring-fix-io_queue_proc-modifying-req-flags.patch b/queue-6.8/io_uring-fix-io_queue_proc-modifying-req-flags.patch new file mode 100644 index 00000000000..c443299a97e --- /dev/null +++ b/queue-6.8/io_uring-fix-io_queue_proc-modifying-req-flags.patch @@ -0,0 +1,65 @@ +From 78b414c246782d766fadacdf435fcc53d8a6019b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 18:06:32 +0000 +Subject: io_uring: fix io_queue_proc modifying req->flags + +From: Pavel Begunkov + +[ Upstream commit 1a8ec63b2b6c91caec87d4e132b1f71b5df342be ] + +With multiple poll entries __io_queue_proc() might be running in +parallel with poll handlers and possibly task_work, we should not be +carelessly modifying req->flags there. io_poll_double_prepare() handles +a similar case with locking but it's much easier to move it into +__io_arm_poll_handler(). + +Cc: stable@vger.kernel.org +Fixes: 595e52284d24a ("io_uring/poll: don't enable lazy wake for POLLEXCLUSIVE") +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/455cc49e38cf32026fa1b49670be8c162c2cb583.1709834755.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/poll.c | 19 +++++++++++-------- + 1 file changed, 11 insertions(+), 8 deletions(-) + +diff --git a/io_uring/poll.c b/io_uring/poll.c +index 58b7556f621eb..c6f4789623cb2 100644 +--- a/io_uring/poll.c ++++ b/io_uring/poll.c +@@ -539,14 +539,6 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt, + poll->wait.private = (void *) wqe_private; + + if (poll->events & EPOLLEXCLUSIVE) { +- /* +- * Exclusive waits may only wake a limited amount of entries +- * rather than all of them, this may interfere with lazy +- * wake if someone does wait(events > 1). Ensure we don't do +- * lazy wake for those, as we need to process each one as they +- * come in. +- */ +- req->flags |= REQ_F_POLL_NO_LAZY; + add_wait_queue_exclusive(head, &poll->wait); + } else { + add_wait_queue(head, &poll->wait); +@@ -618,6 +610,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req, + if (issue_flags & IO_URING_F_UNLOCKED) + req->flags &= ~REQ_F_HASH_LOCKED; + ++ ++ /* ++ * Exclusive waits may only wake a limited amount of entries ++ * rather than all of them, this may interfere with lazy ++ * wake if someone does wait(events > 1). Ensure we don't do ++ * lazy wake for those, as we need to process each one as they ++ * come in. 
++ */ ++ if (poll->events & EPOLLEXCLUSIVE) ++ req->flags |= REQ_F_POLL_NO_LAZY; ++ + mask = vfs_poll(req->file, &ipt->pt) & poll->events; + + if (unlikely(ipt->error || !ipt->nr_entries)) { +-- +2.43.0 + diff --git a/queue-6.8/io_uring-fix-mshot-io-wq-checks.patch b/queue-6.8/io_uring-fix-mshot-io-wq-checks.patch new file mode 100644 index 00000000000..c28ff1cc3e1 --- /dev/null +++ b/queue-6.8/io_uring-fix-mshot-io-wq-checks.patch @@ -0,0 +1,40 @@ +From b98a1bf7c682362cf611caafca3830f71c2f5e20 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 13:55:56 +0000 +Subject: io_uring: fix mshot io-wq checks + +From: Pavel Begunkov + +[ Upstream commit 3a96378e22cc46c7c49b5911f6c8631527a133a9 ] + +When checking for concurrent CQE posting, we're not only interested in +requests running from the poll handler but also strayed requests ended +up in normal io-wq execution. We're disallowing multishots in general +from io-wq, not only when they came in a certain way. + +Cc: stable@vger.kernel.org +Fixes: 17add5cea2bba ("io_uring: force multishot CQEs into task context") +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/d8c5b36a39258036f93301cd60d3cd295e40653d.1709905727.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/net.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/io_uring/net.c b/io_uring/net.c +index 386a6745ae32f..5a4001139e288 100644 +--- a/io_uring/net.c ++++ b/io_uring/net.c +@@ -87,7 +87,7 @@ static inline bool io_check_multishot(struct io_kiocb *req, + * generic paths but multipoll may decide to post extra cqes. + */ + return !(issue_flags & IO_URING_F_IOWQ) || +- !(issue_flags & IO_URING_F_MULTISHOT) || ++ !(req->flags & REQ_F_APOLL_MULTISHOT) || + !req->ctx->task_complete; + } + +-- +2.43.0 + diff --git a/queue-6.8/io_uring-fix-mshot-read-defer-taskrun-cqe-posting.patch b/queue-6.8/io_uring-fix-mshot-read-defer-taskrun-cqe-posting.patch new file mode 100644 index 00000000000..6273e03a0d9 --- /dev/null +++ b/queue-6.8/io_uring-fix-mshot-read-defer-taskrun-cqe-posting.patch @@ -0,0 +1,38 @@ +From a2cb7c945be6ee7810fafbd2efe9f915a96ff91d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Mar 2024 16:02:25 +0000 +Subject: io_uring: fix mshot read defer taskrun cqe posting + +From: Pavel Begunkov + +[ Upstream commit 70581dcd0601a09f134f23268e3374e15d736824 ] + +We can't post CQEs from io-wq with DEFER_TASKRUN set, normal completions +are handled but aux should be explicitly disallowed by opcode handlers. 
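+
+The shape of the fix, generically: a multishot opcode handler bails out early
+when it finds itself running from io-wq, so no auxiliary CQEs can be posted
+from that context. Hypothetical sketch (handler name invented, flag and struct
+names as used internally by io_uring):
+
+        static int io_some_mshot_op(struct io_kiocb *req, unsigned int issue_flags)
+        {
+                /* Aux completions must not be posted from io-wq. */
+                if (issue_flags & IO_URING_F_IOWQ)
+                        return -EAGAIN;
+
+                /* ... normal multishot handling and CQE posting ... */
+                return 0;
+        }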
+ +Cc: stable@vger.kernel.org +Fixes: fc68fcda04910 ("io_uring/rw: add support for IORING_OP_READ_MULTISHOT") +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/6fb7cba6f5366da25f4d3eb95273f062309d97fa.1709740837.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/rw.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/io_uring/rw.c b/io_uring/rw.c +index d5e79d9bdc717..8756e367acd91 100644 +--- a/io_uring/rw.c ++++ b/io_uring/rw.c +@@ -932,6 +932,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) + */ + if (!file_can_poll(req->file)) + return -EBADFD; ++ if (issue_flags & IO_URING_F_IOWQ) ++ return -EAGAIN; + + ret = __io_read(req, issue_flags); + +-- +2.43.0 + diff --git a/queue-6.8/io_uring-futex-always-remove-futex-entry-for-cancel-.patch b/queue-6.8/io_uring-futex-always-remove-futex-entry-for-cancel-.patch new file mode 100644 index 00000000000..c21a8913fef --- /dev/null +++ b/queue-6.8/io_uring-futex-always-remove-futex-entry-for-cancel-.patch @@ -0,0 +1,38 @@ +From a4dbe0cc7c1b46dd53f46b2d99c5d7484a434c26 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Mar 2024 15:37:15 -0600 +Subject: io_uring/futex: always remove futex entry for cancel all + +From: Jens Axboe + +[ Upstream commit 30dab608c3cb99c2a05b76289fd05551703979ae ] + +We know the request is either being removed, or already in the process of +being removed through task_work, so we can delete it from our futex list +upfront. This is important for remove all conditions, as we otherwise +will find it multiple times and prevent cancelation progress. + +Cc: stable@vger.kernel.org +Fixes: 194bb58c6090 ("io_uring: add support for futex wake and wait") +Fixes: 8f350194d5cf ("io_uring: add support for vectored futex waits") +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/futex.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/io_uring/futex.c b/io_uring/futex.c +index 3c3575303c3d0..792a03df58dea 100644 +--- a/io_uring/futex.c ++++ b/io_uring/futex.c +@@ -159,6 +159,7 @@ bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, + hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) { + if (!io_match_task_safe(req, task, cancel_all)) + continue; ++ hlist_del_init(&req->hash_node); + __io_futex_cancel(ctx, req); + found = true; + } +-- +2.43.0 + diff --git a/queue-6.8/io_uring-net-correctly-handle-multishot-recvmsg-retr.patch b/queue-6.8/io_uring-net-correctly-handle-multishot-recvmsg-retr.patch new file mode 100644 index 00000000000..6991bdf9295 --- /dev/null +++ b/queue-6.8/io_uring-net-correctly-handle-multishot-recvmsg-retr.patch @@ -0,0 +1,40 @@ +From 57b19e208a2f5c7b3a042830f9d768a72555256b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 17:48:03 -0700 +Subject: io_uring/net: correctly handle multishot recvmsg retry setup + +From: Jens Axboe + +[ Upstream commit deaef31bc1ec7966698a427da8c161930830e1cf ] + +If we loop for multishot receive on the initial attempt, and then abort +later on to wait for more, we miss a case where we should be copying the +io_async_msghdr from the stack to stable storage. This leads to the next +retry potentially failing, if the application had the msghdr on the +stack. 
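+
+The general pattern behind the fix: before returning -EAGAIN to retry later,
+any per-request state that currently lives on the kernel stack has to be
+copied into storage that outlives the current call. A generic, hypothetical
+sketch (my_req, op_state and setup_async_state are invented names):
+
+        #include <linux/slab.h>
+
+        struct op_state {
+                /* iterators, copied headers, etc. */
+                int dummy;
+        };
+
+        struct my_req {
+                void *async_data;
+        };
+
+        static int setup_async_state(struct my_req *req, const struct op_state *on_stack)
+        {
+                struct op_state *stable;
+
+                stable = kmalloc(sizeof(*stable), GFP_KERNEL);
+                if (!stable)
+                        return -ENOMEM;
+                /* The retry sees this copy, not the dead stack frame. */
+                *stable = *on_stack;
+                req->async_data = stable;
+                return -EAGAIN;
+        }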
+ +Cc: stable@vger.kernel.org +Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg") +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/net.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/io_uring/net.c b/io_uring/net.c +index 4aaeada03f1e7..386a6745ae32f 100644 +--- a/io_uring/net.c ++++ b/io_uring/net.c +@@ -915,7 +915,8 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) + kfree(kmsg->free_iov); + io_netmsg_recycle(req, issue_flags); + req->flags &= ~REQ_F_NEED_CLEANUP; +- } ++ } else if (ret == -EAGAIN) ++ return io_setup_async_msg(req, kmsg, issue_flags); + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.8/io_uring-rw-return-iou_issue_skip_complete-for-multi.patch b/queue-6.8/io_uring-rw-return-iou_issue_skip_complete-for-multi.patch new file mode 100644 index 00000000000..8ef61d1914f --- /dev/null +++ b/queue-6.8/io_uring-rw-return-iou_issue_skip_complete-for-multi.patch @@ -0,0 +1,39 @@ +From c234a00906a1b86c52bcdb85f81c6c9e3cf5dd70 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 08:29:47 -0600 +Subject: io_uring/rw: return IOU_ISSUE_SKIP_COMPLETE for multishot retry + +From: Jens Axboe + +[ Upstream commit 0a3737db8479b77f95f4bfda8e71b03c697eb56a ] + +If read multishot is being invoked from the poll retry handler, then we +should return IOU_ISSUE_SKIP_COMPLETE rather than -EAGAIN. If not, then +a CQE will be posted with -EAGAIN rather than triggering the retry when +the file is flagged as readable again. + +Cc: stable@vger.kernel.org +Reported-by: Sargun Dhillon +Fixes: fc68fcda04910 ("io_uring/rw: add support for IORING_OP_READ_MULTISHOT") +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/rw.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/io_uring/rw.c b/io_uring/rw.c +index 8756e367acd91..2b84ce8a8a677 100644 +--- a/io_uring/rw.c ++++ b/io_uring/rw.c +@@ -948,6 +948,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) + */ + if (io_kbuf_recycle(req, issue_flags)) + rw->len = 0; ++ if (issue_flags & IO_URING_F_MULTISHOT) ++ return IOU_ISSUE_SKIP_COMPLETE; + return -EAGAIN; + } + +-- +2.43.0 + diff --git a/queue-6.8/io_uring-waitid-always-remove-waitid-entry-for-cance.patch b/queue-6.8/io_uring-waitid-always-remove-waitid-entry-for-cance.patch new file mode 100644 index 00000000000..03fac6bb1a9 --- /dev/null +++ b/queue-6.8/io_uring-waitid-always-remove-waitid-entry-for-cance.patch @@ -0,0 +1,54 @@ +From 90bbd6847b8afbeb783a7c6f9464e909bb9294f5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Mar 2024 15:42:49 -0600 +Subject: io_uring/waitid: always remove waitid entry for cancel all + +From: Jens Axboe + +[ Upstream commit 2b35b8b43e07b1a6f06fdd84cf4b9eb24785896d ] + +We know the request is either being removed, or already in the process of +being removed through task_work, so we can delete it from our waitid list +upfront. This is important for remove all conditions, as we otherwise +will find it multiple times and prevent cancelation progress. + +Remove the dead check in cancelation as well for the hash_node being +empty or not. We already have a waitid reference check for ownership, +so we don't need to check the list too. 
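+
+The key detail is unhashing before kicking the cancellation, so a remove-all
+walk can never encounter the same entry twice. A self-contained sketch of that
+loop shape (my_req and my_remove_all are invented names; the list helpers are
+the regular <linux/list.h> ones):
+
+        #include <linux/list.h>
+        #include <linux/types.h>
+
+        struct my_req {
+                struct hlist_node hash_node;
+        };
+
+        static bool my_remove_all(struct hlist_head *list)
+        {
+                struct hlist_node *tmp;
+                struct my_req *req;
+                bool found = false;
+
+                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+                        /* Unhash first: teardown may finish asynchronously,
+                         * and the walk must not find this entry again. */
+                        hlist_del_init(&req->hash_node);
+                        /* ... kick per-request cancellation here ... */
+                        found = true;
+                }
+                return found;
+        }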
+ +Cc: stable@vger.kernel.org +Fixes: f31ecf671ddc ("io_uring: add IORING_OP_WAITID support") +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/waitid.c | 7 +------ + 1 file changed, 1 insertion(+), 6 deletions(-) + +diff --git a/io_uring/waitid.c b/io_uring/waitid.c +index 6f851978606d9..77d340666cb95 100644 +--- a/io_uring/waitid.c ++++ b/io_uring/waitid.c +@@ -125,12 +125,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret) + + lockdep_assert_held(&req->ctx->uring_lock); + +- /* +- * Did cancel find it meanwhile? +- */ +- if (hlist_unhashed(&req->hash_node)) +- return; +- + hlist_del_init(&req->hash_node); + + ret = io_waitid_finish(req, ret); +@@ -202,6 +196,7 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, + hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) { + if (!io_match_task_safe(req, task, cancel_all)) + continue; ++ hlist_del_init(&req->hash_node); + __io_waitid_cancel(ctx, req); + found = true; + } +-- +2.43.0 + diff --git a/queue-6.8/kasan-test-avoid-gcc-warning-for-intentional-overflo.patch b/queue-6.8/kasan-test-avoid-gcc-warning-for-intentional-overflo.patch new file mode 100644 index 00000000000..7c13bf84fa9 --- /dev/null +++ b/queue-6.8/kasan-test-avoid-gcc-warning-for-intentional-overflo.patch @@ -0,0 +1,56 @@ +From 0eb493bc678a5872fde7fc1f0ba737bb1eab7d6d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Feb 2024 12:15:52 +0100 +Subject: kasan/test: avoid gcc warning for intentional overflow + +From: Arnd Bergmann + +[ Upstream commit e10aea105e9ed14b62a11844fec6aaa87c6935a3 ] + +The out-of-bounds test allocates an object that is three bytes too short +in order to validate the bounds checking. Starting with gcc-14, this +causes a compile-time warning as gcc has grown smart enough to understand +the sizeof() logic: + +mm/kasan/kasan_test.c: In function 'kmalloc_oob_16': +mm/kasan/kasan_test.c:443:14: error: allocation of insufficient size '13' for type 'struct ' with size '16' [-Werror=alloc-size] + 443 | ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); + | ^ + +Hide the actual computation behind a RELOC_HIDE() that ensures +the compiler misses the intentional bug. + +Link: https://lkml.kernel.org/r/20240212111609.869266-1-arnd@kernel.org +Fixes: 3f15801cdc23 ("lib: add kasan test module") +Signed-off-by: Arnd Bergmann +Reviewed-by: Andrey Konovalov +Cc: Alexander Potapenko +Cc: Andrey Ryabinin +Cc: Arnd Bergmann +Cc: Dmitry Vyukov +Cc: Marco Elver +Cc: Vincenzo Frascino +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + mm/kasan/kasan_test.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c +index 971cfff4ca0b7..e26a2583a6643 100644 +--- a/mm/kasan/kasan_test.c ++++ b/mm/kasan/kasan_test.c +@@ -440,7 +440,8 @@ static void kmalloc_oob_16(struct kunit *test) + /* This test is specifically crafted for the generic mode. 
*/ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); + +- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); ++ /* RELOC_HIDE to prevent gcc from warning about short alloc */ ++ ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); + + ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); +-- +2.43.0 + diff --git a/queue-6.8/kbuild-move-wenum-compare-conditional-enum-conversio.patch b/queue-6.8/kbuild-move-wenum-compare-conditional-enum-conversio.patch new file mode 100644 index 00000000000..3f769fac202 --- /dev/null +++ b/queue-6.8/kbuild-move-wenum-compare-conditional-enum-conversio.patch @@ -0,0 +1,72 @@ +From b882268054c87e10a4099c198b7bb404567dde75 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:12:47 -0700 +Subject: kbuild: Move -Wenum-{compare-conditional,enum-conversion} into W=1 + +From: Nathan Chancellor + +[ Upstream commit 75b5ab134bb5f657ef7979a59106dce0657e8d87 ] + +Clang enables -Wenum-enum-conversion and -Wenum-compare-conditional +under -Wenum-conversion. A recent change in Clang strengthened these +warnings and they appear frequently in common builds, primarily due to +several instances in common headers but there are quite a few drivers +that have individual instances as well. + + include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion] + 508 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS + + | ~~~~~~~~~~~~~~~~~~~~~ ^ + 509 | item]; + | ~~~~ + + drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c:955:24: warning: conditional expression between different enumeration types ('enum iwl_mac_beacon_flags' and 'enum iwl_mac_beacon_flags_v1') [-Wenum-compare-conditional] + 955 | flags |= is_new_rate ? IWL_MAC_BEACON_CCK + | ^ ~~~~~~~~~~~~~~~~~~ + 956 | : IWL_MAC_BEACON_CCK_V1; + | ~~~~~~~~~~~~~~~~~~~~~ + drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c:1120:21: warning: conditional expression between different enumeration types ('enum iwl_mac_beacon_flags' and 'enum iwl_mac_beacon_flags_v1') [-Wenum-compare-conditional] + 1120 | 0) > 10 ? + | ^ + 1121 | IWL_MAC_BEACON_FILS : + | ~~~~~~~~~~~~~~~~~~~ + 1122 | IWL_MAC_BEACON_FILS_V1; + | ~~~~~~~~~~~~~~~~~~~~~~ + +Doing arithmetic between or returning two different types of enums could +be a bug, so each of the instance of the warning needs to be evaluated. +Unfortunately, as mentioned above, there are many instances of this +warning in many different configurations, which can break the build when +CONFIG_WERROR is enabled. + +To avoid introducing new instances of the warnings while cleaning up the +disruption for the majority of users, disable these warnings for the +default build while leaving them on for W=1 builds. 
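+
+For reference, a tiny standalone example of the two constructs these options
+diagnose (compile with clang and -Wenum-conversion to see both warnings;
+the enum and function names are made up):
+
+        enum fruit  { APPLE,  ORANGE };
+        enum veggie { CARROT, POTATO };
+
+        int mix(int pick)
+        {
+                /* Arithmetic between two different enum types:
+                 * -Wenum-enum-conversion fires here. */
+                int idx = APPLE + CARROT;
+
+                /* Conditional whose arms have different enum types:
+                 * -Wenum-compare-conditional fires here. */
+                int val = pick ? ORANGE : POTATO;
+
+                return idx + val;
+        }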
+ +Cc: stable@vger.kernel.org +Closes: https://github.com/ClangBuiltLinux/linux/issues/2002 +Link: https://github.com/llvm/llvm-project/commit/8c2ae42b3e1c6aa7c18f873edcebff7c0b45a37e +Acked-by: Yonghong Song +Signed-off-by: Nathan Chancellor +Acked-by: Arnd Bergmann +Signed-off-by: Masahiro Yamada +Signed-off-by: Sasha Levin +--- + scripts/Makefile.extrawarn | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn +index a9e552a1e9105..2f25a1de129d1 100644 +--- a/scripts/Makefile.extrawarn ++++ b/scripts/Makefile.extrawarn +@@ -132,6 +132,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) + KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare + KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) + KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) ++KBUILD_CFLAGS += -Wno-enum-compare-conditional ++KBUILD_CFLAGS += -Wno-enum-enum-conversion + endif + + endif +-- +2.43.0 + diff --git a/queue-6.8/ksmbd-fix-potencial-out-of-bounds-when-buffer-offset.patch b/queue-6.8/ksmbd-fix-potencial-out-of-bounds-when-buffer-offset.patch new file mode 100644 index 00000000000..ada1a14c30e --- /dev/null +++ b/queue-6.8/ksmbd-fix-potencial-out-of-bounds-when-buffer-offset.patch @@ -0,0 +1,316 @@ +From 965ed12f7f4bc6d119bbb32addee2d72eb0e0f37 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Mar 2024 08:40:48 +0900 +Subject: ksmbd: fix potencial out-of-bounds when buffer offset is invalid + +From: Namjae Jeon + +[ Upstream commit c6cd2e8d2d9aa7ee35b1fa6a668e32a22a9753da ] + +I found potencial out-of-bounds when buffer offset fields of a few requests +is invalid. This patch set the minimum value of buffer offset field to +->Buffer offset to validate buffer length. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Namjae Jeon +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/server/smb2misc.c | 23 +++++++++++++------ + fs/smb/server/smb2pdu.c | 48 ++++++++++++++++++++++------------------ + 2 files changed, 42 insertions(+), 29 deletions(-) + +diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c +index 7c872ffb4b0a9..727cb49926ee5 100644 +--- a/fs/smb/server/smb2misc.c ++++ b/fs/smb/server/smb2misc.c +@@ -101,7 +101,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength); + break; + case SMB2_TREE_CONNECT: +- *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset); ++ *off = max_t(unsigned short int, ++ le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset), ++ offsetof(struct smb2_tree_connect_req, Buffer)); + *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength); + break; + case SMB2_CREATE: +@@ -110,7 +112,6 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + max_t(unsigned short int, + le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset), + offsetof(struct smb2_create_req, Buffer)); +- + unsigned short int name_len = + le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength); + +@@ -131,11 +132,15 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + break; + } + case SMB2_QUERY_INFO: +- *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset); ++ *off = max_t(unsigned int, ++ le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset), ++ offsetof(struct smb2_query_info_req, Buffer)); + *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength); + break; + case SMB2_SET_INFO: +- *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset); ++ *off = max_t(unsigned int, ++ le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset), ++ offsetof(struct smb2_set_info_req, Buffer)); + *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength); + break; + case SMB2_READ: +@@ -145,7 +150,7 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + case SMB2_WRITE: + if (((struct smb2_write_req *)hdr)->DataOffset || + ((struct smb2_write_req *)hdr)->Length) { +- *off = max_t(unsigned int, ++ *off = max_t(unsigned short int, + le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset), + offsetof(struct smb2_write_req, Buffer)); + *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length); +@@ -156,7 +161,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength); + break; + case SMB2_QUERY_DIRECTORY: +- *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset); ++ *off = max_t(unsigned short int, ++ le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset), ++ offsetof(struct smb2_query_directory_req, Buffer)); + *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength); + break; + case SMB2_LOCK: +@@ -171,7 +178,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + break; + } + case SMB2_IOCTL: +- *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset); ++ *off = max_t(unsigned int, ++ le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset), ++ offsetof(struct smb2_ioctl_req, Buffer)); + *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount); + break; + default: +diff --git 
a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 199c31c275e5b..88db6e207e0ee 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1951,7 +1951,7 @@ int smb2_tree_connect(struct ksmbd_work *work) + + WORK_BUFFERS(work, req, rsp); + +- treename = smb_strndup_from_utf16(req->Buffer, ++ treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset), + le16_to_cpu(req->PathLength), true, + conn->local_nls); + if (IS_ERR(treename)) { +@@ -2704,7 +2704,7 @@ int smb2_open(struct ksmbd_work *work) + goto err_out2; + } + +- name = smb2_get_name(req->Buffer, ++ name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset), + le16_to_cpu(req->NameLength), + work->conn->local_nls); + if (IS_ERR(name)) { +@@ -4080,7 +4080,7 @@ int smb2_query_dir(struct ksmbd_work *work) + } + + srch_flag = req->Flags; +- srch_ptr = smb_strndup_from_utf16(req->Buffer, ++ srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset), + le16_to_cpu(req->FileNameLength), 1, + conn->local_nls); + if (IS_ERR(srch_ptr)) { +@@ -4340,7 +4340,8 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp, + sizeof(struct smb2_ea_info_req)) + return -EINVAL; + +- ea_req = (struct smb2_ea_info_req *)req->Buffer; ++ ea_req = (struct smb2_ea_info_req *)((char *)req + ++ le16_to_cpu(req->InputBufferOffset)); + } else { + /* need to send all EAs, if no specific EA is requested*/ + if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY) +@@ -5986,6 +5987,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + struct ksmbd_share_config *share) + { + unsigned int buf_len = le32_to_cpu(req->BufferLength); ++ char *buffer = (char *)req + le16_to_cpu(req->BufferOffset); + + switch (req->FileInfoClass) { + case FILE_BASIC_INFORMATION: +@@ -5993,7 +5995,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + if (buf_len < sizeof(struct smb2_file_basic_info)) + return -EINVAL; + +- return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share); ++ return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share); + } + case FILE_ALLOCATION_INFORMATION: + { +@@ -6001,7 +6003,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + return -EINVAL; + + return set_file_allocation_info(work, fp, +- (struct smb2_file_alloc_info *)req->Buffer); ++ (struct smb2_file_alloc_info *)buffer); + } + case FILE_END_OF_FILE_INFORMATION: + { +@@ -6009,7 +6011,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + return -EINVAL; + + return set_end_of_file_info(work, fp, +- (struct smb2_file_eof_info *)req->Buffer); ++ (struct smb2_file_eof_info *)buffer); + } + case FILE_RENAME_INFORMATION: + { +@@ -6017,7 +6019,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + return -EINVAL; + + return set_rename_info(work, fp, +- (struct smb2_file_rename_info *)req->Buffer, ++ (struct smb2_file_rename_info *)buffer, + buf_len); + } + case FILE_LINK_INFORMATION: +@@ -6026,7 +6028,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + return -EINVAL; + + return smb2_create_link(work, work->tcon->share_conf, +- (struct smb2_file_link_info *)req->Buffer, ++ (struct smb2_file_link_info *)buffer, + buf_len, fp->filp, + work->conn->local_nls); + } +@@ -6036,7 +6038,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + return -EINVAL; + + return 
set_file_disposition_info(fp, +- (struct smb2_file_disposition_info *)req->Buffer); ++ (struct smb2_file_disposition_info *)buffer); + } + case FILE_FULL_EA_INFORMATION: + { +@@ -6049,7 +6051,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + if (buf_len < sizeof(struct smb2_ea_info)) + return -EINVAL; + +- return smb2_set_ea((struct smb2_ea_info *)req->Buffer, ++ return smb2_set_ea((struct smb2_ea_info *)buffer, + buf_len, &fp->filp->f_path, true); + } + case FILE_POSITION_INFORMATION: +@@ -6057,14 +6059,14 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, + if (buf_len < sizeof(struct smb2_file_pos_info)) + return -EINVAL; + +- return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer); ++ return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer); + } + case FILE_MODE_INFORMATION: + { + if (buf_len < sizeof(struct smb2_file_mode_info)) + return -EINVAL; + +- return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer); ++ return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer); + } + } + +@@ -6145,7 +6147,7 @@ int smb2_set_info(struct ksmbd_work *work) + } + rc = smb2_set_info_sec(fp, + le32_to_cpu(req->AdditionalInformation), +- req->Buffer, ++ (char *)req + le16_to_cpu(req->BufferOffset), + le32_to_cpu(req->BufferLength)); + ksmbd_revert_fsids(work); + break; +@@ -7591,7 +7593,7 @@ static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id, + struct smb2_ioctl_rsp *rsp) + { + struct ksmbd_rpc_command *rpc_resp; +- char *data_buf = (char *)&req->Buffer[0]; ++ char *data_buf = (char *)req + le32_to_cpu(req->InputOffset); + int nbytes = 0; + + rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf, +@@ -7704,6 +7706,7 @@ int smb2_ioctl(struct ksmbd_work *work) + u64 id = KSMBD_NO_FID; + struct ksmbd_conn *conn = work->conn; + int ret = 0; ++ char *buffer; + + if (work->next_smb2_rcv_hdr_off) { + req = ksmbd_req_buf_next(work); +@@ -7726,6 +7729,8 @@ int smb2_ioctl(struct ksmbd_work *work) + goto out; + } + ++ buffer = (char *)req + le32_to_cpu(req->InputOffset); ++ + cnt_code = le32_to_cpu(req->CtlCode); + ret = smb2_calc_max_out_buf_len(work, 48, + le32_to_cpu(req->MaxOutputResponse)); +@@ -7783,7 +7788,7 @@ int smb2_ioctl(struct ksmbd_work *work) + } + + ret = fsctl_validate_negotiate_info(conn, +- (struct validate_negotiate_info_req *)&req->Buffer[0], ++ (struct validate_negotiate_info_req *)buffer, + (struct validate_negotiate_info_rsp *)&rsp->Buffer[0], + in_buf_len); + if (ret < 0) +@@ -7836,7 +7841,7 @@ int smb2_ioctl(struct ksmbd_work *work) + rsp->VolatileFileId = req->VolatileFileId; + rsp->PersistentFileId = req->PersistentFileId; + fsctl_copychunk(work, +- (struct copychunk_ioctl_req *)&req->Buffer[0], ++ (struct copychunk_ioctl_req *)buffer, + le32_to_cpu(req->CtlCode), + le32_to_cpu(req->InputCount), + req->VolatileFileId, +@@ -7849,8 +7854,7 @@ int smb2_ioctl(struct ksmbd_work *work) + goto out; + } + +- ret = fsctl_set_sparse(work, id, +- (struct file_sparse *)&req->Buffer[0]); ++ ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer); + if (ret < 0) + goto out; + break; +@@ -7873,7 +7877,7 @@ int smb2_ioctl(struct ksmbd_work *work) + } + + zero_data = +- (struct file_zero_data_information *)&req->Buffer[0]; ++ (struct file_zero_data_information *)buffer; + + off = le64_to_cpu(zero_data->FileOffset); + bfz = le64_to_cpu(zero_data->BeyondFinalZero); +@@ -7904,7 +7908,7 @@ int smb2_ioctl(struct ksmbd_work *work) + } + + ret = 
fsctl_query_allocated_ranges(work, id, +- (struct file_allocated_range_buffer *)&req->Buffer[0], ++ (struct file_allocated_range_buffer *)buffer, + (struct file_allocated_range_buffer *)&rsp->Buffer[0], + out_buf_len / + sizeof(struct file_allocated_range_buffer), &nbytes); +@@ -7948,7 +7952,7 @@ int smb2_ioctl(struct ksmbd_work *work) + goto out; + } + +- dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0]; ++ dup_ext = (struct duplicate_extents_to_file *)buffer; + + fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle, + dup_ext->PersistentFileHandle); +-- +2.43.0 + diff --git a/queue-6.8/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf.patch b/queue-6.8/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf.patch new file mode 100644 index 00000000000..eed4b7b67d3 --- /dev/null +++ b/queue-6.8/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf.patch @@ -0,0 +1,42 @@ +From 82a22d09d52a7e8446389c936dfffc8a01e37938 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 16 Mar 2024 23:36:36 +0900 +Subject: ksmbd: fix slab-out-of-bounds in smb_strndup_from_utf16() + +From: Namjae Jeon + +[ Upstream commit a80a486d72e20bd12c335bcd38b6e6f19356b0aa ] + +If ->NameOffset of smb2_create_req is smaller than Buffer offset of +smb2_create_req, slab-out-of-bounds read can happen from smb2_open. +This patch set the minimum value of the name offset to the buffer offset +to validate name length of smb2_create_req(). + +Cc: stable@vger.kernel.org +Reported-by: Xuanzhe Yu +Signed-off-by: Namjae Jeon +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/server/smb2misc.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c +index 03dded29a9804..7c872ffb4b0a9 100644 +--- a/fs/smb/server/smb2misc.c ++++ b/fs/smb/server/smb2misc.c +@@ -107,7 +107,10 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, + case SMB2_CREATE: + { + unsigned short int name_off = +- le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset); ++ max_t(unsigned short int, ++ le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset), ++ offsetof(struct smb2_create_req, Buffer)); ++ + unsigned short int name_len = + le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength); + +-- +2.43.0 + diff --git a/queue-6.8/ksmbd-replace-generic_fillattr-with-vfs_getattr.patch b/queue-6.8/ksmbd-replace-generic_fillattr-with-vfs_getattr.patch new file mode 100644 index 00000000000..12d1a585a76 --- /dev/null +++ b/queue-6.8/ksmbd-replace-generic_fillattr-with-vfs_getattr.patch @@ -0,0 +1,479 @@ +From c193c8d0e8b0efde04d35a3f925ecaf834c86c18 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 22 Feb 2024 10:58:19 +0100 +Subject: ksmbd: replace generic_fillattr with vfs_getattr + +From: Marios Makassikis + +[ Upstream commit 5614c8c487f6af627614dd2efca038e4afe0c6d7 ] + +generic_fillattr should not be used outside of ->getattr +implementations. + +Use vfs_getattr instead, and adapt functions to return an +error code to the caller. 
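+
+For context, the call shape used throughout the conversion below: ask the
+filesystem's ->getattr() for fresh attributes via vfs_getattr() and propagate
+its return value, instead of copying raw inode fields. A minimal sketch
+(example_file_size is an invented helper, not ksmbd code):
+
+        #include <linux/fcntl.h>
+        #include <linux/fs.h>
+        #include <linux/stat.h>
+
+        static int example_file_size(struct file *filp, loff_t *size)
+        {
+                struct kstat stat;
+                int ret;
+
+                ret = vfs_getattr(&filp->f_path, &stat, STATX_BASIC_STATS,
+                                  AT_STATX_SYNC_AS_STAT);
+                if (ret)
+                        return ret;     /* propagate instead of assuming success */
+
+                *size = stat.size;
+                return 0;
+        }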
+ +Cc: stable@vger.kernel.org +Signed-off-by: Marios Makassikis +Acked-by: Namjae Jeon +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/server/smb2pdu.c | 170 ++++++++++++++++++++++++------------- + fs/smb/server/smb_common.c | 11 ++- + fs/smb/server/vfs.c | 12 ++- + 3 files changed, 127 insertions(+), 66 deletions(-) + +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 0c97d3c860726..f6cc5d2730ffb 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -3828,11 +3828,16 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv) + } + + ksmbd_kstat.kstat = &kstat; +- if (priv->info_level != FILE_NAMES_INFORMATION) +- ksmbd_vfs_fill_dentry_attrs(priv->work, +- idmap, +- dent, +- &ksmbd_kstat); ++ if (priv->info_level != FILE_NAMES_INFORMATION) { ++ rc = ksmbd_vfs_fill_dentry_attrs(priv->work, ++ idmap, ++ dent, ++ &ksmbd_kstat); ++ if (rc) { ++ dput(dent); ++ continue; ++ } ++ } + + rc = smb2_populate_readdir_entry(priv->work->conn, + priv->info_level, +@@ -4480,6 +4485,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp, + struct smb2_file_basic_info *basic_info; + struct kstat stat; + u64 time; ++ int ret; + + if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { + pr_err("no right to read the attributes : 0x%x\n", +@@ -4487,9 +4493,12 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp, + return -EACCES; + } + ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; ++ + basic_info = (struct smb2_file_basic_info *)rsp->Buffer; +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, +- file_inode(fp->filp), &stat); + basic_info->CreationTime = cpu_to_le64(fp->create_time); + time = ksmbd_UnixTimeToNT(stat.atime); + basic_info->LastAccessTime = cpu_to_le64(time); +@@ -4504,27 +4513,31 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp, + return 0; + } + +-static void get_file_standard_info(struct smb2_query_info_rsp *rsp, +- struct ksmbd_file *fp, void *rsp_org) ++static int get_file_standard_info(struct smb2_query_info_rsp *rsp, ++ struct ksmbd_file *fp, void *rsp_org) + { + struct smb2_file_standard_info *sinfo; + unsigned int delete_pending; +- struct inode *inode; + struct kstat stat; ++ int ret; + +- inode = file_inode(fp->filp); +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat); ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + + sinfo = (struct smb2_file_standard_info *)rsp->Buffer; + delete_pending = ksmbd_inode_pending_delete(fp); + +- sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9); ++ sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9); + sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); + sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); + sinfo->DeletePending = delete_pending; + sinfo->Directory = S_ISDIR(stat.mode) ? 
1 : 0; + rsp->OutputBufferLength = + cpu_to_le32(sizeof(struct smb2_file_standard_info)); ++ ++ return 0; + } + + static void get_file_alignment_info(struct smb2_query_info_rsp *rsp, +@@ -4546,11 +4559,11 @@ static int get_file_all_info(struct ksmbd_work *work, + struct ksmbd_conn *conn = work->conn; + struct smb2_file_all_info *file_info; + unsigned int delete_pending; +- struct inode *inode; + struct kstat stat; + int conv_len; + char *filename; + u64 time; ++ int ret; + + if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { + ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n", +@@ -4562,8 +4575,10 @@ static int get_file_all_info(struct ksmbd_work *work, + if (IS_ERR(filename)) + return PTR_ERR(filename); + +- inode = file_inode(fp->filp); +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat); ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + + ksmbd_debug(SMB, "filename = %s\n", filename); + delete_pending = ksmbd_inode_pending_delete(fp); +@@ -4579,7 +4594,7 @@ static int get_file_all_info(struct ksmbd_work *work, + file_info->Attributes = fp->f_ci->m_fattr; + file_info->Pad1 = 0; + file_info->AllocationSize = +- cpu_to_le64(inode->i_blocks << 9); ++ cpu_to_le64(stat.blocks << 9); + file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); + file_info->NumberOfLinks = + cpu_to_le32(get_nlink(&stat) - delete_pending); +@@ -4623,10 +4638,10 @@ static void get_file_alternate_info(struct ksmbd_work *work, + cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len); + } + +-static void get_file_stream_info(struct ksmbd_work *work, +- struct smb2_query_info_rsp *rsp, +- struct ksmbd_file *fp, +- void *rsp_org) ++static int get_file_stream_info(struct ksmbd_work *work, ++ struct smb2_query_info_rsp *rsp, ++ struct ksmbd_file *fp, ++ void *rsp_org) + { + struct ksmbd_conn *conn = work->conn; + struct smb2_file_stream_info *file_info; +@@ -4637,9 +4652,13 @@ static void get_file_stream_info(struct ksmbd_work *work, + int nbytes = 0, streamlen, stream_name_len, next, idx = 0; + int buf_free_len; + struct smb2_query_info_req *req = ksmbd_req_buf_next(work); ++ int ret; ++ ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, +- file_inode(fp->filp), &stat); + file_info = (struct smb2_file_stream_info *)rsp->Buffer; + + buf_free_len = +@@ -4720,29 +4739,37 @@ static void get_file_stream_info(struct ksmbd_work *work, + kvfree(xattr_list); + + rsp->OutputBufferLength = cpu_to_le32(nbytes); ++ ++ return 0; + } + +-static void get_file_internal_info(struct smb2_query_info_rsp *rsp, +- struct ksmbd_file *fp, void *rsp_org) ++static int get_file_internal_info(struct smb2_query_info_rsp *rsp, ++ struct ksmbd_file *fp, void *rsp_org) + { + struct smb2_file_internal_info *file_info; + struct kstat stat; ++ int ret; ++ ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, +- file_inode(fp->filp), &stat); + file_info = (struct smb2_file_internal_info *)rsp->Buffer; + file_info->IndexNumber = cpu_to_le64(stat.ino); + rsp->OutputBufferLength = + cpu_to_le32(sizeof(struct smb2_file_internal_info)); ++ ++ return 0; + } + + static int get_file_network_open_info(struct smb2_query_info_rsp *rsp, + struct ksmbd_file *fp, void *rsp_org) + 
{ + struct smb2_file_ntwrk_info *file_info; +- struct inode *inode; + struct kstat stat; + u64 time; ++ int ret; + + if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) { + pr_err("no right to read the attributes : 0x%x\n", +@@ -4750,10 +4777,12 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp, + return -EACCES; + } + +- file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer; ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + +- inode = file_inode(fp->filp); +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat); ++ file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer; + + file_info->CreationTime = cpu_to_le64(fp->create_time); + time = ksmbd_UnixTimeToNT(stat.atime); +@@ -4763,8 +4792,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp, + time = ksmbd_UnixTimeToNT(stat.ctime); + file_info->ChangeTime = cpu_to_le64(time); + file_info->Attributes = fp->f_ci->m_fattr; +- file_info->AllocationSize = +- cpu_to_le64(inode->i_blocks << 9); ++ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9); + file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); + file_info->Reserved = cpu_to_le32(0); + rsp->OutputBufferLength = +@@ -4804,14 +4832,17 @@ static void get_file_mode_info(struct smb2_query_info_rsp *rsp, + cpu_to_le32(sizeof(struct smb2_file_mode_info)); + } + +-static void get_file_compression_info(struct smb2_query_info_rsp *rsp, +- struct ksmbd_file *fp, void *rsp_org) ++static int get_file_compression_info(struct smb2_query_info_rsp *rsp, ++ struct ksmbd_file *fp, void *rsp_org) + { + struct smb2_file_comp_info *file_info; + struct kstat stat; ++ int ret; + +- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, +- file_inode(fp->filp), &stat); ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + + file_info = (struct smb2_file_comp_info *)rsp->Buffer; + file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9); +@@ -4823,6 +4854,8 @@ static void get_file_compression_info(struct smb2_query_info_rsp *rsp, + + rsp->OutputBufferLength = + cpu_to_le32(sizeof(struct smb2_file_comp_info)); ++ ++ return 0; + } + + static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp, +@@ -4844,7 +4877,7 @@ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp, + return 0; + } + +-static void find_file_posix_info(struct smb2_query_info_rsp *rsp, ++static int find_file_posix_info(struct smb2_query_info_rsp *rsp, + struct ksmbd_file *fp, void *rsp_org) + { + struct smb311_posix_qinfo *file_info; +@@ -4852,24 +4885,31 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp, + struct mnt_idmap *idmap = file_mnt_idmap(fp->filp); + vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); + vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode); ++ struct kstat stat; + u64 time; + int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32; ++ int ret; ++ ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) ++ return ret; + + file_info = (struct smb311_posix_qinfo *)rsp->Buffer; + file_info->CreationTime = cpu_to_le64(fp->create_time); +- time = ksmbd_UnixTimeToNT(inode_get_atime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.atime); + file_info->LastAccessTime = cpu_to_le64(time); +- time = ksmbd_UnixTimeToNT(inode_get_mtime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.mtime); + file_info->LastWriteTime = 
cpu_to_le64(time); +- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.ctime); + file_info->ChangeTime = cpu_to_le64(time); + file_info->DosAttributes = fp->f_ci->m_fattr; +- file_info->Inode = cpu_to_le64(inode->i_ino); +- file_info->EndOfFile = cpu_to_le64(inode->i_size); +- file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9); +- file_info->HardLinks = cpu_to_le32(inode->i_nlink); +- file_info->Mode = cpu_to_le32(inode->i_mode & 0777); +- file_info->DeviceId = cpu_to_le32(inode->i_rdev); ++ file_info->Inode = cpu_to_le64(stat.ino); ++ file_info->EndOfFile = cpu_to_le64(stat.size); ++ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9); ++ file_info->HardLinks = cpu_to_le32(stat.nlink); ++ file_info->Mode = cpu_to_le32(stat.mode & 0777); ++ file_info->DeviceId = cpu_to_le32(stat.rdev); + + /* + * Sids(32) contain two sids(Domain sid(16), UNIX group sid(16)). +@@ -4882,6 +4922,8 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp, + SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]); + + rsp->OutputBufferLength = cpu_to_le32(out_buf_len); ++ ++ return 0; + } + + static int smb2_get_info_file(struct ksmbd_work *work, +@@ -4930,7 +4972,7 @@ static int smb2_get_info_file(struct ksmbd_work *work, + break; + + case FILE_STANDARD_INFORMATION: +- get_file_standard_info(rsp, fp, work->response_buf); ++ rc = get_file_standard_info(rsp, fp, work->response_buf); + break; + + case FILE_ALIGNMENT_INFORMATION: +@@ -4946,11 +4988,11 @@ static int smb2_get_info_file(struct ksmbd_work *work, + break; + + case FILE_STREAM_INFORMATION: +- get_file_stream_info(work, rsp, fp, work->response_buf); ++ rc = get_file_stream_info(work, rsp, fp, work->response_buf); + break; + + case FILE_INTERNAL_INFORMATION: +- get_file_internal_info(rsp, fp, work->response_buf); ++ rc = get_file_internal_info(rsp, fp, work->response_buf); + break; + + case FILE_NETWORK_OPEN_INFORMATION: +@@ -4974,7 +5016,7 @@ static int smb2_get_info_file(struct ksmbd_work *work, + break; + + case FILE_COMPRESSION_INFORMATION: +- get_file_compression_info(rsp, fp, work->response_buf); ++ rc = get_file_compression_info(rsp, fp, work->response_buf); + break; + + case FILE_ATTRIBUTE_TAG_INFORMATION: +@@ -4985,7 +5027,7 @@ static int smb2_get_info_file(struct ksmbd_work *work, + pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n"); + rc = -EOPNOTSUPP; + } else { +- find_file_posix_info(rsp, fp, work->response_buf); ++ rc = find_file_posix_info(rsp, fp, work->response_buf); + } + break; + default: +@@ -5398,7 +5440,6 @@ int smb2_close(struct ksmbd_work *work) + struct smb2_close_rsp *rsp; + struct ksmbd_conn *conn = work->conn; + struct ksmbd_file *fp; +- struct inode *inode; + u64 time; + int err = 0; + +@@ -5453,24 +5494,33 @@ int smb2_close(struct ksmbd_work *work) + rsp->Reserved = 0; + + if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) { ++ struct kstat stat; ++ int ret; ++ + fp = ksmbd_lookup_fd_fast(work, volatile_id); + if (!fp) { + err = -ENOENT; + goto out; + } + +- inode = file_inode(fp->filp); ++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (ret) { ++ ksmbd_fd_put(work, fp); ++ goto out; ++ } ++ + rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; +- rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 : +- cpu_to_le64(inode->i_blocks << 9); +- rsp->EndOfFile = cpu_to_le64(inode->i_size); ++ rsp->AllocationSize = S_ISDIR(stat.mode) ? 
0 : ++ cpu_to_le64(stat.blocks << 9); ++ rsp->EndOfFile = cpu_to_le64(stat.size); + rsp->Attributes = fp->f_ci->m_fattr; + rsp->CreationTime = cpu_to_le64(fp->create_time); +- time = ksmbd_UnixTimeToNT(inode_get_atime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.atime); + rsp->LastAccessTime = cpu_to_le64(time); +- time = ksmbd_UnixTimeToNT(inode_get_mtime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.mtime); + rsp->LastWriteTime = cpu_to_le64(time); +- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode)); ++ time = ksmbd_UnixTimeToNT(stat.ctime); + rsp->ChangeTime = cpu_to_le64(time); + ksmbd_fd_put(work, fp); + } else { +diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c +index 7c98bf699772f..fcaf373cc0080 100644 +--- a/fs/smb/server/smb_common.c ++++ b/fs/smb/server/smb_common.c +@@ -457,10 +457,13 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level, + } + + ksmbd_kstat.kstat = &kstat; +- ksmbd_vfs_fill_dentry_attrs(work, +- idmap, +- dentry, +- &ksmbd_kstat); ++ rc = ksmbd_vfs_fill_dentry_attrs(work, ++ idmap, ++ dentry, ++ &ksmbd_kstat); ++ if (rc) ++ break; ++ + rc = fn(conn, info_level, d_info, &ksmbd_kstat); + if (rc) + break; +diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c +index a6961bfe3e139..2e992fadeaa7d 100644 +--- a/fs/smb/server/vfs.c ++++ b/fs/smb/server/vfs.c +@@ -1682,11 +1682,19 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work, + struct dentry *dentry, + struct ksmbd_kstat *ksmbd_kstat) + { ++ struct ksmbd_share_config *share_conf = work->tcon->share_conf; + u64 time; + int rc; ++ struct path path = { ++ .mnt = share_conf->vfs_path.mnt, ++ .dentry = dentry, ++ }; + +- generic_fillattr(idmap, STATX_BASIC_STATS, d_inode(dentry), +- ksmbd_kstat->kstat); ++ rc = vfs_getattr(&path, ksmbd_kstat->kstat, ++ STATX_BASIC_STATS | STATX_BTIME, ++ AT_STATX_SYNC_AS_STAT); ++ if (rc) ++ return rc; + + time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime); + ksmbd_kstat->create_time = time; +-- +2.43.0 + diff --git a/queue-6.8/ksmbd-retrieve-number-of-blocks-using-vfs_getattr-in.patch b/queue-6.8/ksmbd-retrieve-number-of-blocks-using-vfs_getattr-in.patch new file mode 100644 index 00000000000..1e2e35bfb48 --- /dev/null +++ b/queue-6.8/ksmbd-retrieve-number-of-blocks-using-vfs_getattr-in.patch @@ -0,0 +1,61 @@ +From 368fcc47d5485d997a349b06769b7fb995da216f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 22 Feb 2024 10:58:21 +0100 +Subject: ksmbd: retrieve number of blocks using vfs_getattr in + set_file_allocation_info + +From: Marios Makassikis + +[ Upstream commit 34cd86b6632718b7df3999d96f51e63de41c5e4f ] + +Use vfs_getattr() to retrieve stat information, rather than make +assumptions about how a filesystem fills inode structs. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Marios Makassikis +Acked-by: Namjae Jeon +Signed-off-by: Steve French +Signed-off-by: Sasha Levin +--- + fs/smb/server/smb2pdu.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index f6cc5d2730ffb..199c31c275e5b 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -5809,15 +5809,21 @@ static int set_file_allocation_info(struct ksmbd_work *work, + + loff_t alloc_blks; + struct inode *inode; ++ struct kstat stat; + int rc; + + if (!(fp->daccess & FILE_WRITE_DATA_LE)) + return -EACCES; + ++ rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, ++ AT_STATX_SYNC_AS_STAT); ++ if (rc) ++ return rc; ++ + alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9; + inode = file_inode(fp->filp); + +- if (alloc_blks > inode->i_blocks) { ++ if (alloc_blks > stat.blocks) { + smb_break_all_levII_oplock(work, fp, 1); + rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0, + alloc_blks * 512); +@@ -5825,7 +5831,7 @@ static int set_file_allocation_info(struct ksmbd_work *work, + pr_err("vfs_fallocate is failed : %d\n", rc); + return rc; + } +- } else if (alloc_blks < inode->i_blocks) { ++ } else if (alloc_blks < stat.blocks) { + loff_t size; + + /* +-- +2.43.0 + diff --git a/queue-6.8/kvm-always-flush-async-pf-workqueue-when-vcpu-is-bei.patch b/queue-6.8/kvm-always-flush-async-pf-workqueue-when-vcpu-is-bei.patch new file mode 100644 index 00000000000..33908dada5e --- /dev/null +++ b/queue-6.8/kvm-always-flush-async-pf-workqueue-when-vcpu-is-bei.patch @@ -0,0 +1,183 @@ +From c2e373f1323a04cea5511ef33452c482656a5642 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 9 Jan 2024 17:15:30 -0800 +Subject: KVM: Always flush async #PF workqueue when vCPU is being destroyed + +From: Sean Christopherson + +[ Upstream commit 3d75b8aa5c29058a512db29da7cbee8052724157 ] + +Always flush the per-vCPU async #PF workqueue when a vCPU is clearing its +completion queue, e.g. when a VM and all its vCPUs is being destroyed. +KVM must ensure that none of its workqueue callbacks is running when the +last reference to the KVM _module_ is put. Gifting a reference to the +associated VM prevents the workqueue callback from dereferencing freed +vCPU/VM memory, but does not prevent the KVM module from being unloaded +before the callback completes. + +Drop the misguided VM refcount gifting, as calling kvm_put_kvm() from +async_pf_execute() if kvm_put_kvm() flushes the async #PF workqueue will +result in deadlock. async_pf_execute() can't return until kvm_put_kvm() +finishes, and kvm_put_kvm() can't return until async_pf_execute() finishes: + + WARNING: CPU: 8 PID: 251 at virt/kvm/kvm_main.c:1435 kvm_put_kvm+0x2d/0x320 [kvm] + Modules linked in: vhost_net vhost vhost_iotlb tap kvm_intel kvm irqbypass + CPU: 8 PID: 251 Comm: kworker/8:1 Tainted: G W 6.6.0-rc1-e7af8d17224a-x86/gmem-vm #119 + Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 + Workqueue: events async_pf_execute [kvm] + RIP: 0010:kvm_put_kvm+0x2d/0x320 [kvm] + Call Trace: + + async_pf_execute+0x198/0x260 [kvm] + process_one_work+0x145/0x2d0 + worker_thread+0x27e/0x3a0 + kthread+0xba/0xe0 + ret_from_fork+0x2d/0x50 + ret_from_fork_asm+0x11/0x20 + + ---[ end trace 0000000000000000 ]--- + INFO: task kworker/8:1:251 blocked for more than 120 seconds. + Tainted: G W 6.6.0-rc1-e7af8d17224a-x86/gmem-vm #119 + "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
+ task:kworker/8:1 state:D stack:0 pid:251 ppid:2 flags:0x00004000 + Workqueue: events async_pf_execute [kvm] + Call Trace: + + __schedule+0x33f/0xa40 + schedule+0x53/0xc0 + schedule_timeout+0x12a/0x140 + __wait_for_common+0x8d/0x1d0 + __flush_work.isra.0+0x19f/0x2c0 + kvm_clear_async_pf_completion_queue+0x129/0x190 [kvm] + kvm_arch_destroy_vm+0x78/0x1b0 [kvm] + kvm_put_kvm+0x1c1/0x320 [kvm] + async_pf_execute+0x198/0x260 [kvm] + process_one_work+0x145/0x2d0 + worker_thread+0x27e/0x3a0 + kthread+0xba/0xe0 + ret_from_fork+0x2d/0x50 + ret_from_fork_asm+0x11/0x20 + + +If kvm_clear_async_pf_completion_queue() actually flushes the workqueue, +then there's no need to gift async_pf_execute() a reference because all +invocations of async_pf_execute() will be forced to complete before the +vCPU and its VM are destroyed/freed. And that in turn fixes the module +unloading bug as __fput() won't do module_put() on the last vCPU reference +until the vCPU has been freed, e.g. if closing the vCPU file also puts the +last reference to the KVM module. + +Note that kvm_check_async_pf_completion() may also take the work item off +the completion queue and so also needs to flush the work queue, as the +work will not be seen by kvm_clear_async_pf_completion_queue(). Waiting +on the workqueue could theoretically delay a vCPU due to waiting for the +work to complete, but that's a very, very small chance, and likely a very +small delay. kvm_arch_async_page_present_queued() unconditionally makes a +new request, i.e. will effectively delay entering the guest, so the +remaining work is really just: + + trace_kvm_async_pf_completed(addr, cr2_or_gpa); + + __kvm_vcpu_wake_up(vcpu); + + mmput(mm); + +and mmput() can't drop the last reference to the page tables if the vCPU is +still alive, i.e. the vCPU won't get stuck tearing down page tables. + +Add a helper to do the flushing, specifically to deal with "wakeup all" +work items, as they aren't actually work items, i.e. are never placed in a +workqueue. Trying to flush a bogus workqueue entry rightly makes +__flush_work() complain (kudos to whoever added that sanity check). + +Note, commit 5f6de5cbebee ("KVM: Prevent module exit until all VMs are +freed") *tried* to fix the module refcounting issue by having VMs grab a +reference to the module, but that only made the bug slightly harder to hit +as it gave async_pf_execute() a bit more time to complete before the KVM +module could be unloaded. + +Fixes: af585b921e5d ("KVM: Halt vcpu if page it tries to access is swapped out") +Cc: stable@vger.kernel.org +Cc: David Matlack +Reviewed-by: Xu Yilun +Reviewed-by: Vitaly Kuznetsov +Link: https://lore.kernel.org/r/20240110011533.503302-2-seanjc@google.com +Signed-off-by: Sean Christopherson +Signed-off-by: Sasha Levin +--- + virt/kvm/async_pf.c | 31 ++++++++++++++++++++++++++----- + 1 file changed, 26 insertions(+), 5 deletions(-) + +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c +index e033c79d528e0..28658b9e0d968 100644 +--- a/virt/kvm/async_pf.c ++++ b/virt/kvm/async_pf.c +@@ -87,7 +87,27 @@ static void async_pf_execute(struct work_struct *work) + __kvm_vcpu_wake_up(vcpu); + + mmput(mm); +- kvm_put_kvm(vcpu->kvm); ++} ++ ++static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) ++{ ++ /* ++ * The async #PF is "done", but KVM must wait for the work item itself, ++ * i.e. async_pf_execute(), to run to completion. If KVM is a module, ++ * KVM must ensure *no* code owned by the KVM (the module) can be run ++ * after the last call to module_put(). 
Note, flushing the work item ++ * is always required when the item is taken off the completion queue. ++ * E.g. even if the vCPU handles the item in the "normal" path, the VM ++ * could be terminated before async_pf_execute() completes. ++ * ++ * Wake all events skip the queue and go straight done, i.e. don't ++ * need to be flushed (but sanity check that the work wasn't queued). ++ */ ++ if (work->wakeup_all) ++ WARN_ON_ONCE(work->work.func); ++ else ++ flush_work(&work->work); ++ kmem_cache_free(async_pf_cache, work); + } + + void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) +@@ -114,7 +134,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) + #else + if (cancel_work_sync(&work->work)) { + mmput(work->mm); +- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ + kmem_cache_free(async_pf_cache, work); + } + #endif +@@ -126,7 +145,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) + list_first_entry(&vcpu->async_pf.done, + typeof(*work), link); + list_del(&work->link); +- kmem_cache_free(async_pf_cache, work); ++ ++ spin_unlock(&vcpu->async_pf.lock); ++ kvm_flush_and_free_async_pf_work(work); ++ spin_lock(&vcpu->async_pf.lock); + } + spin_unlock(&vcpu->async_pf.lock); + +@@ -151,7 +173,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) + + list_del(&work->queue); + vcpu->async_pf.queued--; +- kmem_cache_free(async_pf_cache, work); ++ kvm_flush_and_free_async_pf_work(work); + } + } + +@@ -186,7 +208,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + work->arch = *arch; + work->mm = current->mm; + mmget(work->mm); +- kvm_get_kvm(work->vcpu->kvm); + + INIT_WORK(&work->work, async_pf_execute); + +-- +2.43.0 + diff --git a/queue-6.8/kvm-x86-xen-inject-vcpu-upcall-vector-when-local-api.patch b/queue-6.8/kvm-x86-xen-inject-vcpu-upcall-vector-when-local-api.patch new file mode 100644 index 00000000000..87794ab1f0c --- /dev/null +++ b/queue-6.8/kvm-x86-xen-inject-vcpu-upcall-vector-when-local-api.patch @@ -0,0 +1,124 @@ +From 4272436708b50e179d39b32de8303966b27d75a4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 11:49:16 +0000 +Subject: KVM: x86/xen: inject vCPU upcall vector when local APIC is enabled + +From: David Woodhouse + +[ Upstream commit 8e62bf2bfa46367e14d0ffdcde5aada08759497c ] + +Linux guests since commit b1c3497e604d ("x86/xen: Add support for +HVMOP_set_evtchn_upcall_vector") in v6.0 onwards will use the per-vCPU +upcall vector when it's advertised in the Xen CPUID leaves. + +This upcall is injected through the guest's local APIC as an MSI, unlike +the older system vector which was merely injected by the hypervisor any +time the CPU was able to receive an interrupt and the upcall_pending +flags is set in its vcpu_info. + +Effectively, that makes the per-CPU upcall edge triggered instead of +level triggered, which results in the upcall being lost if the MSI is +delivered when the local APIC is *disabled*. + +Xen checks the vcpu_info->evtchn_upcall_pending flag when the local APIC +for a vCPU is software enabled (in fact, on any write to the SPIV +register which doesn't disable the APIC). Do the same in KVM since KVM +doesn't provide a way for userspace to intervene and trap accesses to +the SPIV register of a local APIC emulated by KVM. 
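
For readability, the check the patch wires into the SPIV write path (kvm_xen_sw_enable_lapic() in the diff below) condenses to the sketch here; only the function name is illustrative, the conditions are taken from the hunk itself.

	/*
	 * Condensed form of the hook added below: when the guest
	 * software-enables its local APIC, re-deliver a pending per-vCPU
	 * Xen upcall that the earlier edge-triggered MSI would have lost.
	 */
	static inline void example_sw_enable_lapic(struct kvm_vcpu *vcpu)
	{
		if (static_branch_unlikely(&kvm_xen_enabled.key) &&
		    vcpu->arch.xen.vcpu_info_cache.active &&
		    vcpu->arch.xen.upcall_vector &&
		    __kvm_xen_has_interrupt(vcpu))
			kvm_xen_inject_vcpu_vector(vcpu);
	}

Putting the whole check behind the kvm_xen_enabled static key keeps the common (non-Xen) SPIV write path effectively free of overhead.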
+ +Fixes: fde0451be8fb3 ("KVM: x86/xen: Support per-vCPU event channel upcall via local APIC") +Signed-off-by: David Woodhouse +Reviewed-by: Paul Durrant +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240227115648.3104-3-dwmw2@infradead.org +Signed-off-by: Sean Christopherson +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/lapic.c | 5 ++++- + arch/x86/kvm/xen.c | 2 +- + arch/x86/kvm/xen.h | 18 ++++++++++++++++++ + 3 files changed, 23 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 3242f3da24576..75bc7d3f0022d 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -41,6 +41,7 @@ + #include "ioapic.h" + #include "trace.h" + #include "x86.h" ++#include "xen.h" + #include "cpuid.h" + #include "hyperv.h" + #include "smm.h" +@@ -499,8 +500,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) + } + + /* Check if there are APF page ready requests pending */ +- if (enabled) ++ if (enabled) { + kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); ++ kvm_xen_sw_enable_lapic(apic->vcpu); ++ } + } + + static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) +diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c +index 4b4e738c6f1b7..b0212ba2d87a6 100644 +--- a/arch/x86/kvm/xen.c ++++ b/arch/x86/kvm/xen.c +@@ -493,7 +493,7 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) + kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable); + } + +-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) ++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) + { + struct kvm_lapic_irq irq = { }; + int r; +diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h +index f8f1fe22d0906..f5841d9000aeb 100644 +--- a/arch/x86/kvm/xen.h ++++ b/arch/x86/kvm/xen.h +@@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled; + + int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); + void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu); ++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu); + int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); + int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); + int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +@@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm, + const struct kvm_irq_routing_entry *ue); + void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu); + ++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) ++{ ++ /* ++ * The local APIC is being enabled. If the per-vCPU upcall vector is ++ * set and the vCPU's evtchn_upcall_pending flag is set, inject the ++ * interrupt. 
++ */ ++ if (static_branch_unlikely(&kvm_xen_enabled.key) && ++ vcpu->arch.xen.vcpu_info_cache.active && ++ vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu)) ++ kvm_xen_inject_vcpu_vector(vcpu); ++} ++ + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) + { + return static_branch_unlikely(&kvm_xen_enabled.key) && +@@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) + { + } + ++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) ++{ ++} ++ + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) + { + return false; +-- +2.43.0 + diff --git a/queue-6.8/landlock-warn-once-if-a-landlock-action-is-requested.patch b/queue-6.8/landlock-warn-once-if-a-landlock-action-is-requested.patch new file mode 100644 index 00000000000..2000b071d40 --- /dev/null +++ b/queue-6.8/landlock-warn-once-if-a-landlock-action-is-requested.patch @@ -0,0 +1,83 @@ +From 200d14be567c2c5517547e0ca6efac05dc0d2718 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 12:05:50 +0100 +Subject: landlock: Warn once if a Landlock action is requested while disabled +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Mickaël Salaün + +[ Upstream commit 782191c74875cc33b50263e21d76080b1411884d ] + +Because sandboxing can be used as an opportunistic security measure, +user space may not log unsupported features. Let the system +administrator know if an application tries to use Landlock but failed +because it isn't enabled at boot time. This may be caused by boot +loader configurations with outdated "lsm" kernel's command-line +parameter. + +Cc: stable@vger.kernel.org +Fixes: 265885daf3e5 ("landlock: Add syscall implementations") +Reviewed-by: Kees Cook +Reviewed-by: Günther Noack +Link: https://lore.kernel.org/r/20240227110550.3702236-2-mic@digikod.net +Signed-off-by: Mickaël Salaün +Signed-off-by: Sasha Levin +--- + security/landlock/syscalls.c | 18 +++++++++++++++--- + 1 file changed, 15 insertions(+), 3 deletions(-) + +diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c +index 898358f57fa08..6788e73b6681b 100644 +--- a/security/landlock/syscalls.c ++++ b/security/landlock/syscalls.c +@@ -33,6 +33,18 @@ + #include "ruleset.h" + #include "setup.h" + ++static bool is_initialized(void) ++{ ++ if (likely(landlock_initialized)) ++ return true; ++ ++ pr_warn_once( ++ "Disabled but requested by user space. " ++ "You should enable Landlock at boot time: " ++ "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n"); ++ return false; ++} ++ + /** + * copy_min_struct_from_user - Safe future-proof argument copying + * +@@ -173,7 +185,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset, + /* Build-time checks. */ + build_check_abi(); + +- if (!landlock_initialized) ++ if (!is_initialized()) + return -EOPNOTSUPP; + + if (flags) { +@@ -398,7 +410,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, + struct landlock_ruleset *ruleset; + int err; + +- if (!landlock_initialized) ++ if (!is_initialized()) + return -EOPNOTSUPP; + + /* No flag for now. 
*/ +@@ -458,7 +470,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, + struct landlock_cred_security *new_llcred; + int err; + +- if (!landlock_initialized) ++ if (!is_initialized()) + return -EOPNOTSUPP; + + /* +-- +2.43.0 + diff --git a/queue-6.8/leds-trigger-netdev-fix-kernel-panic-on-interface-re.patch b/queue-6.8/leds-trigger-netdev-fix-kernel-panic-on-interface-re.patch new file mode 100644 index 00000000000..a9a6c84fcf9 --- /dev/null +++ b/queue-6.8/leds-trigger-netdev-fix-kernel-panic-on-interface-re.patch @@ -0,0 +1,65 @@ +From dfcf1c2f09918b85fc695e62dbf4f7318af2213b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Feb 2024 00:54:01 +0100 +Subject: leds: trigger: netdev: Fix kernel panic on interface rename trig + notify + +From: Christian Marangi + +[ Upstream commit 415798bc07dd1c1ae3a656aa026580816e0b9fe8 ] + +Commit d5e01266e7f5 ("leds: trigger: netdev: add additional specific link +speed mode") in the various changes, reworked the way to set the LINKUP +mode in commit cee4bd16c319 ("leds: trigger: netdev: Recheck +NETDEV_LED_MODE_LINKUP on dev rename") and moved it to a generic function. + +This changed the logic where, in the previous implementation the dev +from the trigger event was used to check if the carrier was ok, but in +the new implementation with the generic function, the dev in +trigger_data is used instead. + +This is problematic and cause a possible kernel panic due to the fact +that the dev in the trigger_data still reference the old one as the +new one (passed from the trigger event) still has to be hold and saved +in the trigger_data struct (done in the NETDEV_REGISTER case). + +On calling of get_device_state(), an invalid net_dev is used and this +cause a kernel panic. + +To handle this correctly, move the call to get_device_state() after the +new net_dev is correctly set in trigger_data (in the NETDEV_REGISTER +case) and correctly parse the new dev. 
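
Condensed, the reordering described above amounts to the excerpt below, taken from the hunk that follows with comments added (not a complete notifier):

	case NETDEV_CHANGENAME:
	case NETDEV_REGISTER:
		dev_put(trigger_data->net_dev);	/* drop the stale device    */
		dev_hold(dev);			/* hold the new one         */
		trigger_data->net_dev = dev;	/* publish it first ...     */
		if (evt == NETDEV_CHANGENAME)	/* ... then read its state  */
			get_device_state(trigger_data);
		break;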
+ +Fixes: d5e01266e7f5 ("leds: trigger: netdev: add additional specific link speed mode") +Cc: stable@vger.kernel.org +Signed-off-by: Christian Marangi +Reviewed-by: Andrew Lunn +Link: https://lore.kernel.org/r/20240203235413.1146-1-ansuelsmth@gmail.com +Signed-off-by: Lee Jones +Signed-off-by: Sasha Levin +--- + drivers/leds/trigger/ledtrig-netdev.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c +index 8e5475819590e..df1b1d8468e60 100644 +--- a/drivers/leds/trigger/ledtrig-netdev.c ++++ b/drivers/leds/trigger/ledtrig-netdev.c +@@ -504,12 +504,12 @@ static int netdev_trig_notify(struct notifier_block *nb, + trigger_data->duplex = DUPLEX_UNKNOWN; + switch (evt) { + case NETDEV_CHANGENAME: +- get_device_state(trigger_data); +- fallthrough; + case NETDEV_REGISTER: + dev_put(trigger_data->net_dev); + dev_hold(dev); + trigger_data->net_dev = dev; ++ if (evt == NETDEV_CHANGENAME) ++ get_device_state(trigger_data); + break; + case NETDEV_UNREGISTER: + dev_put(trigger_data->net_dev); +-- +2.43.0 + diff --git a/queue-6.8/loongarch-change-__my_cpu_offset-definition-to-avoid.patch b/queue-6.8/loongarch-change-__my_cpu_offset-definition-to-avoid.patch new file mode 100644 index 00000000000..e1079c8cb9a --- /dev/null +++ b/queue-6.8/loongarch-change-__my_cpu_offset-definition-to-avoid.patch @@ -0,0 +1,58 @@ +From 6c02dbfb399845c9b9352a012956df306d68130c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Mar 2024 15:50:34 +0800 +Subject: LoongArch: Change __my_cpu_offset definition to avoid + mis-optimization + +From: Huacai Chen + +[ Upstream commit c87e12e0e8c1241410e758e181ca6bf23efa5b5b ] + +From GCC commit 3f13154553f8546a ("df-scan: remove ad-hoc handling of +global regs in asms"), global registers will no longer be forced to add +to the def-use chain. Then current_thread_info(), current_stack_pointer +and __my_cpu_offset may be lifted out of the loop because they are no +longer treated as "volatile variables". + +This optimization is still correct for the current_thread_info() and +current_stack_pointer usages because they are associated to a thread. +However it is wrong for __my_cpu_offset because it is associated to a +CPU rather than a thread: if the thread migrates to a different CPU in +the loop, __my_cpu_offset should be changed. + +Change __my_cpu_offset definition to treat it as a "volatile variable", +in order to avoid such a mis-optimization. 
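
A minimal sketch of the idiom follows, with placeholder names; the register chosen here is only for illustration and the real definition is in the hunk below. An empty asm with a "+r" operand tells the compiler the value may have changed, so every use reloads it instead of reusing a copy hoisted out of a loop.

	/* Illustration only: a global register variable whose reads must
	 * not be hoisted across a possible CPU migration point.
	 */
	register unsigned long example_percpu_base __asm__("$r21");

	#define example_my_cpu_offset					\
	({								\
		__asm__ __volatile__("" : "+r"(example_percpu_base));	\
		example_percpu_base;					\
	})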
+ +Cc: stable@vger.kernel.org +Reported-by: Xiaotian Wu +Reported-by: Miao Wang +Signed-off-by: Xing Li +Signed-off-by: Hongchen Zhang +Signed-off-by: Rui Wang +Signed-off-by: Huacai Chen +Signed-off-by: Sasha Levin +--- + arch/loongarch/include/asm/percpu.h | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h +index 9b36ac003f890..8f290e5546cf7 100644 +--- a/arch/loongarch/include/asm/percpu.h ++++ b/arch/loongarch/include/asm/percpu.h +@@ -29,7 +29,12 @@ static inline void set_my_cpu_offset(unsigned long off) + __my_cpu_offset = off; + csr_write64(off, PERCPU_BASE_KS); + } +-#define __my_cpu_offset __my_cpu_offset ++ ++#define __my_cpu_offset \ ++({ \ ++ __asm__ __volatile__("":"+r"(__my_cpu_offset)); \ ++ __my_cpu_offset; \ ++}) + + #define PERCPU_OP(op, asm_op, c_op) \ + static __always_inline unsigned long __percpu_##op(void *ptr, \ +-- +2.43.0 + diff --git a/queue-6.8/loongarch-crypto-clean-up-useless-assignment-operati.patch b/queue-6.8/loongarch-crypto-clean-up-useless-assignment-operati.patch new file mode 100644 index 00000000000..1a3139c839a --- /dev/null +++ b/queue-6.8/loongarch-crypto-clean-up-useless-assignment-operati.patch @@ -0,0 +1,50 @@ +From e3519bfe7f35e8e54175eed3d8d34b10799019d0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Mar 2024 15:50:34 +0800 +Subject: LoongArch/crypto: Clean up useless assignment operations + +From: Yuli Wang + +[ Upstream commit fea1c949f6ca5059e12de00d0483645debc5b206 ] + +The LoongArch CRC32 hw acceleration is based on arch/mips/crypto/ +crc32-mips.c. While the MIPS code supports both MIPS32 and MIPS64, +but LoongArch32 lacks the CRC instruction. As a result, the line +"len -= sizeof(u32)" is unnecessary. + +Removing it can make context code style more unified and improve +code readability. + +Cc: stable@vger.kernel.org +Reviewed-by: WANG Xuerui +Suggested-by: Wentao Guan +Signed-off-by: Yuli Wang +Signed-off-by: Huacai Chen +Signed-off-by: Sasha Levin +--- + arch/loongarch/crypto/crc32-loongarch.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c +index a49e507af38c0..3eebea3a7b478 100644 +--- a/arch/loongarch/crypto/crc32-loongarch.c ++++ b/arch/loongarch/crypto/crc32-loongarch.c +@@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len) + + CRC32(crc, value, w); + p += sizeof(u32); +- len -= sizeof(u32); + } + + if (len & sizeof(u16)) { +@@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len) + + CRC32C(crc, value, w); + p += sizeof(u32); +- len -= sizeof(u32); + } + + if (len & sizeof(u16)) { +-- +2.43.0 + diff --git a/queue-6.8/loongarch-define-the-__io_aw-hook-as-mmiowb.patch b/queue-6.8/loongarch-define-the-__io_aw-hook-as-mmiowb.patch new file mode 100644 index 00000000000..650c323bf9e --- /dev/null +++ b/queue-6.8/loongarch-define-the-__io_aw-hook-as-mmiowb.patch @@ -0,0 +1,117 @@ +From a8edad322fbabbcfa0bc152000a2178b79ad420e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 19 Mar 2024 15:50:34 +0800 +Subject: LoongArch: Define the __io_aw() hook as mmiowb() + +From: Huacai Chen + +[ Upstream commit 9c68ece8b2a5c5ff9b2fcaea923dd73efeb174cd ] + +Commit fb24ea52f78e0d595852e ("drivers: Remove explicit invocations of +mmiowb()") remove all mmiowb() in drivers, but it says: + +"NOTE: mmiowb() has only ever guaranteed ordering in conjunction with +spin_unlock(). 
However, pairing each mmiowb() removal in this patch with +the corresponding call to spin_unlock() is not at all trivial, so there +is a small chance that this change may regress any drivers incorrectly +relying on mmiowb() to order MMIO writes between CPUs using lock-free +synchronisation." + +The mmio in radeon_ring_commit() is protected by a mutex rather than a +spinlock, but in the mutex fastpath it behaves similar to spinlock. We +can add mmiowb() calls in the radeon driver but the maintainer says he +doesn't like such a workaround, and radeon is not the only example of +mutex protected mmio. + +So we should extend the mmiowb tracking system from spinlock to mutex, +and maybe other locking primitives. This is not easy and error prone, so +we solve it in the architectural code, by simply defining the __io_aw() +hook as mmiowb(). And we no longer need to override queued_spin_unlock() +so use the generic definition. + +Without this, we get such an error when run 'glxgears' on weak ordering +architectures such as LoongArch: + +radeon 0000:04:00.0: ring 0 stalled for more than 10324msec +radeon 0000:04:00.0: ring 3 stalled for more than 10240msec +radeon 0000:04:00.0: GPU lockup (current fence id 0x000000000001f412 last fence id 0x000000000001f414 on ring 3) +radeon 0000:04:00.0: GPU lockup (current fence id 0x000000000000f940 last fence id 0x000000000000f941 on ring 0) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). +[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) +radeon 0000:04:00.0: scheduling IB failed (-35). 
+[drm:radeon_gem_va_ioctl [radeon]] *ERROR* Couldn't update BO_VA (-35) + +Link: https://lore.kernel.org/dri-devel/29df7e26-d7a8-4f67-b988-44353c4270ac@amd.com/T/#t +Link: https://lore.kernel.org/linux-arch/20240301130532.3953167-1-chenhuacai@loongson.cn/T/#t +Cc: stable@vger.kernel.org +Signed-off-by: Huacai Chen +Signed-off-by: Sasha Levin +--- + arch/loongarch/include/asm/Kbuild | 1 + + arch/loongarch/include/asm/io.h | 2 ++ + arch/loongarch/include/asm/qspinlock.h | 18 ------------------ + 3 files changed, 3 insertions(+), 18 deletions(-) + delete mode 100644 arch/loongarch/include/asm/qspinlock.h + +diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild +index 93783fa24f6e9..dede0b422cfb9 100644 +--- a/arch/loongarch/include/asm/Kbuild ++++ b/arch/loongarch/include/asm/Kbuild +@@ -4,6 +4,7 @@ generic-y += mcs_spinlock.h + generic-y += parport.h + generic-y += early_ioremap.h + generic-y += qrwlock.h ++generic-y += qspinlock.h + generic-y += rwsem.h + generic-y += segment.h + generic-y += user.h +diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h +index c486c2341b662..4a8adcca329b8 100644 +--- a/arch/loongarch/include/asm/io.h ++++ b/arch/loongarch/include/asm/io.h +@@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t + #define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l)) + #define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l)) + ++#define __io_aw() mmiowb() ++ + #include + + #define ARCH_HAS_VALID_PHYS_ADDR_RANGE +diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h +deleted file mode 100644 +index 34f43f8ad5912..0000000000000 +--- a/arch/loongarch/include/asm/qspinlock.h ++++ /dev/null +@@ -1,18 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef _ASM_QSPINLOCK_H +-#define _ASM_QSPINLOCK_H +- +-#include +- +-#define queued_spin_unlock queued_spin_unlock +- +-static inline void queued_spin_unlock(struct qspinlock *lock) +-{ +- compiletime_assert_atomic_type(lock->locked); +- c_sync(); +- WRITE_ONCE(lock->locked, 0); +-} +- +-#include +- +-#endif /* _ASM_QSPINLOCK_H */ +-- +2.43.0 + diff --git a/queue-6.8/lsm-handle-the-null-buffer-case-in-lsm_fill_user_ctx.patch b/queue-6.8/lsm-handle-the-null-buffer-case-in-lsm_fill_user_ctx.patch new file mode 100644 index 00000000000..75938581189 --- /dev/null +++ b/queue-6.8/lsm-handle-the-null-buffer-case-in-lsm_fill_user_ctx.patch @@ -0,0 +1,55 @@ +From 444f77392a1f79026ca17f50c5cc504707daab58 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 21:37:48 -0400 +Subject: lsm: handle the NULL buffer case in lsm_fill_user_ctx() + +From: Paul Moore + +[ Upstream commit eaf0e7a3d2711018789e9fdb89191d19aa139c47 ] + +Passing a NULL buffer into the lsm_get_self_attr() syscall is a valid +way to quickly determine the minimum size of the buffer needed to for +the syscall to return all of the LSM attributes to the caller. +Unfortunately we/I broke that behavior in commit d7cf3412a9f6 +("lsm: consolidate buffer size handling into lsm_fill_user_ctx()") +such that it returned an error to the caller; this patch restores the +original desired behavior of using the NULL buffer as a quick way to +correctly size the attribute buffer. 
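
A typical user-space sequence relying on the restored behaviour looks like the sketch below (assuming uapi/libc headers that expose __NR_lsm_get_self_attr, LSM_ATTR_CURRENT and struct lsm_ctx; error handling trimmed): probe the required size with a NULL buffer, then allocate and repeat the call.

	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/types.h>
	#include <linux/lsm.h>

	static struct lsm_ctx *example_get_current_attr(void)
	{
		__u32 size = 0;
		struct lsm_ctx *ctx;

		/* NULL buffer: the kernel only reports the required size */
		if (syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT,
			    NULL, &size, 0) < 0)
			return NULL;

		ctx = malloc(size);
		if (!ctx)
			return NULL;

		/* second call actually fills the buffer */
		if (syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT,
			    ctx, &size, 0) < 0) {
			free(ctx);
			return NULL;
		}
		return ctx;
	}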
+ +Cc: stable@vger.kernel.org +Fixes: d7cf3412a9f6 ("lsm: consolidate buffer size handling into lsm_fill_user_ctx()") +Signed-off-by: Paul Moore +Signed-off-by: Sasha Levin +--- + security/security.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/security/security.c b/security/security.c +index fb7505c734853..a344b8fa5530d 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -780,7 +780,9 @@ static int lsm_superblock_alloc(struct super_block *sb) + * @id: LSM id + * @flags: LSM defined flags + * +- * Fill all of the fields in a userspace lsm_ctx structure. ++ * Fill all of the fields in a userspace lsm_ctx structure. If @uctx is NULL ++ * simply calculate the required size to output via @utc_len and return ++ * success. + * + * Returns 0 on success, -E2BIG if userspace buffer is not large enough, + * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated. +@@ -799,6 +801,10 @@ int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, + goto out; + } + ++ /* no buffer - return success/0 and set @uctx_len to the req size */ ++ if (!uctx) ++ goto out; ++ + nctx = kzalloc(nctx_len, GFP_KERNEL); + if (nctx == NULL) { + rc = -ENOMEM; +-- +2.43.0 + diff --git a/queue-6.8/lsm-use-32-bit-compatible-data-types-in-lsm-syscalls.patch b/queue-6.8/lsm-use-32-bit-compatible-data-types-in-lsm-syscalls.patch new file mode 100644 index 00000000000..a865ba1cb46 --- /dev/null +++ b/queue-6.8/lsm-use-32-bit-compatible-data-types-in-lsm-syscalls.patch @@ -0,0 +1,401 @@ +From 41c639352a4a526e5d52fd721111210936057468 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 11:31:26 -0400 +Subject: lsm: use 32-bit compatible data types in LSM syscalls + +From: Casey Schaufler + +[ Upstream commit a5a858f622a0aff5cdb5e271442cd01b2a01467f ] + +Change the size parameters in lsm_list_modules(), lsm_set_self_attr() +and lsm_get_self_attr() from size_t to u32. This avoids the need to +have different interfaces for 32 and 64 bit systems. + +Cc: stable@vger.kernel.org +Fixes: a04a1198088a ("LSM: syscalls for current process attributes") +Fixes: ad4aff9ec25f ("LSM: Create lsm_list_modules system call") +Signed-off-by: Casey Schaufler +Reported-and-reviewed-by: Dmitry V. 
Levin +[PM: subject and metadata tweaks, syscall.h fixes] +Signed-off-by: Paul Moore +Signed-off-by: Sasha Levin +--- + include/linux/lsm_hook_defs.h | 4 ++-- + include/linux/security.h | 8 ++++---- + include/linux/syscalls.h | 6 +++--- + security/apparmor/lsm.c | 4 ++-- + security/lsm_syscalls.c | 10 +++++----- + security/security.c | 12 ++++++------ + security/selinux/hooks.c | 4 ++-- + security/smack/smack_lsm.c | 4 ++-- + tools/testing/selftests/lsm/common.h | 6 +++--- + tools/testing/selftests/lsm/lsm_get_self_attr_test.c | 10 +++++----- + tools/testing/selftests/lsm/lsm_list_modules_test.c | 8 ++++---- + tools/testing/selftests/lsm/lsm_set_self_attr_test.c | 6 +++--- + 12 files changed, 41 insertions(+), 41 deletions(-) + +diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h +index 76458b6d53da7..f9b5baf1b5f46 100644 +--- a/include/linux/lsm_hook_defs.h ++++ b/include/linux/lsm_hook_defs.h +@@ -265,9 +265,9 @@ LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) + LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, + struct inode *inode) + LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr, +- struct lsm_ctx __user *ctx, size_t *size, u32 flags) ++ struct lsm_ctx __user *ctx, u32 *size, u32 flags) + LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr, +- struct lsm_ctx *ctx, size_t size, u32 flags) ++ struct lsm_ctx *ctx, u32 size, u32 flags) + LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name, + char **value) + LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) +diff --git a/include/linux/security.h b/include/linux/security.h +index d0eb20f90b264..3180d823e0233 100644 +--- a/include/linux/security.h ++++ b/include/linux/security.h +@@ -478,9 +478,9 @@ int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, + unsigned nsops, int alter); + void security_d_instantiate(struct dentry *dentry, struct inode *inode); + int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, +- size_t __user *size, u32 flags); ++ u32 __user *size, u32 flags); + int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx, +- size_t size, u32 flags); ++ u32 size, u32 flags); + int security_getprocattr(struct task_struct *p, int lsmid, const char *name, + char **value); + int security_setprocattr(int lsmid, const char *name, void *value, size_t size); +@@ -494,7 +494,7 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); + int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); + int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); + int security_locked_down(enum lockdown_reason what); +-int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len, ++int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, + void *val, size_t val_len, u64 id, u64 flags); + #else /* CONFIG_SECURITY */ + +@@ -1434,7 +1434,7 @@ static inline int security_locked_down(enum lockdown_reason what) + return 0; + } + static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, +- size_t *uctx_len, void *val, size_t val_len, ++ u32 *uctx_len, void *val, size_t val_len, + u64 id, u64 flags) + { + return -EOPNOTSUPP; +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h +index 77eb9b0e76850..e619ac10cd234 100644 +--- a/include/linux/syscalls.h ++++ b/include/linux/syscalls.h +@@ -960,10 +960,10 @@ asmlinkage long sys_cachestat(unsigned int fd, + struct cachestat __user *cstat, 
unsigned int flags); + asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags); + asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx, +- size_t *size, __u32 flags); ++ u32 *size, u32 flags); + asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx, +- size_t size, __u32 flags); +-asmlinkage long sys_lsm_list_modules(u64 *ids, size_t *size, u32 flags); ++ u32 size, u32 flags); ++asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags); + + /* + * Architecture-specific system calls +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index 9a3dcaafb5b1e..cef8c466af80d 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -779,7 +779,7 @@ static int apparmor_sb_pivotroot(const struct path *old_path, + } + + static int apparmor_getselfattr(unsigned int attr, struct lsm_ctx __user *lx, +- size_t *size, u32 flags) ++ u32 *size, u32 flags) + { + int error = -ENOENT; + struct aa_task_ctx *ctx = task_ctx(current); +@@ -924,7 +924,7 @@ static int do_setattr(u64 attr, void *value, size_t size) + } + + static int apparmor_setselfattr(unsigned int attr, struct lsm_ctx *ctx, +- size_t size, u32 flags) ++ u32 size, u32 flags) + { + int rc; + +diff --git a/security/lsm_syscalls.c b/security/lsm_syscalls.c +index 5d391b1f7e694..8440948a690c9 100644 +--- a/security/lsm_syscalls.c ++++ b/security/lsm_syscalls.c +@@ -53,7 +53,7 @@ u64 lsm_name_to_attr(const char *name) + * value indicating the reason for the error is returned. + */ + SYSCALL_DEFINE4(lsm_set_self_attr, unsigned int, attr, struct lsm_ctx __user *, +- ctx, size_t, size, u32, flags) ++ ctx, u32, size, u32, flags) + { + return security_setselfattr(attr, ctx, size, flags); + } +@@ -75,7 +75,7 @@ SYSCALL_DEFINE4(lsm_set_self_attr, unsigned int, attr, struct lsm_ctx __user *, + * a negative value indicating the error is returned. + */ + SYSCALL_DEFINE4(lsm_get_self_attr, unsigned int, attr, struct lsm_ctx __user *, +- ctx, size_t __user *, size, u32, flags) ++ ctx, u32 __user *, size, u32, flags) + { + return security_getselfattr(attr, ctx, size, flags); + } +@@ -93,11 +93,11 @@ SYSCALL_DEFINE4(lsm_get_self_attr, unsigned int, attr, struct lsm_ctx __user *, + * required size. In all other cases a negative value indicating the + * error is returned. + */ +-SYSCALL_DEFINE3(lsm_list_modules, u64 __user *, ids, size_t __user *, size, ++SYSCALL_DEFINE3(lsm_list_modules, u64 __user *, ids, u32 __user *, size, + u32, flags) + { +- size_t total_size = lsm_active_cnt * sizeof(*ids); +- size_t usize; ++ u32 total_size = lsm_active_cnt * sizeof(*ids); ++ u32 usize; + int i; + + if (flags) +diff --git a/security/security.c b/security/security.c +index 7035ee35a3930..fb7505c734853 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -785,7 +785,7 @@ static int lsm_superblock_alloc(struct super_block *sb) + * Returns 0 on success, -E2BIG if userspace buffer is not large enough, + * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated. + */ +-int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len, ++int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, + void *val, size_t val_len, + u64 id, u64 flags) + { +@@ -3918,14 +3918,14 @@ EXPORT_SYMBOL(security_d_instantiate); + * If @size is insufficient to contain the data -E2BIG is returned. 
+ */ + int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx, +- size_t __user *size, u32 flags) ++ u32 __user *size, u32 flags) + { + struct security_hook_list *hp; + struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, }; + u8 __user *base = (u8 __user *)uctx; +- size_t total = 0; +- size_t entrysize; +- size_t left; ++ u32 entrysize; ++ u32 total = 0; ++ u32 left; + bool toobig = false; + bool single = false; + int count = 0; +@@ -4011,7 +4011,7 @@ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx, + * LSM specific failure. + */ + int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx, +- size_t size, u32 flags) ++ u32 size, u32 flags) + { + struct security_hook_list *hp; + struct lsm_ctx *lctx; +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 338b023a8c3ed..71e6e7079d7f7 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -6556,7 +6556,7 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size) + * There will only ever be one attribute. + */ + static int selinux_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, +- size_t *size, u32 flags) ++ u32 *size, u32 flags) + { + int rc; + char *val = NULL; +@@ -6571,7 +6571,7 @@ static int selinux_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, + } + + static int selinux_setselfattr(unsigned int attr, struct lsm_ctx *ctx, +- size_t size, u32 flags) ++ u32 size, u32 flags) + { + int rc; + +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 6e5f74813c101..6f9a80783a5a3 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -3651,7 +3651,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) + * There will only ever be one attribute. + */ + static int smack_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, +- size_t *size, u32 flags) ++ u32 *size, u32 flags) + { + int rc; + struct smack_known *skp; +@@ -3772,7 +3772,7 @@ static int do_setattr(u64 attr, void *value, size_t size) + * Returns 0 on success, an error code otherwise. 
+ */ + static int smack_setselfattr(unsigned int attr, struct lsm_ctx *ctx, +- size_t size, u32 flags) ++ u32 size, u32 flags) + { + int rc; + +diff --git a/tools/testing/selftests/lsm/common.h b/tools/testing/selftests/lsm/common.h +index d404329e5eeb7..06d12110d241b 100644 +--- a/tools/testing/selftests/lsm/common.h ++++ b/tools/testing/selftests/lsm/common.h +@@ -7,7 +7,7 @@ + + #ifndef lsm_get_self_attr + static inline int lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx, +- size_t *size, __u32 flags) ++ __u32 *size, __u32 flags) + { + return syscall(__NR_lsm_get_self_attr, attr, ctx, size, flags); + } +@@ -15,14 +15,14 @@ static inline int lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx, + + #ifndef lsm_set_self_attr + static inline int lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx, +- size_t size, __u32 flags) ++ __u32 size, __u32 flags) + { + return syscall(__NR_lsm_set_self_attr, attr, ctx, size, flags); + } + #endif + + #ifndef lsm_list_modules +-static inline int lsm_list_modules(__u64 *ids, size_t *size, __u32 flags) ++static inline int lsm_list_modules(__u64 *ids, __u32 *size, __u32 flags) + { + return syscall(__NR_lsm_list_modules, ids, size, flags); + } +diff --git a/tools/testing/selftests/lsm/lsm_get_self_attr_test.c b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c +index e0e313d9047a3..df215e4aa63fe 100644 +--- a/tools/testing/selftests/lsm/lsm_get_self_attr_test.c ++++ b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c +@@ -40,7 +40,7 @@ TEST(size_null_lsm_get_self_attr) + TEST(ctx_null_lsm_get_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); +- size_t size = page_size; ++ __u32 size = page_size; + int rc; + + rc = lsm_get_self_attr(LSM_ATTR_CURRENT, NULL, &size, 0); +@@ -57,7 +57,7 @@ TEST(size_too_small_lsm_get_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); + struct lsm_ctx *ctx = calloc(page_size, 1); +- size_t size = 1; ++ __u32 size = 1; + + ASSERT_NE(NULL, ctx); + errno = 0; +@@ -77,7 +77,7 @@ TEST(flags_zero_lsm_get_self_attr) + const long page_size = sysconf(_SC_PAGESIZE); + struct lsm_ctx *ctx = calloc(page_size, 1); + __u64 *syscall_lsms = calloc(page_size, 1); +- size_t size; ++ __u32 size; + int lsmcount; + int i; + +@@ -117,7 +117,7 @@ TEST(flags_overset_lsm_get_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); + struct lsm_ctx *ctx = calloc(page_size, 1); +- size_t size; ++ __u32 size; + + ASSERT_NE(NULL, ctx); + +@@ -140,7 +140,7 @@ TEST(flags_overset_lsm_get_self_attr) + TEST(basic_lsm_get_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); +- size_t size = page_size; ++ __u32 size = page_size; + struct lsm_ctx *ctx = calloc(page_size, 1); + struct lsm_ctx *tctx = NULL; + __u64 *syscall_lsms = calloc(page_size, 1); +diff --git a/tools/testing/selftests/lsm/lsm_list_modules_test.c b/tools/testing/selftests/lsm/lsm_list_modules_test.c +index 9df29b1e34978..868641dbb309c 100644 +--- a/tools/testing/selftests/lsm/lsm_list_modules_test.c ++++ b/tools/testing/selftests/lsm/lsm_list_modules_test.c +@@ -31,7 +31,7 @@ TEST(size_null_lsm_list_modules) + TEST(ids_null_lsm_list_modules) + { + const long page_size = sysconf(_SC_PAGESIZE); +- size_t size = page_size; ++ __u32 size = page_size; + + errno = 0; + ASSERT_EQ(-1, lsm_list_modules(NULL, &size, 0)); +@@ -43,7 +43,7 @@ TEST(size_too_small_lsm_list_modules) + { + const long page_size = sysconf(_SC_PAGESIZE); + __u64 *syscall_lsms = calloc(page_size, 1); +- size_t size = 1; ++ __u32 size = 1; + + ASSERT_NE(NULL, 
syscall_lsms); + errno = 0; +@@ -58,7 +58,7 @@ TEST(flags_set_lsm_list_modules) + { + const long page_size = sysconf(_SC_PAGESIZE); + __u64 *syscall_lsms = calloc(page_size, 1); +- size_t size = page_size; ++ __u32 size = page_size; + + ASSERT_NE(NULL, syscall_lsms); + errno = 0; +@@ -72,7 +72,7 @@ TEST(flags_set_lsm_list_modules) + TEST(correct_lsm_list_modules) + { + const long page_size = sysconf(_SC_PAGESIZE); +- size_t size = page_size; ++ __u32 size = page_size; + __u64 *syscall_lsms = calloc(page_size, 1); + char *sysfs_lsms = calloc(page_size, 1); + char *name; +diff --git a/tools/testing/selftests/lsm/lsm_set_self_attr_test.c b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c +index e9712c6cf5962..66dec47e3ca3f 100644 +--- a/tools/testing/selftests/lsm/lsm_set_self_attr_test.c ++++ b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c +@@ -25,7 +25,7 @@ TEST(size_too_small_lsm_set_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); + struct lsm_ctx *ctx = calloc(page_size, 1); +- size_t size = page_size; ++ __u32 size = page_size; + + ASSERT_NE(NULL, ctx); + if (attr_lsm_count()) { +@@ -41,7 +41,7 @@ TEST(flags_zero_lsm_set_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); + struct lsm_ctx *ctx = calloc(page_size, 1); +- size_t size = page_size; ++ __u32 size = page_size; + + ASSERT_NE(NULL, ctx); + if (attr_lsm_count()) { +@@ -57,7 +57,7 @@ TEST(flags_overset_lsm_set_self_attr) + { + const long page_size = sysconf(_SC_PAGESIZE); + char *ctx = calloc(page_size, 1); +- size_t size = page_size; ++ __u32 size = page_size; + struct lsm_ctx *tctx = (struct lsm_ctx *)ctx; + + ASSERT_NE(NULL, ctx); +-- +2.43.0 + diff --git a/queue-6.8/mac802154-fix-llsec-key-resources-release-in-mac8021.patch b/queue-6.8/mac802154-fix-llsec-key-resources-release-in-mac8021.patch new file mode 100644 index 00000000000..33d4ff25bd5 --- /dev/null +++ b/queue-6.8/mac802154-fix-llsec-key-resources-release-in-mac8021.patch @@ -0,0 +1,134 @@ +From 95e9e48adf294c17cc9dac8874d8d2c31d9c7d2d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Feb 2024 19:38:39 +0300 +Subject: mac802154: fix llsec key resources release in mac802154_llsec_key_del + +From: Fedor Pchelkin + +[ Upstream commit e8a1e58345cf40b7b272e08ac7b32328b2543e40 ] + +mac802154_llsec_key_del() can free resources of a key directly without +following the RCU rules for waiting before the end of a grace period. This +may lead to use-after-free in case llsec_lookup_key() is traversing the +list of keys in parallel with a key deletion: + +refcount_t: addition on 0; use-after-free. 
+WARNING: CPU: 4 PID: 16000 at lib/refcount.c:25 refcount_warn_saturate+0x162/0x2a0 +Modules linked in: +CPU: 4 PID: 16000 Comm: wpan-ping Not tainted 6.7.0 #19 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014 +RIP: 0010:refcount_warn_saturate+0x162/0x2a0 +Call Trace: + + llsec_lookup_key.isra.0+0x890/0x9e0 + mac802154_llsec_encrypt+0x30c/0x9c0 + ieee802154_subif_start_xmit+0x24/0x1e0 + dev_hard_start_xmit+0x13e/0x690 + sch_direct_xmit+0x2ae/0xbc0 + __dev_queue_xmit+0x11dd/0x3c20 + dgram_sendmsg+0x90b/0xd60 + __sys_sendto+0x466/0x4c0 + __x64_sys_sendto+0xe0/0x1c0 + do_syscall_64+0x45/0xf0 + entry_SYSCALL_64_after_hwframe+0x6e/0x76 + +Also, ieee802154_llsec_key_entry structures are not freed by +mac802154_llsec_key_del(): + +unreferenced object 0xffff8880613b6980 (size 64): + comm "iwpan", pid 2176, jiffies 4294761134 (age 60.475s) + hex dump (first 32 bytes): + 78 0d 8f 18 80 88 ff ff 22 01 00 00 00 00 ad de x......."....... + 00 00 00 00 00 00 00 00 03 00 cd ab 00 00 00 00 ................ + backtrace: + [] __kmem_cache_alloc_node+0x1e2/0x2d0 + [] kmalloc_trace+0x25/0xc0 + [] mac802154_llsec_key_add+0xac9/0xcf0 + [] ieee802154_add_llsec_key+0x5a/0x80 + [] nl802154_add_llsec_key+0x426/0x5b0 + [] genl_family_rcv_msg_doit+0x1fe/0x2f0 + [] genl_rcv_msg+0x531/0x7d0 + [] netlink_rcv_skb+0x169/0x440 + [] genl_rcv+0x28/0x40 + [] netlink_unicast+0x53c/0x820 + [] netlink_sendmsg+0x93b/0xe60 + [] ____sys_sendmsg+0xac5/0xca0 + [] ___sys_sendmsg+0x11d/0x1c0 + [] __sys_sendmsg+0xfa/0x1d0 + [] do_syscall_64+0x45/0xf0 + [] entry_SYSCALL_64_after_hwframe+0x6e/0x76 + +Handle the proper resource release in the RCU callback function +mac802154_llsec_key_del_rcu(). + +Note that if llsec_lookup_key() finds a key, it gets a refcount via +llsec_key_get() and locally copies key id from key_entry (which is a +list element). So it's safe to call llsec_key_put() and free the list +entry after the RCU grace period elapses. + +Found by Linux Verification Center (linuxtesting.org). 
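
The general shape of the fix is the usual RCU deferred-free pattern, sketched below with illustrative names (the concrete version for llsec keys, mac802154_llsec_key_del_rcu(), is in the diff that follows): unlink the entry with list_del_rcu() and let a call_rcu() callback release it only after the grace period, so concurrent readers never see freed memory.

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_entry {
		struct list_head list;
		struct rcu_head rcu;
	};

	static void example_free_rcu(struct rcu_head *rcu)
	{
		struct example_entry *e =
			container_of(rcu, struct example_entry, rcu);

		/* safe: all pre-existing readers have finished */
		kfree_sensitive(e);
	}

	static void example_del(struct example_entry *e)
	{
		list_del_rcu(&e->list);		/* unlink for new readers  */
		call_rcu(&e->rcu, example_free_rcu); /* free after grace period */
	}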
+ +Fixes: 5d637d5aabd8 ("mac802154: add llsec structures and mutators") +Cc: stable@vger.kernel.org +Signed-off-by: Fedor Pchelkin +Acked-by: Alexander Aring +Message-ID: <20240228163840.6667-1-pchelkin@ispras.ru> +Signed-off-by: Stefan Schmidt +Signed-off-by: Sasha Levin +--- + include/net/cfg802154.h | 1 + + net/mac802154/llsec.c | 18 +++++++++++++----- + 2 files changed, 14 insertions(+), 5 deletions(-) + +diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h +index cd95711b12b8e..76d2cd2e2b309 100644 +--- a/include/net/cfg802154.h ++++ b/include/net/cfg802154.h +@@ -401,6 +401,7 @@ struct ieee802154_llsec_key { + + struct ieee802154_llsec_key_entry { + struct list_head list; ++ struct rcu_head rcu; + + struct ieee802154_llsec_key_id id; + struct ieee802154_llsec_key *key; +diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c +index 8d2eabc71bbeb..f13b07ebfb98a 100644 +--- a/net/mac802154/llsec.c ++++ b/net/mac802154/llsec.c +@@ -265,19 +265,27 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec, + return -ENOMEM; + } + ++static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu) ++{ ++ struct ieee802154_llsec_key_entry *pos; ++ struct mac802154_llsec_key *mkey; ++ ++ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu); ++ mkey = container_of(pos->key, struct mac802154_llsec_key, key); ++ ++ llsec_key_put(mkey); ++ kfree_sensitive(pos); ++} ++ + int mac802154_llsec_key_del(struct mac802154_llsec *sec, + const struct ieee802154_llsec_key_id *key) + { + struct ieee802154_llsec_key_entry *pos; + + list_for_each_entry(pos, &sec->table.keys, list) { +- struct mac802154_llsec_key *mkey; +- +- mkey = container_of(pos->key, struct mac802154_llsec_key, key); +- + if (llsec_key_id_equal(&pos->id, key)) { + list_del_rcu(&pos->list); +- llsec_key_put(mkey); ++ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu); + return 0; + } + } +-- +2.43.0 + diff --git a/queue-6.8/md-add-a-new-helper-reshape_interrupted.patch b/queue-6.8/md-add-a-new-helper-reshape_interrupted.patch new file mode 100644 index 00000000000..7917418b34b --- /dev/null +++ b/queue-6.8/md-add-a-new-helper-reshape_interrupted.patch @@ -0,0 +1,56 @@ +From d48afc48bc9dea92ba8b68c75c632b6ffe64cd3c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:01 +0800 +Subject: md: add a new helper reshape_interrupted() + +From: Yu Kuai + +[ Upstream commit 503f9d43790fdd0c6e6ae2f4dd3f70b146ac4159 ] + +The helper will be used for dm-raid456 later to detect the case that +reshape can't make progress. + +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-5-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/md.h | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +diff --git a/drivers/md/md.h b/drivers/md/md.h +index db0cb00e4c9ac..ea0fd76c17e75 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -571,6 +571,25 @@ static inline bool md_is_rdwr(struct mddev *mddev) + return (mddev->ro == MD_RDWR); + } + ++static inline bool reshape_interrupted(struct mddev *mddev) ++{ ++ /* reshape never start */ ++ if (mddev->reshape_position == MaxSector) ++ return false; ++ ++ /* interrupted */ ++ if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) ++ return true; ++ ++ /* running reshape will be interrupted soon. 
*/ ++ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) || ++ test_bit(MD_RECOVERY_INTR, &mddev->recovery) || ++ test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) ++ return true; ++ ++ return false; ++} ++ + static inline int __must_check mddev_lock(struct mddev *mddev) + { + return mutex_lock_interruptible(&mddev->reconfig_mutex); +-- +2.43.0 + diff --git a/queue-6.8/md-dm-raid-don-t-call-md_reap_sync_thread-directly.patch b/queue-6.8/md-dm-raid-don-t-call-md_reap_sync_thread-directly.patch new file mode 100644 index 00000000000..82c0dda9ca1 --- /dev/null +++ b/queue-6.8/md-dm-raid-don-t-call-md_reap_sync_thread-directly.patch @@ -0,0 +1,83 @@ +From 4e170adc23af59a34179948460104741321218ec Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:03 +0800 +Subject: md/dm-raid: don't call md_reap_sync_thread() directly + +From: Yu Kuai + +[ Upstream commit cd32b27a66db8776d8b8e82ec7d7dde97a8693b0 ] + +Currently md_reap_sync_thread() is called from raid_message() directly +without holding 'reconfig_mutex', this is definitely unsafe because +md_reap_sync_thread() can change many fields that is protected by +'reconfig_mutex'. + +However, hold 'reconfig_mutex' here is still problematic because this +will cause deadlock, for example, commit 130443d60b1b ("md: refactor +idle/frozen_sync_thread() to fix deadlock"). + +Fix this problem by using stop_sync_thread() to unregister sync_thread, +like md/raid did. + +Fixes: be83651f0050 ("DM RAID: Add message/status support for changing sync action") +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-7-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/dm-raid.c | 28 ++++++++++++++++++---------- + 1 file changed, 18 insertions(+), 10 deletions(-) + +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index fff9336fee767..8d38cdb221453 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3719,6 +3719,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, + { + struct raid_set *rs = ti->private; + struct mddev *mddev = &rs->md; ++ int ret = 0; + + if (!mddev->pers || !mddev->pers->sync_request) + return -EINVAL; +@@ -3726,17 +3727,24 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, + if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) + return -EBUSY; + +- if (!strcasecmp(argv[0], "frozen")) +- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); +- else +- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ if (!strcasecmp(argv[0], "frozen")) { ++ ret = mddev_lock(mddev); ++ if (ret) ++ return ret; + +- if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { +- if (mddev->sync_thread) { +- set_bit(MD_RECOVERY_INTR, &mddev->recovery); +- md_reap_sync_thread(mddev); +- } +- } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) ++ md_frozen_sync_thread(mddev); ++ mddev_unlock(mddev); ++ } else if (!strcasecmp(argv[0], "idle")) { ++ ret = mddev_lock(mddev); ++ if (ret) ++ return ret; ++ ++ md_idle_sync_thread(mddev); ++ mddev_unlock(mddev); ++ } ++ ++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ if (decipher_sync_action(mddev, mddev->recovery) != st_idle) + return -EBUSY; + else if (!strcasecmp(argv[0], "resync")) + ; /* MD_RECOVERY_NEEDED set below */ +-- +2.43.0 + diff --git a/queue-6.8/md-don-t-clear-md_recovery_frozen-for-new-dm-raid-un.patch 
b/queue-6.8/md-don-t-clear-md_recovery_frozen-for-new-dm-raid-un.patch new file mode 100644 index 00000000000..7c2b39aab2a --- /dev/null +++ b/queue-6.8/md-don-t-clear-md_recovery_frozen-for-new-dm-raid-un.patch @@ -0,0 +1,53 @@ +From ea777cc378c910623d6c356b6594da81826eda97 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:22:58 +0800 +Subject: md: don't clear MD_RECOVERY_FROZEN for new dm-raid until resume + +From: Yu Kuai + +[ Upstream commit 2f03d0c2cd451c7ac2f317079d4ec518f0986b55 ] + +After commit 9dbd1aa3a81c ("dm raid: add reshaping support to the +target") raid_ctr() will set MD_RECOVERY_FROZEN before md_run() and +expect to keep array frozen until resume. However, md_run() will clear +the flag by setting mddev->recovery to 0. + +Before commit 1baae052cccd ("md: Don't ignore suspended array in +md_check_recovery()"), dm-raid actually relied on suspending to prevent +starting new sync_thread. + +Fix this problem by keeping 'MD_RECOVERY_FROZEN' for dm-raid in +md_run(). + +Fixes: 1baae052cccd ("md: Don't ignore suspended array in md_check_recovery()") +Fixes: 9dbd1aa3a81c ("dm raid: add reshaping support to the target") +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-2-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/md.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index fbe528ed236f6..3d3a419190042 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6039,7 +6039,10 @@ int md_run(struct mddev *mddev) + pr_warn("True protection against single-disk failure might be compromised.\n"); + } + +- mddev->recovery = 0; ++ /* dm-raid expect sync_thread to be frozen until resume */ ++ if (mddev->gendisk) ++ mddev->recovery = 0; ++ + /* may be over-ridden by personality */ + mddev->resync_max_sectors = mddev->dev_sectors; + +-- +2.43.0 + diff --git a/queue-6.8/md-export-helper-md_is_rdwr.patch b/queue-6.8/md-export-helper-md_is_rdwr.patch new file mode 100644 index 00000000000..f87b4125933 --- /dev/null +++ b/queue-6.8/md-export-helper-md_is_rdwr.patch @@ -0,0 +1,73 @@ +From a8d9238228aadbb9d8dacb3d56dbe2de14c50043 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:23:00 +0800 +Subject: md: export helper md_is_rdwr() + +From: Yu Kuai + +[ Upstream commit 314e9af065513ff86ec9e32eaa96b9bd275cf51d ] + +There are no functional changes for now, prepare to fix a deadlock for +dm-raid456. 
+ +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-4-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/md.c | 12 ------------ + drivers/md/md.h | 12 ++++++++++++ + 2 files changed, 12 insertions(+), 12 deletions(-) + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 7255678608e7c..245ef8af8640a 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -99,18 +99,6 @@ static void mddev_detach(struct mddev *mddev); + static void export_rdev(struct md_rdev *rdev, struct mddev *mddev); + static void md_wakeup_thread_directly(struct md_thread __rcu *thread); + +-enum md_ro_state { +- MD_RDWR, +- MD_RDONLY, +- MD_AUTO_READ, +- MD_MAX_STATE +-}; +- +-static bool md_is_rdwr(struct mddev *mddev) +-{ +- return (mddev->ro == MD_RDWR); +-} +- + /* + * Default number of read corrections we'll attempt on an rdev + * before ejecting it from the array. We divide the read error +diff --git a/drivers/md/md.h b/drivers/md/md.h +index 0d06d640aa06d..db0cb00e4c9ac 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -559,6 +559,18 @@ enum recovery_flags { + MD_RESYNCING_REMOTE, /* remote node is running resync thread */ + }; + ++enum md_ro_state { ++ MD_RDWR, ++ MD_RDONLY, ++ MD_AUTO_READ, ++ MD_MAX_STATE ++}; ++ ++static inline bool md_is_rdwr(struct mddev *mddev) ++{ ++ return (mddev->ro == MD_RDWR); ++} ++ + static inline int __must_check mddev_lock(struct mddev *mddev) + { + return mutex_lock_interruptible(&mddev->reconfig_mutex); +-- +2.43.0 + diff --git a/queue-6.8/md-export-helpers-to-stop-sync_thread.patch b/queue-6.8/md-export-helpers-to-stop-sync_thread.patch new file mode 100644 index 00000000000..4572a83d634 --- /dev/null +++ b/queue-6.8/md-export-helpers-to-stop-sync_thread.patch @@ -0,0 +1,87 @@ +From d2046064404bd93cc8575945ef6c7d533e7de998 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 15:22:59 +0800 +Subject: md: export helpers to stop sync_thread + +From: Yu Kuai + +[ Upstream commit 7a2347e284d7ec2f0759be4db60fa7ca937284fc ] + +Add new helpers: + + void md_idle_sync_thread(struct mddev *mddev); + void md_frozen_sync_thread(struct mddev *mddev); + void md_unfrozen_sync_thread(struct mddev *mddev); + +The helpers will be used in dm-raid in later patches to fix regressions +and prevent calling md_reap_sync_thread() directly. 
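+
+As an illustration of how these helpers are meant to be called (this is the
+calling pattern used by the dm-raid change earlier in this series, not new
+code), the caller is expected to hold 'reconfig_mutex':
+
+    ret = mddev_lock(mddev);
+    if (ret)
+        return ret;
+
+    md_frozen_sync_thread(mddev);
+    mddev_unlock(mddev);
+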
+ +Cc: stable@vger.kernel.org # v6.7+ +Signed-off-by: Yu Kuai +Signed-off-by: Xiao Ni +Acked-by: Mike Snitzer +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240305072306.2562024-3-yukuai1@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/md.c | 29 +++++++++++++++++++++++++++++ + drivers/md/md.h | 3 +++ + 2 files changed, 32 insertions(+) + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 3d3a419190042..7255678608e7c 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -4920,6 +4920,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq) + mddev_lock_nointr(mddev); + } + ++void md_idle_sync_thread(struct mddev *mddev) ++{ ++ lockdep_assert_held(&mddev->reconfig_mutex); ++ ++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ stop_sync_thread(mddev, true, true); ++} ++EXPORT_SYMBOL_GPL(md_idle_sync_thread); ++ ++void md_frozen_sync_thread(struct mddev *mddev) ++{ ++ lockdep_assert_held(&mddev->reconfig_mutex); ++ ++ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ stop_sync_thread(mddev, true, false); ++} ++EXPORT_SYMBOL_GPL(md_frozen_sync_thread); ++ ++void md_unfrozen_sync_thread(struct mddev *mddev) ++{ ++ lockdep_assert_held(&mddev->reconfig_mutex); ++ ++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); ++ md_wakeup_thread(mddev->thread); ++ sysfs_notify_dirent_safe(mddev->sysfs_action); ++} ++EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread); ++ + static void idle_sync_thread(struct mddev *mddev) + { + mutex_lock(&mddev->sync_mutex); +diff --git a/drivers/md/md.h b/drivers/md/md.h +index 27d187ca6258a..0d06d640aa06d 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -782,6 +782,9 @@ extern void md_rdev_clear(struct md_rdev *rdev); + extern void md_handle_request(struct mddev *mddev, struct bio *bio); + extern int mddev_suspend(struct mddev *mddev, bool interruptible); + extern void mddev_resume(struct mddev *mddev); ++extern void md_idle_sync_thread(struct mddev *mddev); ++extern void md_frozen_sync_thread(struct mddev *mddev); ++extern void md_unfrozen_sync_thread(struct mddev *mddev); + + extern void md_reload_sb(struct mddev *mddev, int raid_disk); + extern void md_update_sb(struct mddev *mddev, int force); +-- +2.43.0 + diff --git a/queue-6.8/md-md-bitmap-fix-incorrect-usage-for-sb_index.patch b/queue-6.8/md-md-bitmap-fix-incorrect-usage-for-sb_index.patch new file mode 100644 index 00000000000..ba4c2bda3d1 --- /dev/null +++ b/queue-6.8/md-md-bitmap-fix-incorrect-usage-for-sb_index.patch @@ -0,0 +1,82 @@ +From 6eb94e9646394794da1dd052141c6942a4d44032 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 20:11:28 +0800 +Subject: md/md-bitmap: fix incorrect usage for sb_index + +From: Heming Zhao + +[ Upstream commit ecbd8ebb51bf7e4939d83b9e6022a55cac44ef06 ] + +Commit d7038f951828 ("md-bitmap: don't use ->index for pages backing the +bitmap file") removed page->index from bitmap code, but left wrong code +logic for clustered-md. current code never set slot offset for cluster +nodes, will sometimes cause crash in clustered env. 
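+
+For illustration only, a minimal standalone sketch of the index arithmetic
+this fix relies on (hypothetical helper and values, not the md-bitmap code):
+with clustered MD each node's bitmap area starts at sb_index, so "last page"
+checks must use the page number relative to that start, and absolute on-disk
+indexes are obtained by adding sb_index back in.
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Is pg_index the last page of a node area that starts at sb_index
+     * and spans file_pages pages? */
+    static bool is_last_bitmap_page(unsigned long pg_index,
+                                    unsigned long sb_index,
+                                    unsigned long file_pages)
+    {
+        return (pg_index - sb_index) == file_pages - 1;
+    }
+
+    int main(void)
+    {
+        /* e.g. a node area starting at page 8, 8 pages long */
+        printf("%d %d\n",
+               is_last_bitmap_page(15, 8, 8),   /* 1: last page  */
+               is_last_bitmap_page(8, 8, 8));   /* 0: first page */
+        return 0;
+    }
+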
+ +Call trace (partly): + md_bitmap_file_set_bit+0x110/0x1d8 [md_mod] + md_bitmap_startwrite+0x13c/0x240 [md_mod] + raid1_make_request+0x6b0/0x1c08 [raid1] + md_handle_request+0x1dc/0x368 [md_mod] + md_submit_bio+0x80/0xf8 [md_mod] + __submit_bio+0x178/0x300 + submit_bio_noacct_nocheck+0x11c/0x338 + submit_bio_noacct+0x134/0x614 + submit_bio+0x28/0xdc + submit_bh_wbc+0x130/0x1cc + submit_bh+0x1c/0x28 + +Fixes: d7038f951828 ("md-bitmap: don't use ->index for pages backing the bitmap file") +Cc: stable@vger.kernel.org # v6.6+ +Signed-off-by: Heming Zhao +Reviewed-by: Christoph Hellwig +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240223121128.28985-1-heming.zhao@suse.com +Signed-off-by: Sasha Levin +--- + drivers/md/md-bitmap.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c +index 9672f75c30503..a4976ceae8688 100644 +--- a/drivers/md/md-bitmap.c ++++ b/drivers/md/md-bitmap.c +@@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, + sector_t doff; + + bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; +- if (pg_index == store->file_pages - 1) { ++ /* we compare length (page numbers), not page offset. */ ++ if ((pg_index - store->sb_index) == store->file_pages - 1) { + unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); + + if (last_page_size == 0) +@@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index, + struct page *page = store->filemap[pg_index]; + + if (mddev_is_clustered(bitmap->mddev)) { +- pg_index += bitmap->cluster_slot * +- DIV_ROUND_UP(store->bytes, PAGE_SIZE); ++ /* go to node bitmap area starting point */ ++ pg_index += store->sb_index; + } + + if (store->file) +@@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) + unsigned long index = file_page_index(store, chunk); + unsigned long node_offset = 0; + ++ index += store->sb_index; + if (mddev_is_clustered(bitmap->mddev)) + node_offset = bitmap->cluster_slot * store->file_pages; + +@@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) + unsigned long index = file_page_index(store, chunk); + unsigned long node_offset = 0; + ++ index += store->sb_index; + if (mddev_is_clustered(bitmap->mddev)) + node_offset = bitmap->cluster_slot * store->file_pages; + +-- +2.43.0 + diff --git a/queue-6.8/md-raid5-fix-atomicity-violation-in-raid5_cache_coun.patch b/queue-6.8/md-raid5-fix-atomicity-violation-in-raid5_cache_coun.patch new file mode 100644 index 00000000000..19d5b2e0453 --- /dev/null +++ b/queue-6.8/md-raid5-fix-atomicity-violation-in-raid5_cache_coun.patch @@ -0,0 +1,119 @@ +From 66c8e2f76ff25cdfcf8790cc70875a52dfaf4bd0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 12 Jan 2024 15:10:17 +0800 +Subject: md/raid5: fix atomicity violation in raid5_cache_count + +From: Gui-Dong Han <2045gemini@gmail.com> + +[ Upstream commit dfd2bf436709b2bccb78c2dda550dde93700efa7 ] + +In raid5_cache_count(): + if (conf->max_nr_stripes < conf->min_nr_stripes) + return 0; + return conf->max_nr_stripes - conf->min_nr_stripes; +The current check is ineffective, as the values could change immediately +after being checked. + +In raid5_set_cache_size(): + ... + conf->min_nr_stripes = size; + ... + while (size > conf->max_nr_stripes) + conf->min_nr_stripes = conf->max_nr_stripes; + ... 
+ +Due to intermediate value updates in raid5_set_cache_size(), concurrent +execution of raid5_cache_count() and raid5_set_cache_size() may lead to +inconsistent reads of conf->max_nr_stripes and conf->min_nr_stripes. +The current checks are ineffective as values could change immediately +after being checked, raising the risk of conf->min_nr_stripes exceeding +conf->max_nr_stripes and potentially causing an integer overflow. + +This possible bug is found by an experimental static analysis tool +developed by our team. This tool analyzes the locking APIs to extract +function pairs that can be concurrently executed, and then analyzes the +instructions in the paired functions to identify possible concurrency bugs +including data races and atomicity violations. The above possible bug is +reported when our tool analyzes the source code of Linux 6.2. + +To resolve this issue, it is suggested to introduce local variables +'min_stripes' and 'max_stripes' in raid5_cache_count() to ensure the +values remain stable throughout the check. Adding locks in +raid5_cache_count() fails to resolve atomicity violations, as +raid5_set_cache_size() may hold intermediate values of +conf->min_nr_stripes while unlocked. With this patch applied, our tool no +longer reports the bug, with the kernel configuration allyesconfig for +x86_64. Due to the lack of associated hardware, we cannot test the patch +in runtime testing, and just verify it according to the code logic. + +Fixes: edbe83ab4c27 ("md/raid5: allow the stripe_cache to grow and shrink.") +Cc: stable@vger.kernel.org +Signed-off-by: Gui-Dong Han <2045gemini@gmail.com> +Reviewed-by: Yu Kuai +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240112071017.16313-1-2045gemini@gmail.com +Signed-off-by: Song Liu +Signed-off-by: Sasha Levin +--- + drivers/md/raid5.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 6a7a32f7fb912..6cddea04f942a 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2412,7 +2412,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) + atomic_inc(&conf->active_stripes); + + raid5_release_stripe(sh); +- conf->max_nr_stripes++; ++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); + return 1; + } + +@@ -2707,7 +2707,7 @@ static int drop_one_stripe(struct r5conf *conf) + shrink_buffers(sh); + free_stripe(conf->slab_cache, sh); + atomic_dec(&conf->active_stripes); +- conf->max_nr_stripes--; ++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); + return 1; + } + +@@ -6820,7 +6820,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) + if (size <= 16 || size > 32768) + return -EINVAL; + +- conf->min_nr_stripes = size; ++ WRITE_ONCE(conf->min_nr_stripes, size); + mutex_lock(&conf->cache_size_mutex); + while (size < conf->max_nr_stripes && + drop_one_stripe(conf)) +@@ -6832,7 +6832,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) + mutex_lock(&conf->cache_size_mutex); + while (size > conf->max_nr_stripes) + if (!grow_one_stripe(conf, GFP_KERNEL)) { +- conf->min_nr_stripes = conf->max_nr_stripes; ++ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); + result = -ENOMEM; + break; + } +@@ -7390,11 +7390,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink, + struct shrink_control *sc) + { + struct r5conf *conf = shrink->private_data; ++ int max_stripes = READ_ONCE(conf->max_nr_stripes); ++ int min_stripes = READ_ONCE(conf->min_nr_stripes); + +- if (conf->max_nr_stripes < 
conf->min_nr_stripes) ++ if (max_stripes < min_stripes) + /* unlikely, but not impossible */ + return 0; +- return conf->max_nr_stripes - conf->min_nr_stripes; ++ return max_stripes - min_stripes; + } + + static struct r5conf *setup_conf(struct mddev *mddev) +-- +2.43.0 + diff --git a/queue-6.8/md-use-rcu-lock-to-protect-traversal-in-md_spares_ne.patch b/queue-6.8/md-use-rcu-lock-to-protect-traversal-in-md_spares_ne.patch new file mode 100644 index 00000000000..a7e0a8c32f7 --- /dev/null +++ b/queue-6.8/md-use-rcu-lock-to-protect-traversal-in-md_spares_ne.patch @@ -0,0 +1,51 @@ +From efb7890a94d94cf61ca9079e2edd78c1db94e4a0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 4 Jan 2024 21:36:29 +0800 +Subject: md: use RCU lock to protect traversal in md_spares_need_change() + +From: Li Lingfeng + +[ Upstream commit 570b9147deb6b07b955b55e06c714ca12a5f3e16 ] + +Since md_start_sync() will be called without the protect of mddev_lock, +and it can run concurrently with array reconfiguration, traversal of rdev +in it should be protected by RCU lock. +Commit bc08041b32ab ("md: suspend array in md_start_sync() if array need +reconfiguration") added md_spares_need_change() to md_start_sync(), +casusing use of rdev without any protection. +Fix this by adding RCU lock in md_spares_need_change(). + +Fixes: bc08041b32ab ("md: suspend array in md_start_sync() if array need reconfiguration") +Cc: stable@vger.kernel.org # 6.7+ +Signed-off-by: Li Lingfeng +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240104133629.1277517-1-lilingfeng@huaweicloud.com +Signed-off-by: Sasha Levin +--- + drivers/md/md.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index d344e6fa3b26f..fbe528ed236f6 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -9277,9 +9277,14 @@ static bool md_spares_need_change(struct mddev *mddev) + { + struct md_rdev *rdev; + +- rdev_for_each(rdev, mddev) +- if (rdev_removeable(rdev) || rdev_addable(rdev)) ++ rcu_read_lock(); ++ rdev_for_each_rcu(rdev, mddev) { ++ if (rdev_removeable(rdev) || rdev_addable(rdev)) { ++ rcu_read_unlock(); + return true; ++ } ++ } ++ rcu_read_unlock(); + return false; + } + +-- +2.43.0 + diff --git a/queue-6.8/media-mc-add-local-pad-to-pipeline-regardless-of-the.patch b/queue-6.8/media-mc-add-local-pad-to-pipeline-regardless-of-the.patch new file mode 100644 index 00000000000..f00a57d5caf --- /dev/null +++ b/queue-6.8/media-mc-add-local-pad-to-pipeline-regardless-of-the.patch @@ -0,0 +1,80 @@ +From acc88b0815415f7da19c42117de5d283e5472078 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 14 Jan 2024 15:55:40 +0200 +Subject: media: mc: Add local pad to pipeline regardless of the link state + +From: Laurent Pinchart + +[ Upstream commit 78f0daa026d4c5e192d31801d1be6caf88250220 ] + +When building pipelines by following links, the +media_pipeline_explore_next_link() function only traverses enabled +links. The remote pad of a disabled link is not added to the pipeline, +and neither is the local pad. While the former is correct as disabled +links should not be followed, not adding the local pad breaks processing +of the MEDIA_PAD_FL_MUST_CONNECT flag. + +The MEDIA_PAD_FL_MUST_CONNECT flag is checked in the +__media_pipeline_start() function that iterates over all pads after +populating the pipeline. If the pad is not present, the check gets +skipped, rendering it useless. 
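+
+The fix described next boils down to the traversal shape sketched here
+(standalone illustration with made-up types, not the mc-entity code): the
+local end of a link always joins the pipeline, so MUST_CONNECT-style checks
+can see it, while only the decision to follow the link to the remote end
+depends on the link being enabled.
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct pad  { const char *name; bool in_pipeline; };
+    struct link { struct pad *local; struct pad *remote; bool enabled; };
+
+    static void explore_link(struct link *link)
+    {
+        /* The local pad is part of the pipeline regardless ... */
+        link->local->in_pipeline = true;
+
+        /* ... but a disabled link is still not followed. */
+        if (!link->enabled)
+            return;
+
+        link->remote->in_pipeline = true;
+    }
+
+    int main(void)
+    {
+        struct pad sink = { "sink", false }, source = { "source", false };
+        struct link link = { .local = &sink, .remote = &source, .enabled = false };
+
+        explore_link(&link);
+        printf("local: %d, remote: %d\n", sink.in_pipeline, source.in_pipeline);
+        return 0;
+    }
+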
+ +Fix this by adding the local pad of all links regardless of their state, +only skipping the remote pad for disabled links. + +Cc: stable@vger.kernel.org # 6.1 +Fixes: ae219872834a ("media: mc: entity: Rewrite media_pipeline_start()") +Reported-by: Frieder Schrempf +Closes: https://lore.kernel.org/linux-media/7658a15a-80c5-219f-2477-2a94ba6c6ba1@kontron.de +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/mc/mc-entity.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c +index 543a392f86357..a6f28366106fb 100644 +--- a/drivers/media/mc/mc-entity.c ++++ b/drivers/media/mc/mc-entity.c +@@ -620,13 +620,6 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + +- /* Skip links that are not enabled. */ +- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { +- dev_dbg(walk->mdev->dev, +- "media pipeline: skipping link (disabled)\n"); +- return 0; +- } +- + /* Get the local pad and remote pad. */ + if (link->source->entity == pad->entity) { + local = link->source; +@@ -648,13 +641,20 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + } + + /* +- * Add the local and remote pads of the link to the pipeline and push +- * them to the stack, if they're not already present. ++ * Add the local pad of the link to the pipeline and push it to the ++ * stack, if not already present. + */ + ret = media_pipeline_add_pad(pipe, walk, local); + if (ret) + return ret; + ++ /* Similarly, add the remote pad, but only if the link is enabled. */ ++ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { ++ dev_dbg(walk->mdev->dev, ++ "media pipeline: skipping link (disabled)\n"); ++ return 0; ++ } ++ + ret = media_pipeline_add_pad(pipe, walk, remote); + if (ret) + return ret; +-- +2.43.0 + diff --git a/queue-6.8/media-mc-add-num_links-flag-to-media_pad.patch b/queue-6.8/media-mc-add-num_links-flag-to-media_pad.patch new file mode 100644 index 00000000000..e3304ba115b --- /dev/null +++ b/queue-6.8/media-mc-add-num_links-flag-to-media_pad.patch @@ -0,0 +1,70 @@ +From eb6126fc2ebd42cdf1fc38603e97c44a8699c45d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 Jan 2024 00:30:02 +0200 +Subject: media: mc: Add num_links flag to media_pad + +From: Laurent Pinchart + +[ Upstream commit baeddf94aa61879b118f2faa37ed126d772670cc ] + +Maintain a counter of the links connected to a pad in the media_pad +structure. This helps checking if a pad is connected to anything, which +will be used in the pipeline building code. + +Cc: stable@vger.kernel.org # 6.1 +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/mc/mc-entity.c | 6 ++++++ + include/media/media-entity.h | 2 ++ + 2 files changed, 8 insertions(+) + +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c +index 7839e3f68efa4..c2d8f59b62c12 100644 +--- a/drivers/media/mc/mc-entity.c ++++ b/drivers/media/mc/mc-entity.c +@@ -1038,6 +1038,9 @@ static void __media_entity_remove_link(struct media_entity *entity, + + /* Remove the reverse links for a data link. 
*/ + if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) { ++ link->source->num_links--; ++ link->sink->num_links--; ++ + if (link->source->entity == entity) + remote = link->sink->entity; + else +@@ -1143,6 +1146,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, + sink->num_links++; + source->num_links++; + ++ link->source->num_links++; ++ link->sink->num_links++; ++ + return 0; + } + EXPORT_SYMBOL_GPL(media_create_pad_link); +diff --git a/include/media/media-entity.h b/include/media/media-entity.h +index 2b6cd343ee9e0..4d95893c89846 100644 +--- a/include/media/media-entity.h ++++ b/include/media/media-entity.h +@@ -225,6 +225,7 @@ enum media_pad_signal_type { + * @graph_obj: Embedded structure containing the media object common data + * @entity: Entity this pad belongs to + * @index: Pad index in the entity pads array, numbered from 0 to n ++ * @num_links: Number of links connected to this pad + * @sig_type: Type of the signal inside a media pad + * @flags: Pad flags, as defined in + * :ref:`include/uapi/linux/media.h ` +@@ -236,6 +237,7 @@ struct media_pad { + struct media_gobj graph_obj; /* must be first field in struct */ + struct media_entity *entity; + u16 index; ++ u16 num_links; + enum media_pad_signal_type sig_type; + unsigned long flags; + +-- +2.43.0 + diff --git a/queue-6.8/media-mc-expand-must_connect-flag-to-always-require-.patch b/queue-6.8/media-mc-expand-must_connect-flag-to-always-require-.patch new file mode 100644 index 00000000000..e78055c7ed3 --- /dev/null +++ b/queue-6.8/media-mc-expand-must_connect-flag-to-always-require-.patch @@ -0,0 +1,190 @@ +From 4c10b037b502dd4dad08b2cd4452de1863b7a14f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 Jan 2024 01:04:52 +0200 +Subject: media: mc: Expand MUST_CONNECT flag to always require an enabled link + +From: Laurent Pinchart + +[ Upstream commit b3decc5ce7d778224d266423b542326ad469cb5f ] + +The MEDIA_PAD_FL_MUST_CONNECT flag indicates that the pad requires an +enabled link to stream, but only if it has any link at all. This makes +little sense, as if a pad is part of a pipeline, there are very few use +cases for an active link to be mandatory only if links exist at all. A +review of in-tree drivers confirms they all need an enabled link for +pads marked with the MEDIA_PAD_FL_MUST_CONNECT flag. + +Expand the scope of the flag by rejecting pads that have no links at +all. This requires modifying the pipeline build code to add those pads +to the pipeline. + +Cc: stable@vger.kernel.org # 6.1 +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + .../media/mediactl/media-types.rst | 11 ++-- + drivers/media/mc/mc-entity.c | 53 +++++++++++++++---- + 2 files changed, 48 insertions(+), 16 deletions(-) + +diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst +index 0ffeece1e0c8e..6332e8395263b 100644 +--- a/Documentation/userspace-api/media/mediactl/media-types.rst ++++ b/Documentation/userspace-api/media/mediactl/media-types.rst +@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements + are origins of links. + + * - ``MEDIA_PAD_FL_MUST_CONNECT`` +- - If this flag is set and the pad is linked to any other pad, then +- at least one of those links must be enabled for the entity to be +- able to stream. There could be temporary reasons (e.g. 
device +- configuration dependent) for the pad to need enabled links even +- when this flag isn't set; the absence of the flag doesn't imply +- there is none. ++ - If this flag is set, then for this pad to be able to stream, it must ++ be connected by at least one enabled link. There could be temporary ++ reasons (e.g. device configuration dependent) for the pad to need ++ enabled links even when this flag isn't set; the absence of the flag ++ doesn't imply there is none. + + + One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE`` +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c +index 5907925ffd891..0e28b9a7936ef 100644 +--- a/drivers/media/mc/mc-entity.c ++++ b/drivers/media/mc/mc-entity.c +@@ -535,14 +535,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk, + + /* + * Move the top entry link cursor to the next link. If all links of the entry +- * have been visited, pop the entry itself. ++ * have been visited, pop the entry itself. Return true if the entry has been ++ * popped. + */ +-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) ++static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk) + { + struct media_pipeline_walk_entry *entry; + + if (WARN_ON(walk->stack.top < 0)) +- return; ++ return false; + + entry = media_pipeline_walk_top(walk); + +@@ -552,7 +553,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) + walk->stack.top); + + walk->stack.top--; +- return; ++ return true; + } + + entry->links = entry->links->next; +@@ -560,6 +561,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) + dev_dbg(walk->mdev->dev, + "media pipeline: moved entry %u to next link\n", + walk->stack.top); ++ ++ return false; + } + + /* Free all memory allocated while walking the pipeline. */ +@@ -609,11 +612,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + struct media_link *link; + struct media_pad *local; + struct media_pad *remote; ++ bool last_link; + int ret; + + origin = entry->pad; + link = list_entry(entry->links, typeof(*link), list); +- media_pipeline_walk_pop(walk); ++ last_link = media_pipeline_walk_pop(walk); + + dev_dbg(walk->mdev->dev, + "media pipeline: exploring link '%s':%u -> '%s':%u\n", +@@ -638,7 +642,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + local->index)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (no route)\n"); +- return 0; ++ goto done; + } + + /* +@@ -653,13 +657,44 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (disabled)\n"); +- return 0; ++ goto done; + } + + ret = media_pipeline_add_pad(pipe, walk, remote); + if (ret) + return ret; + ++done: ++ /* ++ * If we're done iterating over links, iterate over pads of the entity. ++ * This is necessary to discover pads that are not connected with any ++ * link. Those are dead ends from a pipeline exploration point of view, ++ * but are still part of the pipeline and need to be added to enable ++ * proper validation. 
++ */ ++ if (!last_link) ++ return 0; ++ ++ dev_dbg(walk->mdev->dev, ++ "media pipeline: adding unconnected pads of '%s'\n", ++ local->entity->name); ++ ++ media_entity_for_each_pad(origin->entity, local) { ++ /* ++ * Skip the origin pad (already handled), pad that have links ++ * (already discovered through iterating over links) and pads ++ * not internally connected. ++ */ ++ if (origin == local || !local->num_links || ++ !media_entity_has_pad_interdep(origin->entity, origin->index, ++ local->index)) ++ continue; ++ ++ ret = media_pipeline_add_pad(pipe, walk, local); ++ if (ret) ++ return ret; ++ } ++ + return 0; + } + +@@ -771,7 +806,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, + struct media_pad *pad = ppad->pad; + struct media_entity *entity = pad->entity; + bool has_enabled_link = false; +- bool has_link = false; + struct media_link *link; + + dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, +@@ -801,7 +835,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, + /* Record if the pad has links and enabled links. */ + if (link->flags & MEDIA_LNK_FL_ENABLED) + has_enabled_link = true; +- has_link = true; + + /* + * Validate the link if it's enabled and has the +@@ -839,7 +872,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad, + * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, + * ensure that it has either no link or an enabled link. + */ +- if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link && ++ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && + !has_enabled_link) { + dev_dbg(mdev->dev, + "Pad '%s':%u must be connected by an enabled link\n", +-- +2.43.0 + diff --git a/queue-6.8/media-mc-fix-flags-handling-when-creating-pad-links.patch b/queue-6.8/media-mc-fix-flags-handling-when-creating-pad-links.patch new file mode 100644 index 00000000000..92a5168d535 --- /dev/null +++ b/queue-6.8/media-mc-fix-flags-handling-when-creating-pad-links.patch @@ -0,0 +1,59 @@ +From 0d1ec8e8806f2d2fd581e174ef5b1d9f08bcb2fb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 Jan 2024 00:24:12 +0200 +Subject: media: mc: Fix flags handling when creating pad links + +From: Laurent Pinchart + +[ Upstream commit 422f7af75d03d50895938d38bc9cb8be759c440f ] + +The media_create_pad_link() function doesn't correctly clear reject link +type flags, nor does it set the DATA_LINK flag. It only works because +the MEDIA_LNK_FL_DATA_LINK flag's value is 0. + +Fix it by returning an error if any link type flag is set. This doesn't +introduce any regression, as nobody calls the media_create_pad_link() +function with link type flags (easily checked by grepping for the flag +in the source code, there are very few hits). + +Set the MEDIA_LNK_FL_DATA_LINK explicitly, which is a no-op that the +compiler will optimize out, but is still useful to make the code more +explicit and easier to understand. 
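+
+As a standalone illustration of the validation pattern (made-up flag values,
+chosen so that the data-link type is 0 to match the observation above; this
+is not the media-controller UAPI):
+
+    #include <stdio.h>
+
+    #define LNK_FL_ENABLED    (1U << 0)
+    #define LNK_FL_LINK_TYPE  (0xfU << 28)  /* type field   */
+    #define LNK_FL_DATA_LINK  (0U << 28)    /* type value 0 */
+
+    static int create_pad_link(unsigned int flags, unsigned int *out)
+    {
+        /* Callers may only pass state flags, never a link type. */
+        if (flags & LNK_FL_LINK_TYPE)
+            return -1;
+
+        /* Set the type explicitly, even though it is a no-op here. */
+        *out = flags | LNK_FL_DATA_LINK;
+        return 0;
+    }
+
+    int main(void)
+    {
+        unsigned int flags;
+
+        printf("plain:     %d\n", create_pad_link(LNK_FL_ENABLED, &flags));
+        printf("with type: %d\n", create_pad_link(LNK_FL_ENABLED | (1U << 28), &flags));
+        return 0;
+    }
+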
+ +Cc: stable@vger.kernel.org # 6.1 +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/mc/mc-entity.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c +index a6f28366106fb..7839e3f68efa4 100644 +--- a/drivers/media/mc/mc-entity.c ++++ b/drivers/media/mc/mc-entity.c +@@ -1092,6 +1092,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, + struct media_link *link; + struct media_link *backlink; + ++ if (flags & MEDIA_LNK_FL_LINK_TYPE) ++ return -EINVAL; ++ ++ flags |= MEDIA_LNK_FL_DATA_LINK; ++ + if (WARN_ON(!source || !sink) || + WARN_ON(source_pad >= source->num_pads) || + WARN_ON(sink_pad >= sink->num_pads)) +@@ -1107,7 +1112,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, + + link->source = &source->pads[source_pad]; + link->sink = &sink->pads[sink_pad]; +- link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK; ++ link->flags = flags; + + /* Initialize graph object embedded at the new link */ + media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK, +-- +2.43.0 + diff --git a/queue-6.8/media-mc-rename-pad-variable-to-clarify-intent.patch b/queue-6.8/media-mc-rename-pad-variable-to-clarify-intent.patch new file mode 100644 index 00000000000..e632cda8ca9 --- /dev/null +++ b/queue-6.8/media-mc-rename-pad-variable-to-clarify-intent.patch @@ -0,0 +1,68 @@ +From 78ddd0175812a0097e2e578ea51a079defa62ac8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 Jan 2024 00:30:02 +0200 +Subject: media: mc: Rename pad variable to clarify intent + +From: Laurent Pinchart + +[ Upstream commit 9ec9109cf9f611e3ec9ed0355afcc7aae5e73176 ] + +The pad local variable in the media_pipeline_explore_next_link() +function is used to store the pad through which the entity has been +reached. Rename it to origin to reflect that and make the code easier to +read. This will be even more important in subsequent commits when +expanding the function with additional logic. + +Cc: stable@vger.kernel.org # 6.1 +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/mc/mc-entity.c | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) + +diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c +index c2d8f59b62c12..5907925ffd891 100644 +--- a/drivers/media/mc/mc-entity.c ++++ b/drivers/media/mc/mc-entity.c +@@ -605,13 +605,13 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + struct media_pipeline_walk *walk) + { + struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); +- struct media_pad *pad; ++ struct media_pad *origin; + struct media_link *link; + struct media_pad *local; + struct media_pad *remote; + int ret; + +- pad = entry->pad; ++ origin = entry->pad; + link = list_entry(entry->links, typeof(*link), list); + media_pipeline_walk_pop(walk); + +@@ -621,7 +621,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + link->sink->entity->name, link->sink->index); + + /* Get the local pad and remote pad. 
*/ +- if (link->source->entity == pad->entity) { ++ if (link->source->entity == origin->entity) { + local = link->source; + remote = link->sink; + } else { +@@ -633,8 +633,9 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + * Skip links that originate from a different pad than the incoming pad + * that is not connected internally in the entity to the incoming pad. + */ +- if (pad != local && +- !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) { ++ if (origin != local && ++ !media_entity_has_pad_interdep(origin->entity, origin->index, ++ local->index)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (no route)\n"); + return 0; +-- +2.43.0 + diff --git a/queue-6.8/media-nxp-imx8-isi-check-whether-crossbar-pad-is-non.patch b/queue-6.8/media-nxp-imx8-isi-check-whether-crossbar-pad-is-non.patch new file mode 100644 index 00000000000..a9f23b031c2 --- /dev/null +++ b/queue-6.8/media-nxp-imx8-isi-check-whether-crossbar-pad-is-non.patch @@ -0,0 +1,57 @@ +From 9dfc2364d47afc811ffc83eab051f4e7b2ac1889 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Dec 2023 16:06:04 +0100 +Subject: media: nxp: imx8-isi: Check whether crossbar pad is non-NULL before + access + +From: Marek Vasut + +[ Upstream commit eb2f932100288dbb881eadfed02e1459c6b9504c ] + +When translating source to sink streams in the crossbar subdev, the +driver tries to locate the remote subdev connected to the sink pad. The +remote pad may be NULL, if userspace tries to enable a stream that ends +at an unconnected crossbar sink. When that occurs, the driver +dereferences the NULL pad, leading to a crash. + +Prevent the crash by checking if the pad is NULL before using it, and +return an error if it is. + +Cc: stable@vger.kernel.org # 6.1 +Fixes: cf21f328fcaf ("media: nxp: Add i.MX8 ISI driver") +Signed-off-by: Marek Vasut +Reviewed-by: Kieran Bingham +Reviewed-by: Fabio Estevam +Reviewed-by: Laurent Pinchart +Link: https://lore.kernel.org/r/20231201150614.63300-1-marex@denx.de +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c +index 575f173373887..1bb1334ec6f2b 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c +@@ -160,8 +160,14 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar, + } + + pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]); +- sd = media_entity_to_v4l2_subdev(pad->entity); ++ if (!pad) { ++ dev_dbg(xbar->isi->dev, ++ "no pad connected to crossbar input %u\n", ++ sink_pad); ++ return ERR_PTR(-EPIPE); ++ } + ++ sd = media_entity_to_v4l2_subdev(pad->entity); + if (!sd) { + dev_dbg(xbar->isi->dev, + "no entity connected to crossbar input %u\n", +-- +2.43.0 + diff --git a/queue-6.8/media-nxp-imx8-isi-mark-all-crossbar-sink-pads-as-mu.patch b/queue-6.8/media-nxp-imx8-isi-mark-all-crossbar-sink-pads-as-mu.patch new file mode 100644 index 00000000000..27087fc5189 --- /dev/null +++ b/queue-6.8/media-nxp-imx8-isi-mark-all-crossbar-sink-pads-as-mu.patch @@ -0,0 +1,55 @@ +From b818151006498788cebc6fc275e7ec9380aed050 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 15 Jan 2024 04:16:29 +0200 +Subject: media: nxp: imx8-isi: Mark all crossbar sink 
pads as MUST_CONNECT + +From: Laurent Pinchart + +[ Upstream commit 9b71021b2ea537632b01e51e3f003df24a637858 ] + +All the sink pads of the crossbar switch require an active link if +they're part of the pipeline. Mark them with the +MEDIA_PAD_FL_MUST_CONNECT flag to fail pipeline validation if they're +not connected. This allows removing a manual check when translating +streams. + +Cc: stable@vger.kernel.org # 6.1 +Signed-off-by: Laurent Pinchart +Acked-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + .../media/platform/nxp/imx8-isi/imx8-isi-crossbar.c | 10 ++-------- + 1 file changed, 2 insertions(+), 8 deletions(-) + +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c +index 1bb1334ec6f2b..93a55c97cd173 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c +@@ -160,13 +160,6 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar, + } + + pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]); +- if (!pad) { +- dev_dbg(xbar->isi->dev, +- "no pad connected to crossbar input %u\n", +- sink_pad); +- return ERR_PTR(-EPIPE); +- } +- + sd = media_entity_to_v4l2_subdev(pad->entity); + if (!sd) { + dev_dbg(xbar->isi->dev, +@@ -475,7 +468,8 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi) + } + + for (i = 0; i < xbar->num_sinks; ++i) +- xbar->pads[i].flags = MEDIA_PAD_FL_SINK; ++ xbar->pads[i].flags = MEDIA_PAD_FL_SINK ++ | MEDIA_PAD_FL_MUST_CONNECT; + for (i = 0; i < xbar->num_sources; ++i) + xbar->pads[i + xbar->num_sinks].flags = MEDIA_PAD_FL_SOURCE; + +-- +2.43.0 + diff --git a/queue-6.8/media-staging-ipu3-imgu-set-fields-before-media_enti.patch b/queue-6.8/media-staging-ipu3-imgu-set-fields-before-media_enti.patch new file mode 100644 index 00000000000..168be2dca8d --- /dev/null +++ b/queue-6.8/media-staging-ipu3-imgu-set-fields-before-media_enti.patch @@ -0,0 +1,81 @@ +From 7853345cd2a16d0e3618e6c2bba177cf8d51eefa Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 9 Jan 2024 17:09:09 +0900 +Subject: media: staging: ipu3-imgu: Set fields before media_entity_pads_init() + +From: Hidenori Kobayashi + +[ Upstream commit 87318b7092670d4086bfec115a0280a60c51c2dd ] + +The imgu driver fails to probe with the following message because it +does not set the pad's flags before calling media_entity_pads_init(). + +[ 14.596315] ipu3-imgu 0000:00:05.0: failed initialize subdev media entity (-22) +[ 14.596322] ipu3-imgu 0000:00:05.0: failed to register subdev0 ret (-22) +[ 14.596327] ipu3-imgu 0000:00:05.0: failed to register pipes (-22) +[ 14.596331] ipu3-imgu 0000:00:05.0: failed to create V4L2 devices (-22) + +Fix the initialization order so that the driver probe succeeds. The ops +initialization is also moved together for readability. 
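+
+A minimal standalone sketch of the ordering problem (hypothetical
+registration helper, not the media framework API): if the helper that
+registers the pads also validates their flags, the flags must already be
+set when it runs, otherwise registration fails much like the -22 (-EINVAL)
+seen in the probe log above.
+
+    #include <stdio.h>
+
+    struct pad { unsigned int flags; };  /* 0 means "not set yet" */
+
+    static int pads_init(const struct pad *pads, int n)
+    {
+        for (int i = 0; i < n; i++)
+            if (!pads[i].flags)
+                return -22;  /* reject unconfigured pads */
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct pad pads[2] = { { 0 }, { 0 } };
+
+        printf("flags not set: %d\n", pads_init(pads, 2));
+
+        pads[0].flags = 1;  /* e.g. sink   */
+        pads[1].flags = 2;  /* e.g. source */
+        printf("flags set:     %d\n", pads_init(pads, 2));
+        return 0;
+    }
+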
+ +Fixes: a0ca1627b450 ("media: staging/intel-ipu3: Add v4l2 driver based on media framework") +Cc: # 6.7 +Cc: Dan Carpenter +Signed-off-by: Hidenori Kobayashi +Signed-off-by: Sakari Ailus +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/staging/media/ipu3/ipu3-v4l2.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c +index a66f034380c05..3df58eb3e8822 100644 +--- a/drivers/staging/media/ipu3/ipu3-v4l2.c ++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c +@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, + struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe]; + + /* Initialize subdev media entity */ ++ imgu_sd->subdev.entity.ops = &imgu_media_ops; ++ for (i = 0; i < IMGU_NODE_NUM; i++) { ++ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? ++ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; ++ } + r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM, + imgu_sd->subdev_pads); + if (r) { +@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, + "failed initialize subdev media entity (%d)\n", r); + return r; + } +- imgu_sd->subdev.entity.ops = &imgu_media_ops; +- for (i = 0; i < IMGU_NODE_NUM; i++) { +- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? +- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; +- } + + /* Initialize subdev */ + v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops); +@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe, + } + + /* Initialize media entities */ ++ node->vdev_pad.flags = node->output ? ++ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; ++ vdev->entity.ops = NULL; + r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad); + if (r) { + dev_err(dev, "failed initialize media entity (%d)\n", r); + mutex_destroy(&node->lock); + return r; + } +- node->vdev_pad.flags = node->output ? +- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; +- vdev->entity.ops = NULL; + + /* Initialize vbq */ + vbq->type = node->vdev_fmt.type; +-- +2.43.0 + diff --git a/queue-6.8/media-xc4000-fix-atomicity-violation-in-xc4000_get_f.patch b/queue-6.8/media-xc4000-fix-atomicity-violation-in-xc4000_get_f.patch new file mode 100644 index 00000000000..16d9412ac98 --- /dev/null +++ b/queue-6.8/media-xc4000-fix-atomicity-violation-in-xc4000_get_f.patch @@ -0,0 +1,79 @@ +From 27c6a4da02ffc5ddc8cadb08f026eeaff170ea57 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 22 Dec 2023 13:50:30 +0800 +Subject: media: xc4000: Fix atomicity violation in xc4000_get_frequency + +From: Gui-Dong Han <2045gemini@gmail.com> + +[ Upstream commit 36d503ad547d1c75758a6fcdbec2806f1b6aeb41 ] + +In xc4000_get_frequency(): + *freq = priv->freq_hz + priv->freq_offset; +The code accesses priv->freq_hz and priv->freq_offset without holding any +lock. + +In xc4000_set_params(): + // Code that updates priv->freq_hz and priv->freq_offset + ... + +xc4000_get_frequency() and xc4000_set_params() may execute concurrently, +risking inconsistent reads of priv->freq_hz and priv->freq_offset. Since +these related data may update during reading, it can result in incorrect +frequency calculation, leading to atomicity violations. + +This possible bug is found by an experimental static analysis tool +developed by our team, BassCheck[1]. 
This tool analyzes the locking APIs +to extract function pairs that can be concurrently executed, and then +analyzes the instructions in the paired functions to identify possible +concurrency bugs including data races and atomicity violations. The above +possible bug is reported when our tool analyzes the source code of +Linux 6.2. + +To address this issue, it is proposed to add a mutex lock pair in +xc4000_get_frequency() to ensure atomicity. With this patch applied, our +tool no longer reports the possible bug, with the kernel configuration +allyesconfig for x86_64. Due to the lack of associated hardware, we cannot +test the patch in runtime testing, and just verify it according to the +code logic. + +[1] https://sites.google.com/view/basscheck/ + +Fixes: 4c07e32884ab ("[media] xc4000: Fix get_frequency()") +Cc: stable@vger.kernel.org +Reported-by: BassCheck +Signed-off-by: Gui-Dong Han <2045gemini@gmail.com> +Signed-off-by: Hans Verkuil +Signed-off-by: Sasha Levin +--- + drivers/media/tuners/xc4000.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c +index 57ded9ff3f043..29bc63021c5aa 100644 +--- a/drivers/media/tuners/xc4000.c ++++ b/drivers/media/tuners/xc4000.c +@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) + { + struct xc4000_priv *priv = fe->tuner_priv; + ++ mutex_lock(&priv->lock); + *freq = priv->freq_hz + priv->freq_offset; + + if (debug) { +- mutex_lock(&priv->lock); + if ((priv->cur_fw.type + & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) { + u16 snr = 0; +@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) + return 0; + } + } +- mutex_unlock(&priv->lock); + } ++ mutex_unlock(&priv->lock); + + dprintk(1, "%s()\n", __func__); + +-- +2.43.0 + diff --git a/queue-6.8/memtest-use-read-write-_once-in-memory-scanning.patch b/queue-6.8/memtest-use-read-write-_once-in-memory-scanning.patch new file mode 100644 index 00000000000..59abdb1f59c --- /dev/null +++ b/queue-6.8/memtest-use-read-write-_once-in-memory-scanning.patch @@ -0,0 +1,45 @@ +From 6ea4f1c13836f429de652f74d4fdc83efa7d4ea0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 16:04:23 +0800 +Subject: memtest: use {READ,WRITE}_ONCE in memory scanning + +From: Qiang Zhang + +[ Upstream commit 82634d7e24271698e50a3ec811e5f50de790a65f ] + +memtest failed to find bad memory when compiled with clang. So use +{WRITE,READ}_ONCE to access memory to avoid compiler over optimization. 
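+
+For illustration only, a userspace analogue of the scanning loop (not the
+kernel code): volatile accesses, which are roughly what {READ,WRITE}_ONCE
+provide for word-sized scalars, keep the compiler from eliding the pattern
+write or the read-back.
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define WRITE_ONCE_U64(p, v)  (*(volatile uint64_t *)(p) = (v))
+    #define READ_ONCE_U64(p)      (*(volatile uint64_t *)(p))
+
+    static size_t memtest_scan(uint64_t *start, uint64_t *end, uint64_t pattern)
+    {
+        size_t bad = 0;
+        uint64_t *p;
+
+        for (p = start; p < end; p++)
+            WRITE_ONCE_U64(p, pattern);
+
+        for (p = start; p < end; p++)
+            if (READ_ONCE_U64(p) != pattern)
+                bad++;
+
+        return bad;
+    }
+
+    int main(void)
+    {
+        static uint64_t buf[1024];
+
+        printf("bad words: %zu\n",
+               memtest_scan(buf, buf + 1024, 0x5555555555555555ULL));
+        return 0;
+    }
+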
+ +Link: https://lkml.kernel.org/r/20240312080422.691222-1-qiang4.zhang@intel.com +Signed-off-by: Qiang Zhang +Cc: Bill Wendling +Cc: Justin Stitt +Cc: Nathan Chancellor +Cc: Nick Desaulniers +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + mm/memtest.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/mm/memtest.c b/mm/memtest.c +index 32f3e9dda8370..c2c609c391199 100644 +--- a/mm/memtest.c ++++ b/mm/memtest.c +@@ -51,10 +51,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size + last_bad = 0; + + for (p = start; p < end; p++) +- *p = pattern; ++ WRITE_ONCE(*p, pattern); + + for (p = start; p < end; p++, start_phys_aligned += incr) { +- if (*p == pattern) ++ if (READ_ONCE(*p) == pattern) + continue; + if (start_phys_aligned == last_bad + incr) { + last_bad += incr; +-- +2.43.0 + diff --git a/queue-6.8/mfd-twl-select-mfd_core.patch b/queue-6.8/mfd-twl-select-mfd_core.patch new file mode 100644 index 00000000000..f13e35c36a4 --- /dev/null +++ b/queue-6.8/mfd-twl-select-mfd_core.patch @@ -0,0 +1,39 @@ +From 42f786b25aa8b31739e1cd69d229686eedaef70f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Feb 2024 15:30:18 +0100 +Subject: mfd: twl: Select MFD_CORE + +From: Alexander Sverdlin + +[ Upstream commit 3bb36528d46e494987ee5e9682d08318928ae041 ] + +Fix link error: +ld.bfd: drivers/mfd/twl-core.o: in function `twl_probe': +git/drivers/mfd/twl-core.c:846: undefined reference to `devm_mfd_add_devices' + +Cc: +Fixes: 63416320419e ("mfd: twl-core: Add a clock subdevice for the TWL6032") +Signed-off-by: Alexander Sverdlin +Reviewed-by: Andreas Kemnade +Link: https://lore.kernel.org/r/20240221143021.3542736-1-alexander.sverdlin@siemens.com +Signed-off-by: Lee Jones +Signed-off-by: Sasha Levin +--- + drivers/mfd/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index e7a6e45b9fac2..4b023ee229cf1 100644 +--- a/drivers/mfd/Kconfig ++++ b/drivers/mfd/Kconfig +@@ -1773,6 +1773,7 @@ config TWL4030_CORE + bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support" + depends on I2C=y + select IRQ_DOMAIN ++ select MFD_CORE + select REGMAP_I2C + help + Say yes here if you have TWL4030 / TWL6030 family chip on your board. +-- +2.43.0 + diff --git a/queue-6.8/mm-swap-fix-race-between-free_swap_and_cache-and-swa.patch b/queue-6.8/mm-swap-fix-race-between-free_swap_and_cache-and-swa.patch new file mode 100644 index 00000000000..9c32236a071 --- /dev/null +++ b/queue-6.8/mm-swap-fix-race-between-free_swap_and_cache-and-swa.patch @@ -0,0 +1,119 @@ +From 72f0aee33da6f9326a79a2f5adf8327cc755dc9c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Mar 2024 14:03:56 +0000 +Subject: mm: swap: fix race between free_swap_and_cache() and swapoff() + +From: Ryan Roberts + +[ Upstream commit 82b1c07a0af603e3c47b906c8e991dc96f01688e ] + +There was previously a theoretical window where swapoff() could run and +teardown a swap_info_struct while a call to free_swap_and_cache() was +running in another thread. This could cause, amongst other bad +possibilities, swap_page_trans_huge_swapped() (called by +free_swap_and_cache()) to access the freed memory for swap_map. + +This is a theoretical problem and I haven't been able to provoke it from a +test case. But there has been agreement based on code review that this is +possible (see link below). + +Fix it by using get_swap_device()/put_swap_device(), which will stall +swapoff(). 
There was an extra check in _swap_info_get() to confirm that +the swap entry was not free. This isn't present in get_swap_device() +because it doesn't make sense in general due to the race between getting +the reference and swapoff. So I've added an equivalent check directly in +free_swap_and_cache(). + +Details of how to provoke one possible issue (thanks to David Hildenbrand +for deriving this): + +--8<----- + +__swap_entry_free() might be the last user and result in +"count == SWAP_HAS_CACHE". + +swapoff->try_to_unuse() will stop as soon as soon as si->inuse_pages==0. + +So the question is: could someone reclaim the folio and turn +si->inuse_pages==0, before we completed swap_page_trans_huge_swapped(). + +Imagine the following: 2 MiB folio in the swapcache. Only 2 subpages are +still references by swap entries. + +Process 1 still references subpage 0 via swap entry. +Process 2 still references subpage 1 via swap entry. + +Process 1 quits. Calls free_swap_and_cache(). +-> count == SWAP_HAS_CACHE +[then, preempted in the hypervisor etc.] + +Process 2 quits. Calls free_swap_and_cache(). +-> count == SWAP_HAS_CACHE + +Process 2 goes ahead, passes swap_page_trans_huge_swapped(), and calls +__try_to_reclaim_swap(). + +__try_to_reclaim_swap()->folio_free_swap()->delete_from_swap_cache()-> +put_swap_folio()->free_swap_slot()->swapcache_free_entries()-> +swap_entry_free()->swap_range_free()-> +... +WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); + +What stops swapoff to succeed after process 2 reclaimed the swap cache +but before process1 finished its call to swap_page_trans_huge_swapped()? + +--8<----- + +Link: https://lkml.kernel.org/r/20240306140356.3974886-1-ryan.roberts@arm.com +Fixes: 7c00bafee87c ("mm/swap: free swap slots in batch") +Closes: https://lore.kernel.org/linux-mm/65a66eb9-41f8-4790-8db2-0c70ea15979f@redhat.com/ +Signed-off-by: Ryan Roberts +Cc: David Hildenbrand +Cc: "Huang, Ying" +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + mm/swapfile.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 746aa9da53025..6fe0cc25535f5 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -1227,6 +1227,11 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, + * with get_swap_device() and put_swap_device(), unless the swap + * functions call get/put_swap_device() by themselves. + * ++ * Note that when only holding the PTL, swapoff might succeed immediately ++ * after freeing a swap entry. Therefore, immediately after ++ * __swap_entry_free(), the swap info might become stale and should not ++ * be touched without a prior get_swap_device(). ++ * + * Check whether swap entry is valid in the swap device. 
If so, + * return pointer to swap_info_struct, and keep the swap entry valid + * via preventing the swap device from being swapoff, until +@@ -1604,13 +1609,19 @@ int free_swap_and_cache(swp_entry_t entry) + if (non_swap_entry(entry)) + return 1; + +- p = _swap_info_get(entry); ++ p = get_swap_device(entry); + if (p) { ++ if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { ++ put_swap_device(p); ++ return 0; ++ } ++ + count = __swap_entry_free(p, entry); + if (count == SWAP_HAS_CACHE && + !swap_page_trans_huge_swapped(p, entry)) + __try_to_reclaim_swap(p, swp_offset(entry), + TTRS_UNMAPPED | TTRS_FULL); ++ put_swap_device(p); + } + return p != NULL; + } +-- +2.43.0 + diff --git a/queue-6.8/mmc-core-fix-switch-on-gp3-partition.patch b/queue-6.8/mmc-core-fix-switch-on-gp3-partition.patch new file mode 100644 index 00000000000..a0a7d793dc8 --- /dev/null +++ b/queue-6.8/mmc-core-fix-switch-on-gp3-partition.patch @@ -0,0 +1,86 @@ +From 2c58672dfdee335a6a6800c7c51a575772faa5db Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Mar 2024 10:44:38 +0900 +Subject: mmc: core: Fix switch on gp3 partition + +From: Dominique Martinet + +[ Upstream commit 4af59a8df5ea930038cd3355e822f5eedf4accc1 ] + +Commit e7794c14fd73 ("mmc: rpmb: fixes pause retune on all RPMB +partitions.") added a mask check for 'part_type', but the mask used was +wrong leading to the code intended for rpmb also being executed for GP3. + +On some MMCs (but not all) this would make gp3 partition inaccessible: +armadillo:~# head -c 1 < /dev/mmcblk2gp3 +head: standard input: I/O error +armadillo:~# dmesg -c +[ 422.976583] mmc2: running CQE recovery +[ 423.058182] mmc2: running CQE recovery +[ 423.137607] mmc2: running CQE recovery +[ 423.137802] blk_update_request: I/O error, dev mmcblk2gp3, sector 0 op 0x0:(READ) flags 0x80700 phys_seg 4 prio class 0 +[ 423.237125] mmc2: running CQE recovery +[ 423.318206] mmc2: running CQE recovery +[ 423.397680] mmc2: running CQE recovery +[ 423.397837] blk_update_request: I/O error, dev mmcblk2gp3, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 0 +[ 423.408287] Buffer I/O error on dev mmcblk2gp3, logical block 0, async page read + +the part_type values of interest here are defined as follow: +main 0 +boot0 1 +boot1 2 +rpmb 3 +gp0 4 +gp1 5 +gp2 6 +gp3 7 + +so mask with EXT_CSD_PART_CONFIG_ACC_MASK (7) to correctly identify rpmb + +Fixes: e7794c14fd73 ("mmc: rpmb: fixes pause retune on all RPMB partitions.") +Cc: stable@vger.kernel.org +Cc: Jorge Ramirez-Ortiz +Signed-off-by: Dominique Martinet +Reviewed-by: Linus Walleij +Link: https://lore.kernel.org/r/20240306-mmc-partswitch-v1-1-bf116985d950@codewreck.org +Signed-off-by: Ulf Hansson +Signed-off-by: Sasha Levin +--- + drivers/mmc/core/block.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c +index 32d49100dff51..86efa6084696e 100644 +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -874,10 +874,11 @@ static const struct block_device_operations mmc_bdops = { + static int mmc_blk_part_switch_pre(struct mmc_card *card, + unsigned int part_type) + { +- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; ++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; ++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; + int ret = 0; + +- if ((part_type & mask) == mask) { ++ if ((part_type & mask) == rpmb) { + if (card->ext_csd.cmdq_en) { + ret = mmc_cmdq_disable(card); + if (ret) +@@ -892,10 +893,11 @@ static int 
mmc_blk_part_switch_pre(struct mmc_card *card, + static int mmc_blk_part_switch_post(struct mmc_card *card, + unsigned int part_type) + { +- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; ++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; ++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; + int ret = 0; + +- if ((part_type & mask) == mask) { ++ if ((part_type & mask) == rpmb) { + mmc_retune_unpause(card->host); + if (card->reenable_cmdq && !card->ext_csd.cmdq_en) + ret = mmc_cmdq_enable(card); +-- +2.43.0 + diff --git a/queue-6.8/mmc-tmio-avoid-concurrent-runs-of-mmc_request_done.patch b/queue-6.8/mmc-tmio-avoid-concurrent-runs-of-mmc_request_done.patch new file mode 100644 index 00000000000..78bb31bba70 --- /dev/null +++ b/queue-6.8/mmc-tmio-avoid-concurrent-runs-of-mmc_request_done.patch @@ -0,0 +1,51 @@ +From 5f9561574bacb43fd5fc26704902bb993ba5b32f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 11:42:56 +0100 +Subject: mmc: tmio: avoid concurrent runs of mmc_request_done() + +From: Wolfram Sang + +[ Upstream commit e8d1b41e69d72c62865bebe8f441163ec00b3d44 ] + +With the to-be-fixed commit, the reset_work handler cleared 'host->mrq' +outside of the spinlock protected critical section. That leaves a small +race window during execution of 'tmio_mmc_reset()' where the done_work +handler could grab a pointer to the now invalid 'host->mrq'. Both would +use it to call mmc_request_done() causing problems (see link below). + +However, 'host->mrq' cannot simply be cleared earlier inside the +critical section. That would allow new mrqs to come in asynchronously +while the actual reset of the controller still needs to be done. So, +like 'tmio_mmc_set_ios()', an ERR_PTR is used to prevent new mrqs from +coming in but still avoiding concurrency between work handlers. + +Reported-by: Dirk Behme +Closes: https://lore.kernel.org/all/20240220061356.3001761-1-dirk.behme@de.bosch.com/ +Fixes: df3ef2d3c92c ("mmc: protect the tmio_mmc driver against a theoretical race") +Signed-off-by: Wolfram Sang +Tested-by: Dirk Behme +Reviewed-by: Dirk Behme +Cc: stable@vger.kernel.org # 3.0+ +Link: https://lore.kernel.org/r/20240305104423.3177-2-wsa+renesas@sang-engineering.com +Signed-off-by: Ulf Hansson +Signed-off-by: Sasha Levin +--- + drivers/mmc/host/tmio_mmc_core.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c +index be7f18fd4836a..c253d176db691 100644 +--- a/drivers/mmc/host/tmio_mmc_core.c ++++ b/drivers/mmc/host/tmio_mmc_core.c +@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work) + else + mrq->cmd->error = -ETIMEDOUT; + ++ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */ ++ host->mrq = ERR_PTR(-EBUSY); + host->cmd = NULL; + host->data = NULL; + +-- +2.43.0 + diff --git a/queue-6.8/mtd-rawnand-add-a-helper-for-calculating-a-page-inde.patch b/queue-6.8/mtd-rawnand-add-a-helper-for-calculating-a-page-inde.patch new file mode 100644 index 00000000000..ed96d77f8c0 --- /dev/null +++ b/queue-6.8/mtd-rawnand-add-a-helper-for-calculating-a-page-inde.patch @@ -0,0 +1,60 @@ +From c32dfea35a8601b452871ea76f3ece10ff339967 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 12:55:44 +0100 +Subject: mtd: rawnand: Add a helper for calculating a page index + +From: Miquel Raynal + +[ Upstream commit df9803bf5a91e3599f12b53c94722f2c4e144a86 ] + +For LUN crossing boundaries, it is handy to know what is the index of +the last page in a LUN. 
This helper will soon be reused. At the same +time I rename page_per_lun to ppl in the calling function to clarify the +lines. + +Cc: stable@vger.kernel.org # v6.7 +Signed-off-by: Miquel Raynal +Link: https://lore.kernel.org/linux-mtd/20240223115545.354541-3-miquel.raynal@bootlin.com +Signed-off-by: Sasha Levin +--- + drivers/mtd/nand/raw/nand_base.c | 16 +++++++++++----- + 1 file changed, 11 insertions(+), 5 deletions(-) + +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index bcfd99a1699fd..d6a27e08b1127 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -1211,19 +1211,25 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, + return nand_exec_op(chip, &op); + } + ++static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun) ++{ ++ /* lun is expected to be very small */ ++ return (lun * pages_per_lun) + pages_per_lun - 1; ++} ++ + static void rawnand_cap_cont_reads(struct nand_chip *chip) + { + struct nand_memory_organization *memorg; +- unsigned int pages_per_lun, first_lun, last_lun; ++ unsigned int ppl, first_lun, last_lun; + + memorg = nanddev_get_memorg(&chip->base); +- pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun; +- first_lun = chip->cont_read.first_page / pages_per_lun; +- last_lun = chip->cont_read.last_page / pages_per_lun; ++ ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun; ++ first_lun = chip->cont_read.first_page / ppl; ++ last_lun = chip->cont_read.last_page / ppl; + + /* Prevent sequential cache reads across LUN boundaries */ + if (first_lun != last_lun) +- chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1; ++ chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun); + else + chip->cont_read.pause_page = chip->cont_read.last_page; + } +-- +2.43.0 + diff --git a/queue-6.8/mtd-rawnand-constrain-even-more-when-continuous-read.patch b/queue-6.8/mtd-rawnand-constrain-even-more-when-continuous-read.patch new file mode 100644 index 00000000000..f804c062cfb --- /dev/null +++ b/queue-6.8/mtd-rawnand-constrain-even-more-when-continuous-read.patch @@ -0,0 +1,70 @@ +From 1039d579c482a97baaf0c7e6c1462be5f2764ace Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 12:53:14 +0100 +Subject: mtd: rawnand: Constrain even more when continuous reads are enabled + +From: Miquel Raynal + +[ Upstream commit 78ffbefba8d7822b232585570b293de5bc397da6 ] + +As a matter of fact, continuous reads require additional handling at the +operation level in order for them to work properly. The core helpers do +have this additional logic now, but any time a controller implements its +own page helper, this extra logic is "lost". This means we need another +level of per-controller driver checks to ensure they can leverage +continuous reads. This is for now unsupported, so in order to ensure +continuous reads are enabled only when fully using the core page +helpers, we need to add more initial checks. + +Also, as performance is not relevant during raw accesses, we also +prevent these from enabling the feature. + +This should solve the issue seen with controllers such as the STM32 FMC2 +when in sequencer mode. In this case, the continuous read feature would +be enabled but not leveraged, and most importantly not disabled, leading +to further operations to fail. 
+ +Reported-by: Christophe Kerello +Fixes: 003fe4b9545b ("mtd: rawnand: Support for sequential cache reads") +Cc: stable@vger.kernel.org +Signed-off-by: Miquel Raynal +Tested-by: Christophe Kerello +Link: https://lore.kernel.org/linux-mtd/20240307115315.1942678-1-miquel.raynal@bootlin.com +Signed-off-by: Sasha Levin +--- + drivers/mtd/nand/raw/nand_base.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index 4d5a663e4e059..2479fa98f9912 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -3594,7 +3594,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, + oob = ops->oobbuf; + oob_required = oob ? 1 : 0; + +- rawnand_enable_cont_reads(chip, page, readlen, col); ++ if (likely(ops->mode != MTD_OPS_RAW)) ++ rawnand_enable_cont_reads(chip, page, readlen, col); + + while (1) { + struct mtd_ecc_stats ecc_stats = mtd->ecc_stats; +@@ -5212,6 +5213,15 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip) + if (!nand_has_exec_op(chip)) + return; + ++ /* ++ * For now, continuous reads can only be used with the core page helpers. ++ * This can be extended later. ++ */ ++ if (!(chip->ecc.read_page == nand_read_page_hwecc || ++ chip->ecc.read_page == nand_read_page_syndrome || ++ chip->ecc.read_page == nand_read_page_swecc)) ++ return; ++ + rawnand_check_cont_read_support(chip); + } + +-- +2.43.0 + diff --git a/queue-6.8/mtd-rawnand-ensure-all-continuous-terms-are-always-i.patch b/queue-6.8/mtd-rawnand-ensure-all-continuous-terms-are-always-i.patch new file mode 100644 index 00000000000..f04c99e6df3 --- /dev/null +++ b/queue-6.8/mtd-rawnand-ensure-all-continuous-terms-are-always-i.patch @@ -0,0 +1,77 @@ +From 6c458ed91c3af7296e54ce5df88998e1917cda20 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 12:55:45 +0100 +Subject: mtd: rawnand: Ensure all continuous terms are always in sync + +From: Miquel Raynal + +[ Upstream commit 6fb075fca63c3486612986eeff84ed4179644038 ] + +While crossing a LUN boundary, it is probably safer (and clearer) to +keep all members of the continuous read structure aligned, including the +pause page (which is the last page of the lun or the last page of the +continuous read). Once these members properly in sync, we can use the +rawnand_cap_cont_reads() helper everywhere to "prepare" the next +continuous read if there is one. 
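+
+A concrete example may help (hypothetical geometry: 1024 pages per LUN,
+a read spanning pages 1000 to 1100):
+
+  /* first_lun = 1000 / 1024 = 0, last_lun = 1100 / 1024 = 1 */
+  pause_page = rawnand_last_page_of_lun(1024, 0);   /* = 1023 */
+  /* after page 1023 is read, first_page becomes 1024, the helper runs
+   * again, both ends now sit in LUN 1, so pause_page = last_page = 1100 */
+
+Keeping first_page, pause_page and last_page consistent at every step is
+what allows the same helper to be reused from every call site.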
+ +Fixes: bbcd80f53a5e ("mtd: rawnand: Prevent crossing LUN boundaries during sequential reads") +Cc: stable@vger.kernel.org +Signed-off-by: Miquel Raynal +Link: https://lore.kernel.org/linux-mtd/20240223115545.354541-4-miquel.raynal@bootlin.com +Signed-off-by: Sasha Levin +--- + drivers/mtd/nand/raw/nand_base.c | 23 ++++++++++++++--------- + 1 file changed, 14 insertions(+), 9 deletions(-) + +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index d6a27e08b1127..4d5a663e4e059 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -1232,6 +1232,15 @@ static void rawnand_cap_cont_reads(struct nand_chip *chip) + chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun); + else + chip->cont_read.pause_page = chip->cont_read.last_page; ++ ++ if (chip->cont_read.first_page == chip->cont_read.pause_page) { ++ chip->cont_read.first_page++; ++ chip->cont_read.pause_page = min(chip->cont_read.last_page, ++ rawnand_last_page_of_lun(ppl, first_lun + 1)); ++ } ++ ++ if (chip->cont_read.first_page >= chip->cont_read.last_page) ++ chip->cont_read.ongoing = false; + } + + static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page, +@@ -1298,12 +1307,11 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p + if (!chip->cont_read.ongoing) + return 0; + +- if (page == chip->cont_read.pause_page && +- page != chip->cont_read.last_page) { +- chip->cont_read.first_page = chip->cont_read.pause_page + 1; +- rawnand_cap_cont_reads(chip); +- } else if (page == chip->cont_read.last_page) { ++ if (page == chip->cont_read.last_page) { + chip->cont_read.ongoing = false; ++ } else if (page == chip->cont_read.pause_page) { ++ chip->cont_read.first_page++; ++ rawnand_cap_cont_reads(chip); + } + + return 0; +@@ -3510,10 +3518,7 @@ static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned i + return; + + chip->cont_read.first_page++; +- if (chip->cont_read.first_page == chip->cont_read.pause_page) +- chip->cont_read.first_page++; +- if (chip->cont_read.first_page >= chip->cont_read.last_page) +- chip->cont_read.ongoing = false; ++ rawnand_cap_cont_reads(chip); + } + + /** +-- +2.43.0 + diff --git a/queue-6.8/mtd-rawnand-fix-and-simplify-again-the-continuous-re.patch b/queue-6.8/mtd-rawnand-fix-and-simplify-again-the-continuous-re.patch new file mode 100644 index 00000000000..934eb71d0df --- /dev/null +++ b/queue-6.8/mtd-rawnand-fix-and-simplify-again-the-continuous-re.patch @@ -0,0 +1,86 @@ +From 6c1c25af5b8cbea87751de8d890919a34fd45f57 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 12:55:43 +0100 +Subject: mtd: rawnand: Fix and simplify again the continuous read derivations + +From: Miquel Raynal + +[ Upstream commit c7ee7c8d4b60fe46d4861b1200bc1c7ab657960a ] + +We need to avoid the first page if we don't read it entirely. +We need to avoid the last page if we don't read it entirely. +While rather simple, this logic has been failed in the previous +fix. This time I wrote about 30 unit tests locally to check each +possible condition, hopefully I covered them all. 
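+
+A worked example (hypothetical 4 KiB page size): a read starting at
+col = 512 of page N with readlen = 12288 covers page N partially, pages
+N+1 and N+2 entirely, and page N+3 partially.  The new derivation gives:
+
+  first_page = N + 1;                          /* col != 0, skip partial first page */
+  last_page  = N + (512 + 12288) / 4096 - 1;   /* = N + 2, skip partial last page   */
+
+so only the two fully-read pages are candidates for a continuous read,
+and anything shorter than two full pages (readlen < 2 * writesize) bails
+out before doing any of this arithmetic.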
+ +Reported-by: Christophe Kerello +Closes: https://lore.kernel.org/linux-mtd/20240221175327.42f7076d@xps-13/T/#m399bacb10db8f58f6b1f0149a1df867ec086bb0a +Suggested-by: Christophe Kerello +Fixes: 828f6df1bcba ("mtd: rawnand: Clarify conditions to enable continuous reads") +Cc: stable@vger.kernel.org +Signed-off-by: Miquel Raynal +Tested-by: Christophe Kerello +Link: https://lore.kernel.org/linux-mtd/20240223115545.354541-2-miquel.raynal@bootlin.com +Signed-off-by: Sasha Levin +--- + drivers/mtd/nand/raw/nand_base.c | 34 +++++++++++++++++++------------- + 1 file changed, 20 insertions(+), 14 deletions(-) + +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index 3b3ce2926f5d1..bcfd99a1699fd 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -3466,30 +3466,36 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page, + u32 readlen, int col) + { + struct mtd_info *mtd = nand_to_mtd(chip); +- unsigned int end_page, end_col; ++ unsigned int first_page, last_page; + + chip->cont_read.ongoing = false; + + if (!chip->controller->supported_op.cont_read) + return; + +- end_page = DIV_ROUND_UP(col + readlen, mtd->writesize); +- end_col = (col + readlen) % mtd->writesize; ++ /* ++ * Don't bother making any calculations if the length is too small. ++ * Side effect: avoids possible integer underflows below. ++ */ ++ if (readlen < (2 * mtd->writesize)) ++ return; + ++ /* Derive the page where continuous read should start (the first full page read) */ ++ first_page = page; + if (col) +- page++; +- +- if (end_col && end_page) +- end_page--; ++ first_page++; + +- if (page + 1 > end_page) +- return; +- +- chip->cont_read.first_page = page; +- chip->cont_read.last_page = end_page; +- chip->cont_read.ongoing = true; ++ /* Derive the page where continuous read should stop (the last full page read) */ ++ last_page = page + ((col + readlen) / mtd->writesize) - 1; + +- rawnand_cap_cont_reads(chip); ++ /* Configure and enable continuous read when suitable */ ++ if (first_page < last_page) { ++ chip->cont_read.first_page = first_page; ++ chip->cont_read.last_page = last_page; ++ chip->cont_read.ongoing = true; ++ /* May reset the ongoing flag */ ++ rawnand_cap_cont_reads(chip); ++ } + } + + static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page) +-- +2.43.0 + diff --git a/queue-6.8/mtd-rawnand-meson-fix-scrambling-mode-value-in-comma.patch b/queue-6.8/mtd-rawnand-meson-fix-scrambling-mode-value-in-comma.patch new file mode 100644 index 00000000000..ecc6aa92916 --- /dev/null +++ b/queue-6.8/mtd-rawnand-meson-fix-scrambling-mode-value-in-comma.patch @@ -0,0 +1,39 @@ +From 23adee4bdc923ceb846f9473618aa39519b0c2fa Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 11 Feb 2024 00:45:51 +0300 +Subject: mtd: rawnand: meson: fix scrambling mode value in command macro + +From: Arseniy Krasnov + +[ Upstream commit ef6f463599e16924cdd02ce5056ab52879dc008c ] + +Scrambling mode is enabled by value (1 << 19). NFC_CMD_SCRAMBLER_ENABLE +is already (1 << 19), so there is no need to shift it again in CMDRWGEN +macro. 
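+
+A quick illustration of the arithmetic (assuming the ran argument is the
+flag itself, which is what the fix implies):
+
+  /* NFC_CMD_SCRAMBLER_ENABLE == (1 << 19) */
+  old: ((1 << 19) << 19)   /* lands at "bit 38", far outside the 32-bit
+                              command word, so scrambling stays off     */
+  new: (1 << 19)           /* bit 19 set, scrambling actually enabled   */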
+ +Signed-off-by: Arseniy Krasnov +Cc: +Fixes: 8fae856c5350 ("mtd: rawnand: meson: add support for Amlogic NAND flash controller") +Signed-off-by: Miquel Raynal +Link: https://lore.kernel.org/linux-mtd/20240210214551.441610-1-avkrasnov@salutedevices.com +Signed-off-by: Sasha Levin +--- + drivers/mtd/nand/raw/meson_nand.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c +index cdb58aca59c08..2a96a87cf79ce 100644 +--- a/drivers/mtd/nand/raw/meson_nand.c ++++ b/drivers/mtd/nand/raw/meson_nand.c +@@ -63,7 +63,7 @@ + #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ + ( \ + (cmd_dir) | \ +- ((ran) << 19) | \ ++ (ran) | \ + ((bch) << 14) | \ + ((short_mode) << 13) | \ + (((page_size) & 0x7f) << 6) | \ +-- +2.43.0 + diff --git a/queue-6.8/net-esp-fix-bad-handling-of-pages-from-page_pool.patch b/queue-6.8/net-esp-fix-bad-handling-of-pages-from-page_pool.patch new file mode 100644 index 00000000000..317081866bf --- /dev/null +++ b/queue-6.8/net-esp-fix-bad-handling-of-pages-from-page_pool.patch @@ -0,0 +1,196 @@ +From b084bd28f67620da3bdc7fff25a410c7116b3269 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 17:26:00 +0200 +Subject: net: esp: fix bad handling of pages from page_pool + +From: Dragos Tatulea + +[ Upstream commit c3198822c6cb9fb588e446540485669cc81c5d34 ] + +When the skb is reorganized during esp_output (!esp->inline), the pages +coming from the original skb fragments are supposed to be released back +to the system through put_page. But if the skb fragment pages are +originating from a page_pool, calling put_page on them will trigger a +page_pool leak which will eventually result in a crash. + +This leak can be easily observed when using CONFIG_DEBUG_VM and doing +ipsec + gre (non offloaded) forwarding: + + BUG: Bad page state in process ksoftirqd/16 pfn:1451b6 + page:00000000de2b8d32 refcount:0 mapcount:0 mapping:0000000000000000 index:0x1451b6000 pfn:0x1451b6 + flags: 0x200000000000000(node=0|zone=2) + page_type: 0xffffffff() + raw: 0200000000000000 dead000000000040 ffff88810d23c000 0000000000000000 + raw: 00000001451b6000 0000000000000001 00000000ffffffff 0000000000000000 + page dumped because: page_pool leak + Modules linked in: ip_gre gre mlx5_ib mlx5_core xt_conntrack xt_MASQUERADE nf_conntrack_netlink nfnetlink iptable_nat nf_nat xt_addrtype br_netfilter rpcrdma rdma_ucm ib_iser libiscsi scsi_transport_iscsi ib_umad rdma_cm ib_ipoib iw_cm ib_cm ib_uverbs ib_core overlay zram zsmalloc fuse [last unloaded: mlx5_core] + CPU: 16 PID: 96 Comm: ksoftirqd/16 Not tainted 6.8.0-rc4+ #22 + Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014 + Call Trace: + + dump_stack_lvl+0x36/0x50 + bad_page+0x70/0xf0 + free_unref_page_prepare+0x27a/0x460 + free_unref_page+0x38/0x120 + esp_ssg_unref.isra.0+0x15f/0x200 + esp_output_tail+0x66d/0x780 + esp_xmit+0x2c5/0x360 + validate_xmit_xfrm+0x313/0x370 + ? validate_xmit_skb+0x1d/0x330 + validate_xmit_skb_list+0x4c/0x70 + sch_direct_xmit+0x23e/0x350 + __dev_queue_xmit+0x337/0xba0 + ? nf_hook_slow+0x3f/0xd0 + ip_finish_output2+0x25e/0x580 + iptunnel_xmit+0x19b/0x240 + ip_tunnel_xmit+0x5fb/0xb60 + ipgre_xmit+0x14d/0x280 [ip_gre] + dev_hard_start_xmit+0xc3/0x1c0 + __dev_queue_xmit+0x208/0xba0 + ? nf_hook_slow+0x3f/0xd0 + ip_finish_output2+0x1ca/0x580 + ip_sublist_rcv_finish+0x32/0x40 + ip_sublist_rcv+0x1b2/0x1f0 + ? 
ip_rcv_finish_core.constprop.0+0x460/0x460 + ip_list_rcv+0x103/0x130 + __netif_receive_skb_list_core+0x181/0x1e0 + netif_receive_skb_list_internal+0x1b3/0x2c0 + napi_gro_receive+0xc8/0x200 + gro_cell_poll+0x52/0x90 + __napi_poll+0x25/0x1a0 + net_rx_action+0x28e/0x300 + __do_softirq+0xc3/0x276 + ? sort_range+0x20/0x20 + run_ksoftirqd+0x1e/0x30 + smpboot_thread_fn+0xa6/0x130 + kthread+0xcd/0x100 + ? kthread_complete_and_exit+0x20/0x20 + ret_from_fork+0x31/0x50 + ? kthread_complete_and_exit+0x20/0x20 + ret_from_fork_asm+0x11/0x20 + + +The suggested fix is to introduce a new wrapper (skb_page_unref) that +covers page refcounting for page_pool pages as well. + +Cc: stable@vger.kernel.org +Fixes: 6a5bcd84e886 ("page_pool: Allow drivers to hint on SKB recycling") +Reported-and-tested-by: Anatoli N.Chechelnickiy +Reported-by: Ian Kumlien +Link: https://lore.kernel.org/netdev/CAA85sZvvHtrpTQRqdaOx6gd55zPAVsqMYk_Lwh4Md5knTq7AyA@mail.gmail.com +Signed-off-by: Dragos Tatulea +Reviewed-by: Mina Almasry +Reviewed-by: Jakub Kicinski +Acked-by: Ilias Apalodimas +Signed-off-by: Steffen Klassert +Signed-off-by: Sasha Levin +--- + include/linux/skbuff.h | 10 ++++++++++ + net/ipv4/esp4.c | 8 ++++---- + net/ipv6/esp6.c | 8 ++++---- + 3 files changed, 18 insertions(+), 8 deletions(-) + +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 2dde34c29203b..d9a1ccfb57080 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -3448,6 +3448,16 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) + + bool napi_pp_put_page(struct page *page, bool napi_safe); + ++static inline void ++skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe) ++{ ++#ifdef CONFIG_PAGE_POOL ++ if (skb->pp_recycle && napi_pp_put_page(page, napi_safe)) ++ return; ++#endif ++ put_page(page); ++} ++ + static inline void + napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) + { +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index 4dd9e50406720..d33d124218140 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -95,7 +95,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, + __alignof__(struct scatterlist)); + } + +-static void esp_ssg_unref(struct xfrm_state *x, void *tmp) ++static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) + { + struct crypto_aead *aead = x->data; + int extralen = 0; +@@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) + */ + if (req->src != req->dst) + for (sg = sg_next(req->src); sg; sg = sg_next(sg)) +- put_page(sg_page(sg)); ++ skb_page_unref(skb, sg_page(sg), false); + } + + #ifdef CONFIG_INET_ESPINTCP +@@ -260,7 +260,7 @@ static void esp_output_done(void *data, int err) + } + + tmp = ESP_SKB_CB(skb)->tmp; +- esp_ssg_unref(x, tmp); ++ esp_ssg_unref(x, tmp, skb); + kfree(tmp); + + if (xo && (xo->flags & XFRM_DEV_RESUME)) { +@@ -639,7 +639,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * + } + + if (sg != dsg) +- esp_ssg_unref(x, tmp); ++ esp_ssg_unref(x, tmp, skb); + + if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) + err = esp_output_tail_tcp(x, skb); +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 6e6efe026cdcc..7371886d4f9f4 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -112,7 +112,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, + __alignof__(struct scatterlist)); + } + +-static void esp_ssg_unref(struct xfrm_state *x, void *tmp) ++static void esp_ssg_unref(struct 
xfrm_state *x, void *tmp, struct sk_buff *skb) + { + struct crypto_aead *aead = x->data; + int extralen = 0; +@@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) + */ + if (req->src != req->dst) + for (sg = sg_next(req->src); sg; sg = sg_next(sg)) +- put_page(sg_page(sg)); ++ skb_page_unref(skb, sg_page(sg), false); + } + + #ifdef CONFIG_INET6_ESPINTCP +@@ -294,7 +294,7 @@ static void esp_output_done(void *data, int err) + } + + tmp = ESP_SKB_CB(skb)->tmp; +- esp_ssg_unref(x, tmp); ++ esp_ssg_unref(x, tmp, skb); + kfree(tmp); + + esp_output_encap_csum(skb); +@@ -677,7 +677,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info + } + + if (sg != dsg) +- esp_ssg_unref(x, tmp); ++ esp_ssg_unref(x, tmp, skb); + + if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) + err = esp_output_tail_tcp(x, skb); +-- +2.43.0 + diff --git a/queue-6.8/net-hns3-tracing-fix-hclgevf-trace-event-strings.patch b/queue-6.8/net-hns3-tracing-fix-hclgevf-trace-event-strings.patch new file mode 100644 index 00000000000..3efa781bda1 --- /dev/null +++ b/queue-6.8/net-hns3-tracing-fix-hclgevf-trace-event-strings.patch @@ -0,0 +1,153 @@ +From 55fa686d4539ada8abe4b370627759ef019ac557 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 09:34:54 -0400 +Subject: net: hns3: tracing: fix hclgevf trace event strings +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Steven Rostedt (Google) + +[ Upstream commit 3f9952e8d80cca2da3b47ecd5ad9ec16cfd1a649 ] + +The __string() and __assign_str() helper macros of the TRACE_EVENT() macro +are going through some optimizations where only the source string of +__string() will be used and the __assign_str() source will be ignored and +later removed. + +To make sure that there's no issues, a new check is added between the +__string() src argument and the __assign_str() src argument that does a +strcmp() to make sure they are the same string. + +The hclgevf trace events have: + + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + +Which triggers the warning: + +hclgevf_trace.h:34:39: error: passing argument 1 of ‘strcmp’ from incompatible pointer type [-Werror=incompatible-pointer-types] + 34 | __assign_str(devname, &hdev->nic.kinfo.netdev->name); + [..] +arch/x86/include/asm/string_64.h:75:24: note: expected ‘const char *’ but argument is of type ‘char (*)[16]’ + 75 | int strcmp(const char *cs, const char *ct); + | ~~~~~~~~~~~~^~ + +Because __assign_str() now has: + + WARN_ON_ONCE(__builtin_constant_p(src) ? \ + strcmp((src), __data_offsets.dst##_ptr_) : \ + (src) != __data_offsets.dst##_ptr_); \ + +The problem is the '&' on hdev->nic.kinfo.netdev->name. That's because +that name is: + + char name[IFNAMSIZ] + +Where passing an address '&' of a char array is not compatible with strcmp(). + +The '&' is not necessary, remove it. + +Link: https://lore.kernel.org/linux-trace-kernel/20240313093454.3909afe7@gandalf.local.home + +Cc: netdev +Cc: Yisen Zhuang +Cc: Salil Mehta +Cc: "David S. 
Miller" +Cc: Eric Dumazet +Cc: Jakub Kicinski +Cc: Yufeng Mo +Cc: Huazhong Tan +Cc: stable@vger.kernel.org +Acked-by: Paolo Abeni +Reviewed-by: Jijie Shao +Fixes: d8355240cf8fb ("net: hns3: add trace event support for PF/VF mailbox") +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 8 ++++---- + .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 8 ++++---- + 2 files changed, 8 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +index 8510b88d49820..f3cd5a376eca9 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get, + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) +- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) ++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_GET_MBX_LEN) + ), + +@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get, + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); +- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); ++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), +@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send, + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) +- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) ++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_SEND_MBX_LEN) + ), + +@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send, + __entry->vfid = req->dest_vfid; + __entry->code = le16_to_cpu(req->msg.code); + __assign_str(pciname, pci_name(hdev->pdev)); +- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); ++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +index 5d4895bb57a17..b259e95dd53c2 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get, + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) +- __string(devname, &hdev->nic.kinfo.netdev->name) ++ __string(devname, hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_GET_MBX_LEN) + ), + +@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get, + __entry->vfid = req->dest_vfid; + __entry->code = le16_to_cpu(req->msg.code); + __assign_str(pciname, pci_name(hdev->pdev)); +- __assign_str(devname, &hdev->nic.kinfo.netdev->name); ++ __assign_str(devname, hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), +@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send, + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) +- __string(devname, &hdev->nic.kinfo.netdev->name) ++ __string(devname, hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_SEND_MBX_LEN) + ), + +@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send, + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + 
__assign_str(pciname, pci_name(hdev->pdev)); +- __assign_str(devname, &hdev->nic.kinfo.netdev->name); ++ __assign_str(devname, hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), +-- +2.43.0 + diff --git a/queue-6.8/nfs-fix-uaf-in-direct-writes.patch b/queue-6.8/nfs-fix-uaf-in-direct-writes.patch new file mode 100644 index 00000000000..b7fba74cedc --- /dev/null +++ b/queue-6.8/nfs-fix-uaf-in-direct-writes.patch @@ -0,0 +1,125 @@ +From 9b68eedeb329ae816dba66cd460826dd778945d9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 11:49:57 -0500 +Subject: nfs: fix UAF in direct writes + +From: Josef Bacik + +[ Upstream commit 17f46b803d4f23c66cacce81db35fef3adb8f2af ] + +In production we have been hitting the following warning consistently + +------------[ cut here ]------------ +refcount_t: underflow; use-after-free. +WARNING: CPU: 17 PID: 1800359 at lib/refcount.c:28 refcount_warn_saturate+0x9c/0xe0 +Workqueue: nfsiod nfs_direct_write_schedule_work [nfs] +RIP: 0010:refcount_warn_saturate+0x9c/0xe0 +PKRU: 55555554 +Call Trace: + + ? __warn+0x9f/0x130 + ? refcount_warn_saturate+0x9c/0xe0 + ? report_bug+0xcc/0x150 + ? handle_bug+0x3d/0x70 + ? exc_invalid_op+0x16/0x40 + ? asm_exc_invalid_op+0x16/0x20 + ? refcount_warn_saturate+0x9c/0xe0 + nfs_direct_write_schedule_work+0x237/0x250 [nfs] + process_one_work+0x12f/0x4a0 + worker_thread+0x14e/0x3b0 + ? ZSTD_getCParams_internal+0x220/0x220 + kthread+0xdc/0x120 + ? __btf_name_valid+0xa0/0xa0 + ret_from_fork+0x1f/0x30 + +This is because we're completing the nfs_direct_request twice in a row. + +The source of this is when we have our commit requests to submit, we +process them and send them off, and then in the completion path for the +commit requests we have + +if (nfs_commit_end(cinfo.mds)) + nfs_direct_write_complete(dreq); + +However since we're submitting asynchronous requests we sometimes have +one that completes before we submit the next one, so we end up calling +complete on the nfs_direct_request twice. + +The only other place we use nfs_generic_commit_list() is in +__nfs_commit_inode, which wraps this call in a + +nfs_commit_begin(); +nfs_commit_end(); + +Which is a common pattern for this style of completion handling, one +that is also repeated in the direct code with get_dreq()/put_dreq() +calls around where we process events as well as in the completion paths. + +Fix this by using the same pattern for the commit requests. + +Before with my 200 node rocksdb stress running this warning would pop +every 10ish minutes. With my patch the stress test has been running for +several hours without popping. 
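+
+The resulting submission path follows the usual "bias the counter while
+submitting" idiom (sketch only, see the hunk below for the exact code):
+
+  nfs_commit_begin(cinfo.mds);               /* hold an extra rpcs_out reference */
+  nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+  nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+  if (nfs_commit_end(cinfo.mds))             /* drop it; true only on the last drop */
+      nfs_direct_write_complete(dreq);
+
+Each individual commit completion also calls nfs_commit_end(), so the
+dreq can only be completed once, by whoever drops the final reference.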
+ +Signed-off-by: Josef Bacik +Cc: stable@vger.kernel.org +Signed-off-by: Trond Myklebust +Signed-off-by: Sasha Levin +--- + fs/nfs/direct.c | 11 +++++++++-- + fs/nfs/write.c | 2 +- + include/linux/nfs_fs.h | 1 + + 3 files changed, 11 insertions(+), 3 deletions(-) + +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index c03926a1cc73b..d8cd4f69e33a0 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -667,10 +667,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) + LIST_HEAD(mds_list); + + nfs_init_cinfo_from_dreq(&cinfo, dreq); ++ nfs_commit_begin(cinfo.mds); + nfs_scan_commit(dreq->inode, &mds_list, &cinfo); + res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo); +- if (res < 0) /* res == -ENOMEM */ +- nfs_direct_write_reschedule(dreq); ++ if (res < 0) { /* res == -ENOMEM */ ++ spin_lock(&dreq->lock); ++ if (dreq->flags == 0) ++ dreq->flags = NFS_ODIRECT_RESCHED_WRITES; ++ spin_unlock(&dreq->lock); ++ } ++ if (nfs_commit_end(cinfo.mds)) ++ nfs_direct_write_complete(dreq); + } + + static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq) +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index bb79d3a886ae8..5d9dc6c053255 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1650,7 +1650,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo) + !atomic_read(&cinfo->rpcs_out)); + } + +-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) ++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) + { + atomic_inc(&cinfo->rpcs_out); + } +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index f5ce7b1011461..d59116ac82099 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -611,6 +611,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio); + extern int nfs_commit_inode(struct inode *, int); + extern struct nfs_commit_data *nfs_commitdata_alloc(void); + extern void nfs_commit_free(struct nfs_commit_data *data); ++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo); + bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); + + static inline bool nfs_have_writebacks(const struct inode *inode) +-- +2.43.0 + diff --git a/queue-6.8/nfs-read-unlock-folio-on-nfs_page_create_from_folio-.patch b/queue-6.8/nfs-read-unlock-folio-on-nfs_page_create_from_folio-.patch new file mode 100644 index 00000000000..7ea3877ad6b --- /dev/null +++ b/queue-6.8/nfs-read-unlock-folio-on-nfs_page_create_from_folio-.patch @@ -0,0 +1,40 @@ +From 69eb64b10d12dd648915973818b770870e1410b2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 09:41:18 -0500 +Subject: NFS: Read unlock folio on nfs_page_create_from_folio() error + +From: Benjamin Coddington + +[ Upstream commit 11974eec839c167362af685aae5f5e1baaf979eb ] + +The netfs conversion lost a folio_unlock() for the case where +nfs_page_create_from_folio() returns an error (usually -ENOMEM). Restore +it. 
+ +Reported-by: David Jeffery +Cc: # 6.4+ +Fixes: 000dbe0bec05 ("NFS: Convert buffered read paths to use netfs when fscache is enabled") +Signed-off-by: Benjamin Coddington +Acked-by: Dave Wysochanski +Signed-off-by: Trond Myklebust +Signed-off-by: Sasha Levin +--- + fs/nfs/read.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/fs/nfs/read.c b/fs/nfs/read.c +index 7dc21a48e3e7b..a142287d86f68 100644 +--- a/fs/nfs/read.c ++++ b/fs/nfs/read.c +@@ -305,6 +305,8 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio, + new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len); + if (IS_ERR(new)) { + error = PTR_ERR(new); ++ if (nfs_netfs_folio_unlock(folio)) ++ folio_unlock(folio); + goto out; + } + +-- +2.43.0 + diff --git a/queue-6.8/nfsd-fix-nfsd_clid_class-use-of-__string_len-macro.patch b/queue-6.8/nfsd-fix-nfsd_clid_class-use-of-__string_len-macro.patch new file mode 100644 index 00000000000..4a2033272f0 --- /dev/null +++ b/queue-6.8/nfsd-fix-nfsd_clid_class-use-of-__string_len-macro.patch @@ -0,0 +1,57 @@ +From f2e3a1daa8e240fb6ac7baecf5c119f21e88cb3b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 22 Feb 2024 12:28:28 -0500 +Subject: NFSD: Fix nfsd_clid_class use of __string_len() macro + +From: Steven Rostedt (Google) + +[ Upstream commit 9388a2aa453321bcf1ad2603959debea9e6ab6d4 ] + +I'm working on restructuring the __string* macros so that it doesn't need +to recalculate the string twice. That is, it will save it off when +processing __string() and the __assign_str() will not need to do the work +again as it currently does. + +Currently __string_len(item, src, len) doesn't actually use "src", but my +changes will require src to be correct as that is where the __assign_str() +will get its value from. + +The event class nfsd_clid_class has: + + __string_len(name, name, clp->cl_name.len) + +But the second "name" does not exist and causes my changes to fail to +build. That second parameter should be: clp->cl_name.data. 
+ +Link: https://lore.kernel.org/linux-trace-kernel/20240222122828.3d8d213c@gandalf.local.home + +Cc: Neil Brown +Cc: Olga Kornievskaia +Cc: Dai Ngo +Cc: Tom Talpey +Cc: stable@vger.kernel.org +Fixes: d27b74a8675ca ("NFSD: Use new __string_len C macros for nfsd_clid_class") +Acked-by: Chuck Lever +Acked-by: Jeff Layton +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + fs/nfsd/trace.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h +index d1e8cf079b0f4..2cd57033791ff 100644 +--- a/fs/nfsd/trace.h ++++ b/fs/nfsd/trace.h +@@ -843,7 +843,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class, + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + __field(unsigned long, flavor) + __array(unsigned char, verifier, NFS4_VERIFIER_SIZE) +- __string_len(name, name, clp->cl_name.len) ++ __string_len(name, clp->cl_name.data, clp->cl_name.len) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; +-- +2.43.0 + diff --git a/queue-6.8/nilfs2-fix-failure-to-detect-dat-corruption-in-btree.patch b/queue-6.8/nilfs2-fix-failure-to-detect-dat-corruption-in-btree.patch new file mode 100644 index 00000000000..19d4eb611fb --- /dev/null +++ b/queue-6.8/nilfs2-fix-failure-to-detect-dat-corruption-in-btree.patch @@ -0,0 +1,131 @@ +From c7bf58475a269a6d7e799a80554b1b9d4e64f4d8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 19:58:26 +0900 +Subject: nilfs2: fix failure to detect DAT corruption in btree and direct + mappings + +From: Ryusuke Konishi + +[ Upstream commit f2f26b4a84a0ef41791bd2d70861c8eac748f4ba ] + +Patch series "nilfs2: fix kernel bug at submit_bh_wbc()". + +This resolves a kernel BUG reported by syzbot. Since there are two +flaws involved, I've made each one a separate patch. + +The first patch alone resolves the syzbot-reported bug, but I think +both fixes should be sent to stable, so I've tagged them as such. + +This patch (of 2): + +Syzbot has reported a kernel bug in submit_bh_wbc() when writing file data +to a nilfs2 file system whose metadata is corrupted. + +There are two flaws involved in this issue. + +The first flaw is that when nilfs_get_block() locates a data block using +btree or direct mapping, if the disk address translation routine +nilfs_dat_translate() fails with internal code -ENOENT due to DAT metadata +corruption, it can be passed back to nilfs_get_block(). This causes +nilfs_get_block() to misidentify an existing block as non-existent, +causing both data block lookup and insertion to fail inconsistently. + +The second flaw is that nilfs_get_block() returns a successful status in +this inconsistent state. This causes the caller __block_write_begin_int() +or others to request a read even though the buffer is not mapped, +resulting in a BUG_ON check for the BH_Mapped flag in submit_bh_wbc() +failing. + +This fixes the first issue by changing the return value to code -EINVAL +when a conversion using DAT fails with code -ENOENT, avoiding the +conflicting condition that leads to the kernel bug described above. Here, +code -EINVAL indicates that metadata corruption was detected during the +block lookup, which will be properly handled as a file system error and +converted to -EIO when passing through the nilfs2 bmap layer. 
+ +Link: https://lkml.kernel.org/r/20240313105827.5296-1-konishi.ryusuke@gmail.com +Link: https://lkml.kernel.org/r/20240313105827.5296-2-konishi.ryusuke@gmail.com +Fixes: c3a7abf06ce7 ("nilfs2: support contiguous lookup of blocks") +Signed-off-by: Ryusuke Konishi +Reported-by: syzbot+cfed5b56649bddf80d6e@syzkaller.appspotmail.com +Closes: https://syzkaller.appspot.com/bug?extid=cfed5b56649bddf80d6e +Tested-by: Ryusuke Konishi +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + fs/nilfs2/btree.c | 9 +++++++-- + fs/nilfs2/direct.c | 9 +++++++-- + 2 files changed, 14 insertions(+), 4 deletions(-) + +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c +index 13592e82eaf68..65659fa0372e6 100644 +--- a/fs/nilfs2/btree.c ++++ b/fs/nilfs2/btree.c +@@ -724,7 +724,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, + dat = nilfs_bmap_get_dat(btree); + ret = nilfs_dat_translate(dat, ptr, &blocknr); + if (ret < 0) +- goto out; ++ goto dat_error; + ptr = blocknr; + } + cnt = 1; +@@ -743,7 +743,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, + if (dat) { + ret = nilfs_dat_translate(dat, ptr2, &blocknr); + if (ret < 0) +- goto out; ++ goto dat_error; + ptr2 = blocknr; + } + if (ptr2 != ptr + cnt || ++cnt == maxblocks) +@@ -781,6 +781,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, + out: + nilfs_btree_free_path(path); + return ret; ++ ++ dat_error: ++ if (ret == -ENOENT) ++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */ ++ goto out; + } + + static void nilfs_btree_promote_key(struct nilfs_bmap *btree, +diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c +index 4c85914f2abc3..893ab36824cc2 100644 +--- a/fs/nilfs2/direct.c ++++ b/fs/nilfs2/direct.c +@@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, + dat = nilfs_bmap_get_dat(direct); + ret = nilfs_dat_translate(dat, ptr, &blocknr); + if (ret < 0) +- return ret; ++ goto dat_error; + ptr = blocknr; + } + +@@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, + if (dat) { + ret = nilfs_dat_translate(dat, ptr2, &blocknr); + if (ret < 0) +- return ret; ++ goto dat_error; + ptr2 = blocknr; + } + if (ptr2 != ptr + cnt) +@@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, + } + *ptrp = ptr; + return cnt; ++ ++ dat_error: ++ if (ret == -ENOENT) ++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */ ++ return ret; + } + + static __u64 +-- +2.43.0 + diff --git a/queue-6.8/nilfs2-prevent-kernel-bug-at-submit_bh_wbc.patch b/queue-6.8/nilfs2-prevent-kernel-bug-at-submit_bh_wbc.patch new file mode 100644 index 00000000000..f6e4e5cf447 --- /dev/null +++ b/queue-6.8/nilfs2-prevent-kernel-bug-at-submit_bh_wbc.patch @@ -0,0 +1,44 @@ +From 5829198e24223ae756c3fb748360ab08bef11f8c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 19:58:27 +0900 +Subject: nilfs2: prevent kernel bug at submit_bh_wbc() + +From: Ryusuke Konishi + +[ Upstream commit 269cdf353b5bdd15f1a079671b0f889113865f20 ] + +Fix a bug where nilfs_get_block() returns a successful status when +searching and inserting the specified block both fail inconsistently. If +this inconsistent behavior is not due to a previously fixed bug, then an +unexpected race is occurring, so return a temporary error -EAGAIN instead. 
+ +This prevents callers such as __block_write_begin_int() from requesting a +read into a buffer that is not mapped, which would cause the BUG_ON check +for the BH_Mapped flag in submit_bh_wbc() to fail. + +Link: https://lkml.kernel.org/r/20240313105827.5296-3-konishi.ryusuke@gmail.com +Fixes: 1f5abe7e7dbc ("nilfs2: replace BUG_ON and BUG calls triggerable from ioctl") +Signed-off-by: Ryusuke Konishi +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + fs/nilfs2/inode.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index 9c334c722fc1c..5a888b2c1803d 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, + "%s (ino=%lu): a race condition while inserting a data block at offset=%llu", + __func__, inode->i_ino, + (unsigned long long)blkoff); +- err = 0; ++ err = -EAGAIN; + } + nilfs_transaction_abort(inode->i_sb); + goto out; +-- +2.43.0 + diff --git a/queue-6.8/nvmem-meson-efuse-fix-function-pointer-type-mismatch.patch b/queue-6.8/nvmem-meson-efuse-fix-function-pointer-type-mismatch.patch new file mode 100644 index 00000000000..631936f7763 --- /dev/null +++ b/queue-6.8/nvmem-meson-efuse-fix-function-pointer-type-mismatch.patch @@ -0,0 +1,80 @@ +From 00a7f88a261b388136c88447786e35238a46ad7e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 24 Feb 2024 11:40:23 +0000 +Subject: nvmem: meson-efuse: fix function pointer type mismatch + +From: Jerome Brunet + +[ Upstream commit cbd38332c140829ab752ba4e727f98be5c257f18 ] + +clang-16 warns about casting functions to incompatible types, as is done +here to call clk_disable_unprepare: + +drivers/nvmem/meson-efuse.c:78:12: error: cast from 'void (*)(struct clk *)' to 'void (*)(void *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict] + 78 | (void(*)(void *))clk_disable_unprepare, + +The pattern of getting, enabling and setting a disable callback for a +clock can be replaced with devm_clk_get_enabled(), which also fixes +this warning. 
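+
+For reference, the cast-free way to keep an explicit devm action would
+have been a small wrapper with the expected prototype (the helper name
+below is made up for illustration):
+
+  static void meson_efuse_clk_disable(void *data)
+  {
+          clk_disable_unprepare(data);
+  }
+
+  ret = devm_add_action_or_reset(dev, meson_efuse_clk_disable, clk);
+
+but devm_clk_get_enabled() already combines the get, prepare_enable and
+cleanup-action registration, so the wrapper becomes unnecessary.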
+ +Fixes: 611fbca1c861 ("nvmem: meson-efuse: add peripheral clock") +Cc: Stable@vger.kernel.org +Reported-by: Arnd Bergmann +Signed-off-by: Jerome Brunet +Reviewed-by: Martin Blumenstingl +Acked-by: Arnd Bergmann +Reviewed-by: Justin Stitt +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20240224114023.85535-2-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/nvmem/meson-efuse.c | 25 +++---------------------- + 1 file changed, 3 insertions(+), 22 deletions(-) + +diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c +index b922df99f9bc3..33678d0af2c24 100644 +--- a/drivers/nvmem/meson-efuse.c ++++ b/drivers/nvmem/meson-efuse.c +@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev) + struct nvmem_config *econfig; + struct clk *clk; + unsigned int size; +- int ret; + + sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0); + if (!sm_np) { +@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev) + if (!fw) + return -EPROBE_DEFER; + +- clk = devm_clk_get(dev, NULL); +- if (IS_ERR(clk)) { +- ret = PTR_ERR(clk); +- if (ret != -EPROBE_DEFER) +- dev_err(dev, "failed to get efuse gate"); +- return ret; +- } +- +- ret = clk_prepare_enable(clk); +- if (ret) { +- dev_err(dev, "failed to enable gate"); +- return ret; +- } +- +- ret = devm_add_action_or_reset(dev, +- (void(*)(void *))clk_disable_unprepare, +- clk); +- if (ret) { +- dev_err(dev, "failed to add disable callback"); +- return ret; +- } ++ clk = devm_clk_get_enabled(dev, NULL); ++ if (IS_ERR(clk)) ++ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate"); + + if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) { + dev_err(dev, "failed to get max user"); +-- +2.43.0 + diff --git a/queue-6.8/parisc-avoid-clobbering-the-c-b-bits-in-the-psw-with.patch b/queue-6.8/parisc-avoid-clobbering-the-c-b-bits-in-the-psw-with.patch new file mode 100644 index 00000000000..f61e738514f --- /dev/null +++ b/queue-6.8/parisc-avoid-clobbering-the-c-b-bits-in-the-psw-with.patch @@ -0,0 +1,64 @@ +From 528ef83754ef1b7ae093cda59de88c4898c7814f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Feb 2024 16:40:51 +0100 +Subject: parisc: Avoid clobbering the C/B bits in the PSW with tophys and + tovirt macros + +From: John David Anglin + +[ Upstream commit 4603fbaa76b5e703b38ac8cc718102834eb6e330 ] + +Use add,l to avoid clobbering the C/B bits in the PSW. + +Signed-off-by: John David Anglin +Signed-off-by: Helge Deller +Cc: stable@vger.kernel.org # v5.10+ +Signed-off-by: Sasha Levin +--- + arch/parisc/include/asm/assembly.h | 18 ++++++++++-------- + 1 file changed, 10 insertions(+), 8 deletions(-) + +diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h +index 5937d5edaba1e..000a28e1c5e8d 100644 +--- a/arch/parisc/include/asm/assembly.h ++++ b/arch/parisc/include/asm/assembly.h +@@ -97,26 +97,28 @@ + * version takes two arguments: a src and destination register. + * However, the source and destination registers can not be + * the same register. ++ * ++ * We use add,l to avoid clobbering the C/B bits in the PSW. 
+ */ + + .macro tophys grvirt, grphys +- ldil L%(__PAGE_OFFSET), \grphys +- sub \grvirt, \grphys, \grphys ++ ldil L%(-__PAGE_OFFSET), \grphys ++ addl \grvirt, \grphys, \grphys + .endm +- ++ + .macro tovirt grphys, grvirt + ldil L%(__PAGE_OFFSET), \grvirt +- add \grphys, \grvirt, \grvirt ++ addl \grphys, \grvirt, \grvirt + .endm + + .macro tophys_r1 gr +- ldil L%(__PAGE_OFFSET), %r1 +- sub \gr, %r1, \gr ++ ldil L%(-__PAGE_OFFSET), %r1 ++ addl \gr, %r1, \gr + .endm +- ++ + .macro tovirt_r1 gr + ldil L%(__PAGE_OFFSET), %r1 +- add \gr, %r1, \gr ++ addl \gr, %r1, \gr + .endm + + .macro delay value +-- +2.43.0 + diff --git a/queue-6.8/parisc-fix-csum_ipv6_magic-on-32-bit-systems.patch b/queue-6.8/parisc-fix-csum_ipv6_magic-on-32-bit-systems.patch new file mode 100644 index 00000000000..31f88140c0d --- /dev/null +++ b/queue-6.8/parisc-fix-csum_ipv6_magic-on-32-bit-systems.patch @@ -0,0 +1,55 @@ +From f6f2d81e53ef772e7aea21f38a99aadd0d1a66ef Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Feb 2024 11:15:56 -0800 +Subject: parisc: Fix csum_ipv6_magic on 32-bit systems + +From: Guenter Roeck + +[ Upstream commit 4408ba75e4ba80c91fde7e10bccccf388f5c09be ] + +Calculating the IPv6 checksum on 32-bit systems missed overflows when +adding the proto+len fields into the checksum. This results in the +following unit test failure. + + # test_csum_ipv6_magic: ASSERTION FAILED at lib/checksum_kunit.c:506 + Expected ( u64)csum_result == ( u64)expected, but + ( u64)csum_result == 46722 (0xb682) + ( u64)expected == 46721 (0xb681) + not ok 5 test_csum_ipv6_magic + +This is probably rarely seen in the real world because proto+len are +usually small values which will rarely result in overflows when calculating +the checksum. However, the unit test code uses large values for the length +field, causing the test to fail. + +Fix the problem by adding the missing carry into the final checksum. + +Cc: Palmer Dabbelt +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Signed-off-by: Guenter Roeck +Tested-by: Charlie Jenkins +Reviewed-by: Charlie Jenkins +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +--- + arch/parisc/include/asm/checksum.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h +index f705e5dd10742..e619e67440db9 100644 +--- a/arch/parisc/include/asm/checksum.h ++++ b/arch/parisc/include/asm/checksum.h +@@ -163,7 +163,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + " ldw,ma 4(%2), %7\n" /* 4th daddr */ + " addc %6, %0, %0\n" + " addc %7, %0, %0\n" +-" addc %3, %0, %0\n" /* fold in proto+len, catch carry */ ++" addc %3, %0, %0\n" /* fold in proto+len */ ++" addc 0, %0, %0\n" /* add carry */ + + #endif + : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len), +-- +2.43.0 + diff --git a/queue-6.8/parisc-fix-csum_ipv6_magic-on-64-bit-systems.patch b/queue-6.8/parisc-fix-csum_ipv6_magic-on-64-bit-systems.patch new file mode 100644 index 00000000000..cdc77054439 --- /dev/null +++ b/queue-6.8/parisc-fix-csum_ipv6_magic-on-64-bit-systems.patch @@ -0,0 +1,55 @@ +From f6d55f4509e8675050bbe709de84262a9cb13792 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Feb 2024 15:46:31 -0800 +Subject: parisc: Fix csum_ipv6_magic on 64-bit systems + +From: Guenter Roeck + +[ Upstream commit 4b75b12d70506e31fc02356bbca60f8d5ca012d0 ] + +hppa 64-bit systems calculates the IPv6 checksum using 64-bit add +operations. 
The last add folds protocol and length fields into the 64-bit +result. While unlikely, this operation can overflow. The overflow can be +triggered with a code sequence such as the following. + + /* try to trigger massive overflows */ + memset(tmp_buf, 0xff, sizeof(struct in6_addr)); + csum_result = csum_ipv6_magic((struct in6_addr *)tmp_buf, + (struct in6_addr *)tmp_buf, + 0xffff, 0xff, 0xffffffff); + +Fix the problem by adding any overflows from the final add operation into +the calculated checksum. Fortunately, we can do this without additional +cost by replacing the add operation used to fold the checksum into 32 bit +with "add,dc" to add in the missing carry. + +Cc: Palmer Dabbelt +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Signed-off-by: Guenter Roeck +Reviewed-by: Charlie Jenkins +Tested-by: Guenter Roeck +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +--- + arch/parisc/include/asm/checksum.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h +index e619e67440db9..c949aa20fa162 100644 +--- a/arch/parisc/include/asm/checksum.h ++++ b/arch/parisc/include/asm/checksum.h +@@ -137,8 +137,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */ + " extrd,u %0, 31, 32, %4\n"/* copy upper half down */ + " depdi 0, 31, 32, %0\n"/* clear upper half */ +-" add %4, %0, %0\n" /* fold into 32-bits */ +-" addc 0, %0, %0\n" /* add carry */ ++" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */ ++" addc 0, %0, %0\n" /* add final carry */ + + #else + +-- +2.43.0 + diff --git a/queue-6.8/parisc-fix-ip_fast_csum.patch b/queue-6.8/parisc-fix-ip_fast_csum.patch new file mode 100644 index 00000000000..47346d4d324 --- /dev/null +++ b/queue-6.8/parisc-fix-ip_fast_csum.patch @@ -0,0 +1,66 @@ +From fa687da0aeb7d122892022ec729b6b16d572e3f6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Feb 2024 09:55:26 -0800 +Subject: parisc: Fix ip_fast_csum + +From: Guenter Roeck + +[ Upstream commit a2abae8f0b638c31bb9799d9dd847306e0d005bd ] + +IP checksum unit tests report the following error when run on hppa/hppa64. + + # test_ip_fast_csum: ASSERTION FAILED at lib/checksum_kunit.c:463 + Expected ( u64)csum_result == ( u64)expected, but + ( u64)csum_result == 33754 (0x83da) + ( u64)expected == 10946 (0x2ac2) + not ok 4 test_ip_fast_csum + +0x83da is the expected result if the IP header length is 20 bytes. 0x2ac2 +is the expected result if the IP header length is 24 bytes. The test fails +with an IP header length of 24 bytes. It appears that ip_fast_csum() +always returns the checksum for a 20-byte header, no matter how long +the header actually is. + +Code analysis shows a suspicious assembler sequence in ip_fast_csum(). + + " addc %0, %3, %0\n" + "1: ldws,ma 4(%1), %3\n" + " addib,< 0, %2, 1b\n" <--- + +While my understanding of HPPA assembler is limited, it does not seem +to make much sense to subtract 0 from a register and to expect the result +to ever be negative. Subtracting 1 from the length parameter makes more +sense. On top of that, the operation should be repeated if and only if +the result is still > 0, so change the suspicious instruction to + " addib,> -1, %2, 1b\n" + +The IP checksum unit test passes after this change. 
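+
+Roughly translated to C (an approximation of the branch semantics only,
+the real loop is hand-written assembly with a delay slot):
+
+  /* old: addib,< 0, %2, 1b   ~  if ((remaining += 0) < 0) goto 1b;  */
+  /* new: addib,> -1, %2, 1b  ~  if (--remaining > 0)      goto 1b;  */
+
+A remaining-word count never goes negative by adding zero, so the old
+form never looped over IP option words, which matches the observation
+above that the result always corresponds to a 20-byte header.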
+ +Cc: Palmer Dabbelt +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Signed-off-by: Guenter Roeck +Tested-by: Charlie Jenkins +Reviewed-by: Charlie Jenkins +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +--- + arch/parisc/include/asm/checksum.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h +index 3c43baca7b397..f705e5dd10742 100644 +--- a/arch/parisc/include/asm/checksum.h ++++ b/arch/parisc/include/asm/checksum.h +@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) + " addc %0, %5, %0\n" + " addc %0, %3, %0\n" + "1: ldws,ma 4(%1), %3\n" +-" addib,< 0, %2, 1b\n" ++" addib,> -1, %2, 1b\n" + " addc %0, %3, %0\n" + "\n" + " extru %0, 31, 16, %4\n" +-- +2.43.0 + diff --git a/queue-6.8/parisc-strip-upper-32-bit-of-sum-in-csum_ipv6_magic-.patch b/queue-6.8/parisc-strip-upper-32-bit-of-sum-in-csum_ipv6_magic-.patch new file mode 100644 index 00000000000..2f6d056e419 --- /dev/null +++ b/queue-6.8/parisc-strip-upper-32-bit-of-sum-in-csum_ipv6_magic-.patch @@ -0,0 +1,55 @@ +From b2d28bbd837fc82ef51d3dafc32e38f2f1c2c537 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 12:33:51 -0800 +Subject: parisc: Strip upper 32 bit of sum in csum_ipv6_magic for 64-bit + builds + +From: Guenter Roeck + +[ Upstream commit 0568b6f0d863643db2edcc7be31165740c89fa82 ] + +IPv6 checksum tests with unaligned addresses on 64-bit builds result +in unexpected failures. + +Expected expected == csum_result, but + expected == 46591 (0xb5ff) + csum_result == 46381 (0xb52d) +with alignment offset 1 + +Oddly enough, the problem disappeared after adding test code into +the beginning of csum_ipv6_magic(). + +As it turns out, the 'sum' parameter of csum_ipv6_magic() is declared as +__wsum, which is a 32-bit variable. However, it is treated as 64-bit +variable in the 64-bit assembler code. Tests showed that the upper 32 bit +of the register used to pass the variable are _not_ cleared when entering +the function. This can result in checksum calculation errors. + +Clearing the upper 32 bit of 'sum' as first operation in the assembler +code fixes the problem. + +Acked-by: Helge Deller +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Signed-off-by: Guenter Roeck +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +--- + arch/parisc/include/asm/checksum.h | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h +index c949aa20fa162..2aceebcd695c8 100644 +--- a/arch/parisc/include/asm/checksum.h ++++ b/arch/parisc/include/asm/checksum.h +@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + ** Try to keep 4 registers with "live" values ahead of the ALU. 
+ */ + ++" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */ + " ldd,ma 8(%1), %4\n" /* get 1st saddr word */ + " ldd,ma 8(%2), %5\n" /* get 1st daddr word */ + " add %4, %0, %0\n" +-- +2.43.0 + diff --git a/queue-6.8/parisc-unaligned-rewrite-64-bit-inline-assembly-of-e.patch b/queue-6.8/parisc-unaligned-rewrite-64-bit-inline-assembly-of-e.patch new file mode 100644 index 00000000000..f8c726c97a7 --- /dev/null +++ b/queue-6.8/parisc-unaligned-rewrite-64-bit-inline-assembly-of-e.patch @@ -0,0 +1,88 @@ +From d634c80a02d77b89371d06598c90a1cfe3b8e43b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Feb 2024 14:26:55 +0100 +Subject: parisc/unaligned: Rewrite 64-bit inline assembly of emulate_ldd() + +From: Guenter Roeck + +[ Upstream commit e5db6a74571a8baf87a116ea39aab946283362ff ] + +Convert to use real temp variables instead of clobbering processor +registers. This aligns the 64-bit inline assembly code with the 32-bit +assembly code which was rewritten with commit 427c1073a2a1 +("parisc/unaligned: Rewrite 32-bit inline assembly of emulate_ldd()"). + +While at it, fix comment in 32-bit rewrite code. Temporary variables are +now used for both 32-bit and 64-bit code, so move their declarations +to the function header. + +No functional change intended. + +Signed-off-by: Guenter Roeck +Cc: stable@vger.kernel.org # v6.0+ +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +--- + arch/parisc/kernel/unaligned.c | 27 ++++++++++++--------------- + 1 file changed, 12 insertions(+), 15 deletions(-) + +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c +index c520e551a1652..a8e75e5b884a7 100644 +--- a/arch/parisc/kernel/unaligned.c ++++ b/arch/parisc/kernel/unaligned.c +@@ -169,6 +169,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) + static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) + { + unsigned long saddr = regs->ior; ++ unsigned long shift, temp1; + __u64 val = 0; + ASM_EXCEPTIONTABLE_VAR(ret); + +@@ -180,25 +181,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) + + #ifdef CONFIG_64BIT + __asm__ __volatile__ ( +-" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ +-" mtsp %4, %%sr1\n" +-" depd %%r0,63,3,%3\n" +-"1: ldd 0(%%sr1,%3),%0\n" +-"2: ldd 8(%%sr1,%3),%%r20\n" +-" subi 64,%%r19,%%r19\n" +-" mtsar %%r19\n" +-" shrpd %0,%%r20,%%sar,%0\n" ++" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */ ++" mtsp %5, %%sr1\n" ++" depd %%r0,63,3,%2\n" ++"1: ldd 0(%%sr1,%2),%0\n" ++"2: ldd 8(%%sr1,%2),%4\n" ++" subi 64,%3,%3\n" ++" mtsar %3\n" ++" shrpd %0,%4,%%sar,%0\n" + "3: \n" + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") +- : "=r" (val), "+r" (ret) +- : "0" (val), "r" (saddr), "r" (regs->isr) +- : "r19", "r20" ); ++ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) ++ : "r" (regs->isr) ); + #else +- { +- unsigned long shift, temp1; + __asm__ __volatile__ ( +-" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */ ++" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */ + " mtsp %5, %%sr1\n" + " dep %%r0,31,2,%2\n" + "1: ldw 0(%%sr1,%2),%0\n" +@@ -214,7 +212,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); +- } + #endif + + DPRINTF("val = 0x%llx\n", val); +-- +2.43.0 + diff --git a/queue-6.8/pci-aer-block-runtime-suspend-when-handling-errors.patch 
b/queue-6.8/pci-aer-block-runtime-suspend-when-handling-errors.patch new file mode 100644 index 00000000000..5dafdeb3723 --- /dev/null +++ b/queue-6.8/pci-aer-block-runtime-suspend-when-handling-errors.patch @@ -0,0 +1,99 @@ +From aab35d0089362ef4763028f16be320b9097db714 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Feb 2024 13:01:35 +0100 +Subject: PCI/AER: Block runtime suspend when handling errors + +From: Stanislaw Gruszka + +[ Upstream commit 002bf2fbc00e5c4b95fb167287e2ae7d1973281e ] + +PM runtime can be done simultaneously with AER error handling. Avoid that +by using pm_runtime_get_sync() before and pm_runtime_put() after reset in +pcie_do_recovery() for all recovering devices. + +pm_runtime_get_sync() will increase dev->power.usage_count counter to +prevent any possible future request to runtime suspend a device. It will +also resume a device, if it was previously in D3hot state. + +I tested with igc device by doing simultaneous aer_inject and rpm +suspend/resume via /sys/bus/pci/devices/PCI_ID/power/control and can +reproduce: + + igc 0000:02:00.0: not ready 65535ms after bus reset; giving up + pcieport 0000:00:1c.2: AER: Root Port link has been reset (-25) + pcieport 0000:00:1c.2: AER: subordinate device reset failed + pcieport 0000:00:1c.2: AER: device recovery failed + igc 0000:02:00.0: Unable to change power state from D3hot to D0, device inaccessible + +The problem disappears when this patch is applied. + +Link: https://lore.kernel.org/r/20240212120135.146068-1-stanislaw.gruszka@linux.intel.com +Signed-off-by: Stanislaw Gruszka +Signed-off-by: Bjorn Helgaas +Reviewed-by: Kuppuswamy Sathyanarayanan +Acked-by: Rafael J. Wysocki +Cc: +Signed-off-by: Sasha Levin +--- + drivers/pci/pcie/err.c | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c +index 59c90d04a609a..705893b5f7b09 100644 +--- a/drivers/pci/pcie/err.c ++++ b/drivers/pci/pcie/err.c +@@ -13,6 +13,7 @@ + #define dev_fmt(fmt) "AER: " fmt + + #include ++#include + #include + #include + #include +@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev, + return 0; + } + ++static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data) ++{ ++ pm_runtime_get_sync(&pdev->dev); ++ return 0; ++} ++ ++static int pci_pm_runtime_put(struct pci_dev *pdev, void *data) ++{ ++ pm_runtime_put(&pdev->dev); ++ return 0; ++} ++ + static int report_frozen_detected(struct pci_dev *dev, void *data) + { + return report_error_detected(dev, pci_channel_io_frozen, data); +@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, + else + bridge = pci_upstream_bridge(dev); + ++ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL); ++ + pci_dbg(bridge, "broadcast error_detected message\n"); + if (state == pci_channel_io_frozen) { + pci_walk_bridge(bridge, report_frozen_detected, &status); +@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, + pcie_clear_device_status(dev); + pci_aer_clear_nonfatal_status(dev); + } ++ ++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); ++ + pci_info(bridge, "device recovery successful\n"); + return status; + + failed: ++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); ++ + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); + + /* TODO: Should kernel panic here? 
*/ +-- +2.43.0 + diff --git a/queue-6.8/pci-dpc-quirk-pio-log-size-for-intel-raptor-lake-roo.patch b/queue-6.8/pci-dpc-quirk-pio-log-size-for-intel-raptor-lake-roo.patch new file mode 100644 index 00000000000..d1abdf19997 --- /dev/null +++ b/queue-6.8/pci-dpc-quirk-pio-log-size-for-intel-raptor-lake-roo.patch @@ -0,0 +1,55 @@ +From 853071e647b30f1854432813a7eebcad5615c401 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 12:30:56 +0100 +Subject: PCI/DPC: Quirk PIO log size for Intel Raptor Lake Root Ports + +From: Paul Menzel + +[ Upstream commit 627c6db20703b5d18d928464f411d0d4ec327508 ] + +Commit 5459c0b70467 ("PCI/DPC: Quirk PIO log size for certain Intel Root +Ports") and commit 3b8803494a06 ("PCI/DPC: Quirk PIO log size for Intel Ice +Lake Root Ports") add quirks for Ice, Tiger and Alder Lake Root Ports. +System firmware for Raptor Lake still has the bug, so Linux logs the +warning below on several Raptor Lake systems like Dell Precision 3581 with +Intel Raptor Lake processor (0W18NX) system firmware/BIOS version 1.10.1. + + pci 0000:00:07.0: [8086:a76e] type 01 class 0x060400 + pci 0000:00:07.0: DPC: RP PIO log size 0 is invalid + pci 0000:00:07.1: [8086:a73f] type 01 class 0x060400 + pci 0000:00:07.1: DPC: RP PIO log size 0 is invalid + +Apply the quirk for Raptor Lake Root Ports as well. + +This also enables the DPC driver to dump the RP PIO Log registers when DPC +is triggered. + +Link: https://lore.kernel.org/r/20240305113057.56468-1-pmenzel@molgen.mpg.de +Reported-by: Niels van Aert +Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218560 +Signed-off-by: Paul Menzel +Signed-off-by: Bjorn Helgaas +Cc: +Cc: Mika Westerberg +Cc: Niels van Aert +Signed-off-by: Sasha Levin +--- + drivers/pci/quirks.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 2ebbe51a7efe0..eff7f5df08e27 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -6226,6 +6226,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size); + #endif + + /* +-- +2.43.0 + diff --git a/queue-6.8/pci-dwc-endpoint-fix-advertised-resizable-bar-size.patch b/queue-6.8/pci-dwc-endpoint-fix-advertised-resizable-bar-size.patch new file mode 100644 index 00000000000..cbc96303549 --- /dev/null +++ b/queue-6.8/pci-dwc-endpoint-fix-advertised-resizable-bar-size.patch @@ -0,0 +1,75 @@ +From bb54504dd226a6c6fe52b661f7943465cec74263 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 12:15:20 +0100 +Subject: PCI: dwc: endpoint: Fix advertised resizable BAR size +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Niklas Cassel + +[ Upstream commit 72e34b8593e08a0ee759b7a038e0b178418ea6f8 ] + +The commit message in commit fc9a77040b04 ("PCI: designware-ep: Configure +Resizable BAR cap to advertise the smallest size") claims that it modifies +the Resizable BAR capability to only advertise support for 1 MB size BARs. + +However, the commit writes all zeroes to PCI_REBAR_CAP (the register which +contains the possible BAR sizes that a BAR be resized to). 
+ +According to the spec, it is illegal to not have a bit set in +PCI_REBAR_CAP, and 1 MB is the smallest size allowed. + +Set bit 4 in PCI_REBAR_CAP, so that we actually advertise support for a +1 MB BAR size. + +Before: + Capabilities: [2e8 v1] Physical Resizable BAR + BAR 0: current size: 1MB + BAR 1: current size: 1MB + BAR 2: current size: 1MB + BAR 3: current size: 1MB + BAR 4: current size: 1MB + BAR 5: current size: 1MB +After: + Capabilities: [2e8 v1] Physical Resizable BAR + BAR 0: current size: 1MB, supported: 1MB + BAR 1: current size: 1MB, supported: 1MB + BAR 2: current size: 1MB, supported: 1MB + BAR 3: current size: 1MB, supported: 1MB + BAR 4: current size: 1MB, supported: 1MB + BAR 5: current size: 1MB, supported: 1MB + +Fixes: fc9a77040b04 ("PCI: designware-ep: Configure Resizable BAR cap to advertise the smallest size") +Link: https://lore.kernel.org/linux-pci/20240307111520.3303774-1-cassel@kernel.org +Signed-off-by: Niklas Cassel +Signed-off-by: Krzysztof Wilczyński +Reviewed-by: Manivannan Sadhasivam +Cc: # 5.2 +Signed-off-by: Sasha Levin +--- + drivers/pci/controller/dwc/pcie-designware-ep.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c +index 9a437cfce073c..746a11dcb67f1 100644 +--- a/drivers/pci/controller/dwc/pcie-designware-ep.c ++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c +@@ -629,8 +629,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> + PCI_REBAR_CTRL_NBAR_SHIFT; + ++ /* ++ * PCIe r6.0, sec 7.8.6.2 require us to support at least one ++ * size in the range from 1 MB to 512 GB. Advertise support ++ * for 1 MB BAR size only. ++ */ + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) +- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); ++ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); + } + + /* +-- +2.43.0 + diff --git a/queue-6.8/pci-hv-fix-ring-buffer-size-calculation.patch b/queue-6.8/pci-hv-fix-ring-buffer-size-calculation.patch new file mode 100644 index 00000000000..2540e26a66a --- /dev/null +++ b/queue-6.8/pci-hv-fix-ring-buffer-size-calculation.patch @@ -0,0 +1,65 @@ +From c3b311af8e57d1084ad1c2711cb5c9ca19094604 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Feb 2024 12:22:40 -0800 +Subject: PCI: hv: Fix ring buffer size calculation +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Michael Kelley + +[ Upstream commit b5ff74c1ef50fe08e384026875fec660fadfaedd ] + +For a physical PCI device that is passed through to a Hyper-V guest VM, +current code specifies the VMBus ring buffer size as 4 pages. But this +is an inappropriate dependency, since the amount of ring buffer space +needed is unrelated to PAGE_SIZE. For example, on x86 the ring buffer +size ends up as 16 Kbytes, while on ARM64 with 64 Kbyte pages, the ring +size bloats to 256 Kbytes. The ring buffer for PCI pass-thru devices +is used for only a few messages during device setup and removal, so any +space above a few Kbytes is wasted. + +Fix this by declaring the ring buffer size to be a fixed 16 Kbytes. +Furthermore, use the VMBUS_RING_SIZE() macro so that the ring buffer +header is properly accounted for, and so the size is rounded up to a +page boundary, using the page size for which the kernel is built. 
While +w/64 Kbyte pages this results in a 64 Kbyte ring buffer header plus a +64 Kbyte ring buffer, that's the smallest possible with that page size. +It's still 128 Kbytes better than the current code. + +Link: https://lore.kernel.org/linux-pci/20240216202240.251818-1-mhklinux@outlook.com +Signed-off-by: Michael Kelley +Signed-off-by: Krzysztof Wilczyński +Reviewed-by: Kuppuswamy Sathyanarayanan +Reviewed-by: Ilpo Jarvinen +Reviewed-by: Long Li +Cc: # 5.15.x +Signed-off-by: Sasha Levin +--- + drivers/pci/controller/pci-hyperv.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c +index 1eaffff40b8d4..5992280e8110b 100644 +--- a/drivers/pci/controller/pci-hyperv.c ++++ b/drivers/pci/controller/pci-hyperv.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + #include + + /* +@@ -465,7 +466,7 @@ struct pci_eject_response { + u32 status; + } __packed; + +-static int pci_ring_size = (4 * PAGE_SIZE); ++static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K); + + /* + * Driver specific state. +-- +2.43.0 + diff --git a/queue-6.8/pci-pm-drain-runtime-idle-callbacks-before-driver-re.patch b/queue-6.8/pci-pm-drain-runtime-idle-callbacks-before-driver-re.patch new file mode 100644 index 00000000000..ddb1bdfdbac --- /dev/null +++ b/queue-6.8/pci-pm-drain-runtime-idle-callbacks-before-driver-re.patch @@ -0,0 +1,76 @@ +From e5ff35f6009640a2f1220eff22067cff0740bbc8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 11:45:38 +0100 +Subject: PCI/PM: Drain runtime-idle callbacks before driver removal + +From: Rafael J. Wysocki + +[ Upstream commit 9d5286d4e7f68beab450deddbb6a32edd5ecf4bf ] + +A race condition between the .runtime_idle() callback and the .remove() +callback in the rtsx_pcr PCI driver leads to a kernel crash due to an +unhandled page fault [1]. + +The problem is that rtsx_pci_runtime_idle() is not expected to be running +after pm_runtime_get_sync() has been called, but the latter doesn't really +guarantee that. It only guarantees that the suspend and resume callbacks +will not be running when it returns. + +However, if a .runtime_idle() callback is already running when +pm_runtime_get_sync() is called, the latter will notice that the runtime PM +status of the device is RPM_ACTIVE and it will return right away without +waiting for the former to complete. In fact, it cannot wait for +.runtime_idle() to complete because it may be called from that callback (it +arguably does not make much sense to do that, but it is not strictly +prohibited). + +Thus in general, whoever is providing a .runtime_idle() callback needs +to protect it from running in parallel with whatever code runs after +pm_runtime_get_sync(). [Note that .runtime_idle() will not start after +pm_runtime_get_sync() has returned, but it may continue running then if it +has started earlier.] + +One way to address that race condition is to call pm_runtime_barrier() +after pm_runtime_get_sync() (not before it, because a nonzero value of the +runtime PM usage counter is necessary to prevent runtime PM callbacks from +being invoked) to wait for the .runtime_idle() callback to complete should +it be running at that point. 
A suitable place for doing that is in +pci_device_remove() which calls pm_runtime_get_sync() before removing the +driver, so it may as well call pm_runtime_barrier() subsequently, which +will prevent the race in question from occurring, not just in the rtsx_pcr +driver, but in any PCI drivers providing .runtime_idle() callbacks. + +Link: https://lore.kernel.org/lkml/20240229062201.49500-1-kai.heng.feng@canonical.com/ # [1] +Link: https://lore.kernel.org/r/5761426.DvuYhMxLoT@kreacher +Reported-by: Kai-Heng Feng +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Bjorn Helgaas +Tested-by: Ricky Wu +Acked-by: Kai-Heng Feng +Cc: +Signed-off-by: Sasha Levin +--- + drivers/pci/pci-driver.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 51ec9e7e784f0..9c59bf03d6579 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev) + + if (drv->remove) { + pm_runtime_get_sync(dev); ++ /* ++ * If the driver provides a .runtime_idle() callback and it has ++ * started to run already, it may continue to run in parallel ++ * with the code below, so wait until all of the runtime PM ++ * activity has completed. ++ */ ++ pm_runtime_barrier(dev); + drv->remove(pci_dev); + pm_runtime_put_noidle(dev); + } +-- +2.43.0 + diff --git a/queue-6.8/pci-qcom-disable-aspm-l0s-for-sc8280xp-sa8540p-and-s.patch b/queue-6.8/pci-qcom-disable-aspm-l0s-for-sc8280xp-sa8540p-and-s.patch new file mode 100644 index 00000000000..c07c5afcf2f --- /dev/null +++ b/queue-6.8/pci-qcom-disable-aspm-l0s-for-sc8280xp-sa8540p-and-s.patch @@ -0,0 +1,113 @@ +From df4bdb5d63de893b7f5a82212bb48ce5b3ae6f27 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Mar 2024 10:56:49 +0100 +Subject: PCI: qcom: Disable ASPM L0s for sc8280xp, sa8540p and sa8295p + +From: Johan Hovold + +[ Upstream commit d1997c98781459f7b6d0bf1858f538f48454a97b ] + +Commit 9f4f3dfad8cf ("PCI: qcom: Enable ASPM for platforms supporting +1.9.0 ops") started enabling ASPM unconditionally when the hardware +claims to support it. This triggers Correctable Errors for some PCIe +devices on machines like the Lenovo ThinkPad X13s when L0s is enabled, +which could indicate an incomplete driver ASPM implementation or that +the hardware does in fact not support L0s. + +This has now been confirmed by Qualcomm to be the case for sc8280xp and +its derivate platforms (e.g. sa8540p and sa8295p). Specifically, the PHY +configuration used on these platforms is not correctly tuned for L0s and +there is currently no updated configuration available. + +Add a new flag to the driver configuration data and use it to disable +ASPM L0s on sc8280xp, sa8540p and sa8295p for now. + +Note that only the 1.9.0 ops enable ASPM currently. 
+ +Link: https://lore.kernel.org/r/20240306095651.4551-4-johan+linaro@kernel.org +Fixes: 9f4f3dfad8cf ("PCI: qcom: Enable ASPM for platforms supporting 1.9.0 ops") +Signed-off-by: Johan Hovold +Signed-off-by: Lorenzo Pieralisi +Reviewed-by: Manivannan Sadhasivam +Cc: stable@vger.kernel.org # 6.7 +Signed-off-by: Sasha Levin +--- + drivers/pci/controller/dwc/pcie-qcom.c | 31 ++++++++++++++++++++++++-- + 1 file changed, 29 insertions(+), 2 deletions(-) + +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c +index 2ce2a3bd932bd..9f83a1611a207 100644 +--- a/drivers/pci/controller/dwc/pcie-qcom.c ++++ b/drivers/pci/controller/dwc/pcie-qcom.c +@@ -229,6 +229,7 @@ struct qcom_pcie_ops { + + struct qcom_pcie_cfg { + const struct qcom_pcie_ops *ops; ++ bool no_l0s; + }; + + struct qcom_pcie { +@@ -272,6 +273,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci) + return 0; + } + ++static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci) ++{ ++ struct qcom_pcie *pcie = to_qcom_pcie(pci); ++ u16 offset; ++ u32 val; ++ ++ if (!pcie->cfg->no_l0s) ++ return; ++ ++ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); ++ ++ dw_pcie_dbi_ro_wr_en(pci); ++ ++ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); ++ val &= ~PCI_EXP_LNKCAP_ASPM_L0S; ++ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); ++ ++ dw_pcie_dbi_ro_wr_dis(pci); ++} ++ + static void qcom_pcie_clear_hpc(struct dw_pcie *pci) + { + u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); +@@ -961,6 +982,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) + + static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) + { ++ qcom_pcie_clear_aspm_l0s(pcie->pci); + qcom_pcie_clear_hpc(pcie->pci); + + return 0; +@@ -1358,6 +1380,11 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = { + .ops = &ops_2_9_0, + }; + ++static const struct qcom_pcie_cfg cfg_sc8280xp = { ++ .ops = &ops_1_9_0, ++ .no_l0s = true, ++}; ++ + static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = qcom_pcie_link_up, + .start_link = qcom_pcie_start_link, +@@ -1629,11 +1656,11 @@ static const struct of_device_id qcom_pcie_match[] = { + { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 }, + { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, + { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, +- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, ++ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, + { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0}, + { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, +- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, ++ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp }, + { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, + { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, +-- +2.43.0 + diff --git a/queue-6.8/pci-qcom-enable-bdf-to-sid-translation-properly.patch b/queue-6.8/pci-qcom-enable-bdf-to-sid-translation-properly.patch new file mode 100644 index 00000000000..89b4206ed28 --- /dev/null +++ b/queue-6.8/pci-qcom-enable-bdf-to-sid-translation-properly.patch @@ -0,0 +1,98 @@ +From 34e92de1f162604f38d58e0d66508380de5365fc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 16:35:15 +0530 +Subject: PCI: qcom: Enable BDF to SID translation properly +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: 
Manivannan Sadhasivam + +[ Upstream commit bf79e33cdd89db498e00a6131e937259de5f2705 ] + +Qcom SoCs making use of ARM SMMU require BDF to SID translation table in +the driver to properly map the SID for the PCIe devices based on their BDF +identifier. This is currently achieved with the help of +qcom_pcie_config_sid_1_9_0() function for SoCs supporting the 1_9_0 config. + +But With newer Qcom SoCs starting from SM8450, BDF to SID translation is +set to bypass mode by default in hardware. Due to this, the translation +table that is set in the qcom_pcie_config_sid_1_9_0() is essentially +unused and the default SID is used for all endpoints in SoCs starting from +SM8450. + +This is a security concern and also warrants swapping the DeviceID in DT +while using the GIC ITS to handle MSIs from endpoints. The swapping is +currently done like below in DT when using GIC ITS: + + /* + * MSIs for BDF (1:0.0) only works with Device ID 0x5980. + * Hence, the IDs are swapped. + */ + msi-map = <0x0 &gic_its 0x5981 0x1>, + <0x100 &gic_its 0x5980 0x1>; + +Here, swapping of the DeviceIDs ensure that the endpoint with BDF (1:0.0) +gets the DeviceID 0x5980 which is associated with the default SID as per +the iommu mapping in DT. So MSIs were delivered with IDs swapped so far. +But this also means the Root Port (0:0.0) won't receive any MSIs (for PME, +AER etc...) + +So let's fix these issues by clearing the BDF to SID bypass mode for all +SoCs making use of the 1_9_0 config. This allows the PCIe devices to use +the correct SID, thus avoiding the DeviceID swapping hack in DT and also +achieving the isolation between devices. + +Fixes: 4c9398822106 ("PCI: qcom: Add support for configuring BDF to SID mapping for SM8250") +Link: https://lore.kernel.org/linux-pci/20240307-pci-bdf-sid-fix-v1-1-9423a7e2d63c@linaro.org +Signed-off-by: Manivannan Sadhasivam +Signed-off-by: Krzysztof Wilczyński +Cc: stable@vger.kernel.org # 5.11 +Signed-off-by: Sasha Levin +--- + drivers/pci/controller/dwc/pcie-qcom.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c +index 9f83a1611a207..23bb0eee5c411 100644 +--- a/drivers/pci/controller/dwc/pcie-qcom.c ++++ b/drivers/pci/controller/dwc/pcie-qcom.c +@@ -53,6 +53,7 @@ + #define PARF_SLV_ADDR_SPACE_SIZE 0x358 + #define PARF_DEVICE_TYPE 0x1000 + #define PARF_BDF_TO_SID_TABLE_N 0x2000 ++#define PARF_BDF_TO_SID_CFG 0x2c00 + + /* ELBI registers */ + #define ELBI_SYS_CTRL 0x04 +@@ -120,6 +121,9 @@ + /* PARF_DEVICE_TYPE register fields */ + #define DEVICE_TYPE_RC 0x4 + ++/* PARF_BDF_TO_SID_CFG fields */ ++#define BDF_TO_SID_BYPASS BIT(0) ++ + /* ELBI_SYS_CTRL register fields */ + #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) + +@@ -1030,11 +1034,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) + u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; + int i, nr_map, size = 0; + u32 smmu_sid_base; ++ u32 val; + + of_get_property(dev->of_node, "iommu-map", &size); + if (!size) + return 0; + ++ /* Enable BDF to SID translation by disabling bypass mode (default) */ ++ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); ++ val &= ~BDF_TO_SID_BYPASS; ++ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); ++ + map = kzalloc(size, GFP_KERNEL); + if (!map) + return -ENOMEM; +-- +2.43.0 + diff --git a/queue-6.8/pci_iounmap-fix-mmio-mapping-leak.patch b/queue-6.8/pci_iounmap-fix-mmio-mapping-leak.patch new file mode 100644 index 00000000000..764ddb3faa2 --- /dev/null +++ 
b/queue-6.8/pci_iounmap-fix-mmio-mapping-leak.patch @@ -0,0 +1,44 @@ +From 74588fa7fdf6d7e5b055884e6f32dea743cda285 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 10:00:20 +0100 +Subject: pci_iounmap(): Fix MMIO mapping leak + +From: Philipp Stanner + +[ Upstream commit 7626913652cc786c238e2dd7d8740b17d41b2637 ] + +The #ifdef ARCH_HAS_GENERIC_IOPORT_MAP accidentally also guards iounmap(), +which means MMIO mappings are leaked. + +Move the guard so we call iounmap() for MMIO mappings. + +Fixes: 316e8d79a095 ("pci_iounmap'2: Electric Boogaloo: try to make sense of it all") +Link: https://lore.kernel.org/r/20240131090023.12331-2-pstanner@redhat.com +Reported-by: Danilo Krummrich +Suggested-by: Arnd Bergmann +Signed-off-by: Philipp Stanner +Signed-off-by: Bjorn Helgaas +Reviewed-by: Arnd Bergmann +Cc: # v5.15+ +Signed-off-by: Sasha Levin +--- + lib/pci_iomap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c +index ce39ce9f3526e..2829ddb0e316b 100644 +--- a/lib/pci_iomap.c ++++ b/lib/pci_iomap.c +@@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p) + + if (addr >= start && addr < start + IO_SPACE_LIMIT) + return; +- iounmap(p); + #endif ++ iounmap(p); + } + EXPORT_SYMBOL(pci_iounmap); + +-- +2.43.0 + diff --git a/queue-6.8/phy-tegra-xusb-add-api-to-retrieve-the-port-number-o.patch b/queue-6.8/phy-tegra-xusb-add-api-to-retrieve-the-port-number-o.patch new file mode 100644 index 00000000000..c91814a35ae --- /dev/null +++ b/queue-6.8/phy-tegra-xusb-add-api-to-retrieve-the-port-number-o.patch @@ -0,0 +1,68 @@ +From a0ca319ddfcd18e6c40c2279458a48a1dc586692 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 11:03:27 +0800 +Subject: phy: tegra: xusb: Add API to retrieve the port number of phy + +From: Wayne Chang + +[ Upstream commit d843f031d9e90462253015bc0bd9e3852d206bf2 ] + +This patch introduces a new API, tegra_xusb_padctl_get_port_number, +to the Tegra XUSB Pad Controller driver. This API is used to identify +the USB port that is associated with a given PHY. + +The function takes a PHY pointer for either a USB2 PHY or USB3 PHY as input +and returns the corresponding port number. If the PHY pointer is invalid, +it returns -ENODEV. 
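+
+A minimal usage sketch (the caller and the usb2_phy variable below are
+hypothetical; only the function itself comes from this patch):
+
+	/* Resolve which pad/port index a previously claimed PHY maps to. */
+	int port = tegra_xusb_padctl_get_port_number(usb2_phy);
+
+	if (port < 0)
+		return port;	/* -ENODEV when no valid PHY is passed */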
+ +Cc: stable@vger.kernel.org +Signed-off-by: Wayne Chang +Reviewed-by: Jon Hunter +Tested-by: Jon Hunter +Link: https://lore.kernel.org/r/20240307030328.1487748-2-waynec@nvidia.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/phy/tegra/xusb.c | 13 +++++++++++++ + include/linux/phy/tegra/xusb.h | 1 + + 2 files changed, 14 insertions(+) + +diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c +index 142ebe0247cc0..983a6e6173bd2 100644 +--- a/drivers/phy/tegra/xusb.c ++++ b/drivers/phy/tegra/xusb.c +@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, + } + EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion); + ++int tegra_xusb_padctl_get_port_number(struct phy *phy) ++{ ++ struct tegra_xusb_lane *lane; ++ ++ if (!phy) ++ return -ENODEV; ++ ++ lane = phy_get_drvdata(phy); ++ ++ return lane->index; ++} ++EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number); ++ + MODULE_AUTHOR("Thierry Reding "); + MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver"); + MODULE_LICENSE("GPL v2"); +diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h +index 70998e6dd6fdc..6ca51e0080ec0 100644 +--- a/include/linux/phy/tegra/xusb.h ++++ b/include/linux/phy/tegra/xusb.h +@@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy); + int tegra_phy_xusb_utmi_port_reset(struct phy *phy); + int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, + unsigned int port); ++int tegra_xusb_padctl_get_port_number(struct phy *phy); + int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy, + enum usb_device_speed speed); + int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy); +-- +2.43.0 + diff --git a/queue-6.8/pinctrl-qcom-sm8650-lpass-lpi-correct-kconfig-name.patch b/queue-6.8/pinctrl-qcom-sm8650-lpass-lpi-correct-kconfig-name.patch new file mode 100644 index 00000000000..c6f9536e65d --- /dev/null +++ b/queue-6.8/pinctrl-qcom-sm8650-lpass-lpi-correct-kconfig-name.patch @@ -0,0 +1,38 @@ +From 0fdc42c2168282e297452a77d0a773f528738c16 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Feb 2024 11:24:35 +0100 +Subject: pinctrl: qcom: sm8650-lpass-lpi: correct Kconfig name + +From: Krzysztof Kozlowski + +[ Upstream commit 47847b9bcdb13c2da1829323a66651ef63047b77 ] + +Use proper model name in SM8650 LPASS pin controller Kconfig entry. + +Cc: +Fixes: c4e47673853f ("pinctrl: qcom: sm8650-lpass-lpi: add SM8650 LPASS") +Signed-off-by: Krzysztof Kozlowski +Reviewed-by: Neil Armstrong +Link: https://lore.kernel.org/r/20240216102435.89867-1-krzysztof.kozlowski@linaro.org +Signed-off-by: Linus Walleij +Signed-off-by: Sasha Levin +--- + drivers/pinctrl/qcom/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig +index e0f2829c15d6a..24619e80b2cce 100644 +--- a/drivers/pinctrl/qcom/Kconfig ++++ b/drivers/pinctrl/qcom/Kconfig +@@ -125,7 +125,7 @@ config PINCTRL_SM8550_LPASS_LPI + platform. 
+ + config PINCTRL_SM8650_LPASS_LPI +- tristate "Qualcomm Technologies Inc SM8550 LPASS LPI pin controller driver" ++ tristate "Qualcomm Technologies Inc SM8650 LPASS LPI pin controller driver" + depends on ARM64 || COMPILE_TEST + depends on PINCTRL_LPASS_LPI + help +-- +2.43.0 + diff --git a/queue-6.8/platform-x86-intel-tpmi-change-vsec-offset-to-u64.patch b/queue-6.8/platform-x86-intel-tpmi-change-vsec-offset-to-u64.patch new file mode 100644 index 00000000000..c0e9d75459b --- /dev/null +++ b/queue-6.8/platform-x86-intel-tpmi-change-vsec-offset-to-u64.patch @@ -0,0 +1,70 @@ +From 65c7c3c0fa85a28b754819e6b1bbb20651c812d1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 11:46:44 -0800 +Subject: platform/x86/intel/tpmi: Change vsec offset to u64 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Srinivas Pandruvada + +[ Upstream commit 57221a07ff37ff356f9265acd228bc3c8744c8fc ] + +The vsec offset can be 64 bit long depending on the PFS start. So change +type to u64. Also use 64 bit formatting for seq_printf. + +Fixes: 47731fd2865f ("platform/x86/intel: Intel TPMI enumeration driver") +Signed-off-by: Srinivas Pandruvada +Cc: stable@vger.kernel.org # v6.3+ +Link: https://lore.kernel.org/r/20240305194644.2077867-1-srinivas.pandruvada@linux.intel.com +Reviewed-by: Ilpo Järvinen +Signed-off-by: Ilpo Järvinen +Signed-off-by: Sasha Levin +--- + drivers/platform/x86/intel/tpmi.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c +index e73cdea67fff8..910df7c654f48 100644 +--- a/drivers/platform/x86/intel/tpmi.c ++++ b/drivers/platform/x86/intel/tpmi.c +@@ -96,7 +96,7 @@ struct intel_tpmi_pfs_entry { + */ + struct intel_tpmi_pm_feature { + struct intel_tpmi_pfs_entry pfs_header; +- unsigned int vsec_offset; ++ u64 vsec_offset; + struct intel_vsec_device *vsec_dev; + }; + +@@ -376,7 +376,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused) + read_blocked = feature_state.read_blocked ? 'Y' : 'N'; + write_blocked = feature_state.write_blocked ? 
'Y' : 'N'; + } +- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\t\t%c\t\t%c\n", ++ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n", + pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries, + pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset, + pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled, +@@ -395,7 +395,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused) + struct intel_tpmi_pm_feature *pfs = s->private; + int count, ret = 0; + void __iomem *mem; +- u32 off, size; ++ u32 size; ++ u64 off; + u8 *buffer; + + size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs); +@@ -411,7 +412,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused) + mutex_lock(&tpmi_dev_lock); + + for (count = 0; count < pfs->pfs_header.num_entries; ++count) { +- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off); ++ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off); + + mem = ioremap(off, size); + if (!mem) { +-- +2.43.0 + diff --git a/queue-6.8/pm-sleep-wakeirq-fix-wake-irq-warning-in-system-susp.patch b/queue-6.8/pm-sleep-wakeirq-fix-wake-irq-warning-in-system-susp.patch new file mode 100644 index 00000000000..83175bb774d --- /dev/null +++ b/queue-6.8/pm-sleep-wakeirq-fix-wake-irq-warning-in-system-susp.patch @@ -0,0 +1,59 @@ +From 5512190ffbb897082fece2347b409f59954db3b5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 17:26:57 +0800 +Subject: PM: sleep: wakeirq: fix wake irq warning in system suspend + +From: Qingliang Li + +[ Upstream commit e7a7681c859643f3f2476b2a28a494877fd89442 ] + +When driver uses pm_runtime_force_suspend() as the system suspend callback +function and registers the wake irq with reverse enable ordering, the wake +irq will be re-enabled when entering system suspend, triggering an +'Unbalanced enable for IRQ xxx' warning. In this scenario, the call +sequence during system suspend is as follows: + suspend_devices_and_enter() + -> dpm_suspend_start() + -> dpm_run_callback() + -> pm_runtime_force_suspend() + -> dev_pm_enable_wake_irq_check() + -> dev_pm_enable_wake_irq_complete() + + -> suspend_enter() + -> dpm_suspend_noirq() + -> device_wakeup_arm_wake_irqs() + -> dev_pm_arm_wake_irq() + +To fix this issue, complete the setting of WAKE_IRQ_DEDICATED_ENABLED flag +in dev_pm_enable_wake_irq_complete() to avoid redundant irq enablement. + +Fixes: 8527beb12087 ("PM: sleep: wakeirq: fix wake irq arming") +Reviewed-by: Dhruva Gole +Signed-off-by: Qingliang Li +Reviewed-by: Johan Hovold +Cc: 5.16+ # 5.16+ +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + drivers/base/power/wakeirq.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c +index 42171f766dcba..5a5a9e978e85f 100644 +--- a/drivers/base/power/wakeirq.c ++++ b/drivers/base/power/wakeirq.c +@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && +- wirq->status & WAKE_IRQ_DEDICATED_REVERSE) ++ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) { + enable_irq(wirq->irq); ++ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; ++ } + } + + /** +-- +2.43.0 + diff --git a/queue-6.8/pm-suspend-set-mem_sleep_current-during-kernel-comma.patch b/queue-6.8/pm-suspend-set-mem_sleep_current-during-kernel-comma.patch new file mode 100644 index 00000000000..aa25d25878e --- /dev/null +++ b/queue-6.8/pm-suspend-set-mem_sleep_current-during-kernel-comma.patch @@ -0,0 +1,42 @@ +From 64faef0536f1d94a66f1fc7584cc3cfece45d308 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 12:14:59 +0530 +Subject: PM: suspend: Set mem_sleep_current during kernel command line setup + +From: Maulik Shah + +[ Upstream commit 9bc4ffd32ef8943f5c5a42c9637cfd04771d021b ] + +psci_init_system_suspend() invokes suspend_set_ops() very early during +bootup even before kernel command line for mem_sleep_default is setup. +This leads to kernel command line mem_sleep_default=s2idle not working +as mem_sleep_current gets changed to deep via suspend_set_ops() and never +changes back to s2idle. + +Set mem_sleep_current along with mem_sleep_default during kernel command +line setup as default suspend mode. + +Fixes: faf7ec4a92c0 ("drivers: firmware: psci: add system suspend support") +CC: stable@vger.kernel.org # 5.4+ +Signed-off-by: Maulik Shah +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + kernel/power/suspend.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index fa3bf161d13f7..a718067deecee 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str) + if (mem_sleep_labels[state] && + !strcmp(str, mem_sleep_labels[state])) { + mem_sleep_default = state; ++ mem_sleep_current = state; + break; + } + +-- +2.43.0 + diff --git a/queue-6.8/powercap-intel_rapl-fix-a-null-pointer-dereference.patch b/queue-6.8/powercap-intel_rapl-fix-a-null-pointer-dereference.patch new file mode 100644 index 00000000000..afd446ab6cb --- /dev/null +++ b/queue-6.8/powercap-intel_rapl-fix-a-null-pointer-dereference.patch @@ -0,0 +1,49 @@ +From b1823793cf740a2758f68fcb492de41a722737ad Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 19:37:08 +0800 +Subject: powercap: intel_rapl: Fix a NULL pointer dereference + +From: Zhang Rui + +[ Upstream commit 2d1f5006ff95770da502f8cee2a224a1ff83866e ] + +A NULL pointer dereference is triggered when probing the MMIO RAPL +driver on platforms with CPU ID not listed in intel_rapl_common CPU +model list. + +This is because the intel_rapl_common module still probes on such +platforms even if 'defaults_msr' is not set after commit 1488ac990ac8 +("powercap: intel_rapl: Allow probing without CPUID match"). Thus the +MMIO RAPL rp->priv->defaults is NULL when registering to RAPL framework. + +Fix the problem by adding sanity check to ensure rp->priv->rapl_defaults +is always valid. 
+ +Fixes: 1488ac990ac8 ("powercap: intel_rapl: Allow probing without CPUID match") +Signed-off-by: Zhang Rui +Cc: 6.5+ # 6.5+ +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/powercap/intel_rapl_common.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index 2feed036c1cd4..1a739afd47d96 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -759,6 +759,11 @@ static int rapl_config(struct rapl_package *rp) + default: + return -EINVAL; + } ++ ++ /* defaults_msr can be NULL on unsupported platforms */ ++ if (!rp->priv->defaults || !rp->priv->rpi) ++ return -ENODEV; ++ + return 0; + } + +-- +2.43.0 + diff --git a/queue-6.8/powercap-intel_rapl-fix-locking-in-tpmi-rapl.patch b/queue-6.8/powercap-intel_rapl-fix-locking-in-tpmi-rapl.patch new file mode 100644 index 00000000000..4189348dca2 --- /dev/null +++ b/queue-6.8/powercap-intel_rapl-fix-locking-in-tpmi-rapl.patch @@ -0,0 +1,196 @@ +From f7017726c966339a91ed8fd9eb484cbba302e65e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 19:37:09 +0800 +Subject: powercap: intel_rapl: Fix locking in TPMI RAPL + +From: Zhang Rui + +[ Upstream commit 1aa09b9379a7a644cd2f75ae0bac82b8783df600 ] + +The RAPL framework uses CPU hotplug locking to protect the rapl_packages +list and rp->lead_cpu to guarantee that + + 1. the RAPL package device is not unprobed and freed + 2. the cached rp->lead_cpu is always valid + +for operations like powercap sysfs accesses. + +Current RAPL APIs assume being called from CPU hotplug callbacks which +hold the CPU hotplug lock, but TPMI RAPL driver invokes the APIs in the +driver's .probe() function without acquiring the CPU hotplug lock. + +Fix the problem by providing both locked and lockless versions of RAPL +APIs. + +Fixes: 9eef7f9da928 ("powercap: intel_rapl: Introduce RAPL TPMI interface driver") +Signed-off-by: Zhang Rui +Cc: 6.5+ # 6.5+ +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + drivers/powercap/intel_rapl_common.c | 29 +++++++++++++++++-- + drivers/powercap/intel_rapl_msr.c | 8 ++--- + .../int340x_thermal/processor_thermal_rapl.c | 8 ++--- + include/linux/intel_rapl.h | 6 ++++ + 4 files changed, 40 insertions(+), 11 deletions(-) + +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index 1a739afd47d96..9d3e102f1a76b 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -5,6 +5,7 @@ + */ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + ++#include + #include + #include + #include +@@ -1504,7 +1505,7 @@ static int rapl_detect_domains(struct rapl_package *rp) + } + + /* called from CPU hotplug notifier, hotplug lock held */ +-void rapl_remove_package(struct rapl_package *rp) ++void rapl_remove_package_cpuslocked(struct rapl_package *rp) + { + struct rapl_domain *rd, *rd_package = NULL; + +@@ -1533,10 +1534,18 @@ void rapl_remove_package(struct rapl_package *rp) + list_del(&rp->plist); + kfree(rp); + } ++EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked); ++ ++void rapl_remove_package(struct rapl_package *rp) ++{ ++ guard(cpus_read_lock)(); ++ rapl_remove_package_cpuslocked(rp); ++} + EXPORT_SYMBOL_GPL(rapl_remove_package); + + /* caller to ensure CPU hotplug lock is held */ +-struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu) ++struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, ++ bool id_is_cpu) + { + struct rapl_package *rp; + int uid; +@@ -1554,10 +1563,17 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, + + return NULL; + } ++EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked); ++ ++struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu) ++{ ++ guard(cpus_read_lock)(); ++ return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu); ++} + EXPORT_SYMBOL_GPL(rapl_find_package_domain); + + /* called from CPU hotplug notifier, hotplug lock held */ +-struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu) ++struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu) + { + struct rapl_package *rp; + int ret; +@@ -1603,6 +1619,13 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id + kfree(rp); + return ERR_PTR(ret); + } ++EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked); ++ ++struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu) ++{ ++ guard(cpus_read_lock)(); ++ return rapl_add_package_cpuslocked(id, priv, id_is_cpu); ++} + EXPORT_SYMBOL_GPL(rapl_add_package); + + static void power_limit_state_save(void) +diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c +index 250bd41a588c7..b4b6930cacb0b 100644 +--- a/drivers/powercap/intel_rapl_msr.c ++++ b/drivers/powercap/intel_rapl_msr.c +@@ -73,9 +73,9 @@ static int rapl_cpu_online(unsigned int cpu) + { + struct rapl_package *rp; + +- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true); ++ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true); + if (!rp) { +- rp = rapl_add_package(cpu, rapl_msr_priv, true); ++ rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true); + if (IS_ERR(rp)) + return PTR_ERR(rp); + } +@@ -88,14 +88,14 @@ static int rapl_cpu_down_prep(unsigned int cpu) + struct rapl_package *rp; + int lead_cpu; + +- rp = 
rapl_find_package_domain(cpu, rapl_msr_priv, true); ++ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true); + if (!rp) + return 0; + + cpumask_clear_cpu(cpu, &rp->cpumask); + lead_cpu = cpumask_first(&rp->cpumask); + if (lead_cpu >= nr_cpu_ids) +- rapl_remove_package(rp); ++ rapl_remove_package_cpuslocked(rp); + else if (rp->lead_cpu == cpu) + rp->lead_cpu = lead_cpu; + return 0; +diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +index 2f00fc3bf274a..e964a9375722a 100644 +--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c ++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +@@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu) + if (topology_physical_package_id(cpu)) + return 0; + +- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); ++ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); + if (!rp) { +- rp = rapl_add_package(cpu, &rapl_mmio_priv, true); ++ rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true); + if (IS_ERR(rp)) + return PTR_ERR(rp); + } +@@ -42,14 +42,14 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu) + struct rapl_package *rp; + int lead_cpu; + +- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); ++ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); + if (!rp) + return 0; + + cpumask_clear_cpu(cpu, &rp->cpumask); + lead_cpu = cpumask_first(&rp->cpumask); + if (lead_cpu >= nr_cpu_ids) +- rapl_remove_package(rp); ++ rapl_remove_package_cpuslocked(rp); + else if (rp->lead_cpu == cpu) + rp->lead_cpu = lead_cpu; + return 0; +diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h +index 33f21bd85dbf2..f3196f82fd8a1 100644 +--- a/include/linux/intel_rapl.h ++++ b/include/linux/intel_rapl.h +@@ -178,6 +178,12 @@ struct rapl_package { + struct rapl_if_priv *priv; + }; + ++struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, ++ bool id_is_cpu); ++struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, ++ bool id_is_cpu); ++void rapl_remove_package_cpuslocked(struct rapl_package *rp); ++ + struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu); + struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); + void rapl_remove_package(struct rapl_package *rp); +-- +2.43.0 + diff --git a/queue-6.8/powercap-intel_rapl_tpmi-fix-a-register-bug.patch b/queue-6.8/powercap-intel_rapl_tpmi-fix-a-register-bug.patch new file mode 100644 index 00000000000..e800bcea0d8 --- /dev/null +++ b/queue-6.8/powercap-intel_rapl_tpmi-fix-a-register-bug.patch @@ -0,0 +1,38 @@ +From e11b15a144b058b7642e137f6e3444c41e43b269 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 19:37:10 +0800 +Subject: powercap: intel_rapl_tpmi: Fix a register bug + +From: Zhang Rui + +[ Upstream commit faa9130ce716b286d786d59032bacfd9052c2094 ] + +Add the missing Domain Info register. This also fixes the bogus +definition of the Interrupt register. + +Neither of these two registers was used previously. + +Fixes: 9eef7f9da928 ("powercap: intel_rapl: Introduce RAPL TPMI interface driver") +Signed-off-by: Zhang Rui +Cc: 6.5+ # 6.5+ +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + drivers/powercap/intel_rapl_tpmi.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c +index 891c90fefd8b7..f1c734ac3c349 100644 +--- a/drivers/powercap/intel_rapl_tpmi.c ++++ b/drivers/powercap/intel_rapl_tpmi.c +@@ -40,6 +40,7 @@ enum tpmi_rapl_register { + TPMI_RAPL_REG_ENERGY_STATUS, + TPMI_RAPL_REG_PERF_STATUS, + TPMI_RAPL_REG_POWER_INFO, ++ TPMI_RAPL_REG_DOMAIN_INFO, + TPMI_RAPL_REG_INTERRUPT, + TPMI_RAPL_REG_MAX = 15, + }; +-- +2.43.0 + diff --git a/queue-6.8/powercap-intel_rapl_tpmi-fix-system-domain-probing.patch b/queue-6.8/powercap-intel_rapl_tpmi-fix-system-domain-probing.patch new file mode 100644 index 00000000000..60b8970efb7 --- /dev/null +++ b/queue-6.8/powercap-intel_rapl_tpmi-fix-system-domain-probing.patch @@ -0,0 +1,67 @@ +From 8a82e5ffee39a6d6c0917f8278377a624d2f5000 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 19:37:11 +0800 +Subject: powercap: intel_rapl_tpmi: Fix System Domain probing + +From: Zhang Rui + +[ Upstream commit 903eb9fb85e32810f376a2858aad77c9298f9488 ] + +Only domain root packages can enumerate System (Psys) domain. +Whether a package is domain root or not is described in the Bit 0 of the +Domain Info register. + +Add support for Domain Info register and fix the System domain probing +accordingly. + +Fixes: 9eef7f9da928 ("powercap: intel_rapl: Introduce RAPL TPMI interface driver") +Signed-off-by: Zhang Rui +Cc: 6.5+ # 6.5+ +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/powercap/intel_rapl_tpmi.c | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c +index f1c734ac3c349..f6b7f085977ce 100644 +--- a/drivers/powercap/intel_rapl_tpmi.c ++++ b/drivers/powercap/intel_rapl_tpmi.c +@@ -131,6 +131,12 @@ static void trp_release(struct tpmi_rapl_package *trp) + mutex_unlock(&tpmi_rapl_lock); + } + ++/* ++ * Bit 0 of TPMI_RAPL_REG_DOMAIN_INFO indicates if the current package is a domain ++ * root or not. Only domain root packages can enumerate System (Psys) Domain. 
++ */ ++#define TPMI_RAPL_DOMAIN_ROOT BIT(0) ++ + static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) + { + u8 tpmi_domain_version; +@@ -140,6 +146,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) + enum rapl_domain_reg_id reg_id; + int tpmi_domain_size, tpmi_domain_flags; + u64 tpmi_domain_header = readq(trp->base + offset); ++ u64 tpmi_domain_info; + + /* Domain Parent bits are ignored for now */ + tpmi_domain_version = tpmi_domain_header & 0xff; +@@ -170,6 +177,13 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) + domain_type = RAPL_DOMAIN_PACKAGE; + break; + case TPMI_RAPL_DOMAIN_SYSTEM: ++ if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_DOMAIN_INFO))) { ++ pr_warn(FW_BUG "System domain must support Domain Info register\n"); ++ return -ENODEV; ++ } ++ tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO); ++ if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT)) ++ return 0; + domain_type = RAPL_DOMAIN_PLATFORM; + break; + case TPMI_RAPL_DOMAIN_MEMORY: +-- +2.43.0 + diff --git a/queue-6.8/powerpc-fsl-fix-mfpmr-build-errors-with-newer-binuti.patch b/queue-6.8/powerpc-fsl-fix-mfpmr-build-errors-with-newer-binuti.patch new file mode 100644 index 00000000000..dd3ecdd5ce0 --- /dev/null +++ b/queue-6.8/powerpc-fsl-fix-mfpmr-build-errors-with-newer-binuti.patch @@ -0,0 +1,61 @@ +From 8883e7ce9f785bb4f375c4a59003bcc43b304926 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 23:25:19 +1100 +Subject: powerpc/fsl: Fix mfpmr build errors with newer binutils + +From: Michael Ellerman + +[ Upstream commit 5f491356b7149564ab22323ccce79c8d595bfd0c ] + +Binutils 2.38 complains about the use of mfpmr when building +ppc6xx_defconfig: + + CC arch/powerpc/kernel/pmc.o + {standard input}: Assembler messages: + {standard input}:45: Error: unrecognized opcode: `mfpmr' + {standard input}:56: Error: unrecognized opcode: `mtpmr' + +This is because by default the kernel is built with -mcpu=powerpc, and +the mt/mfpmr instructions are not defined. + +It can be avoided by enabling CONFIG_E300C3_CPU, but just adding that to +the defconfig will leave open the possibility of randconfig failures. + +So add machine directives around the mt/mfpmr instructions to tell +binutils how to assemble them. 
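+
+As a rough sketch, a counter read such as mfpmr(PMRN_PMC0) now reaches the
+assembler wrapped in the new directives (the PMR number and the output
+variable below are illustrative, not taken from this patch):
+
+	unsigned int val;
+
+	asm volatile(".machine push; "
+		     ".machine e300; "
+		     "mfpmr %0,16; "	/* PMR number, illustrative */
+		     ".machine pop; "
+		     : "=r" (val));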
+ +Cc: stable@vger.kernel.org +Reported-by: Jan-Benedict Glaw +Signed-off-by: Michael Ellerman +Link: https://msgid.link/20240229122521.762431-3-mpe@ellerman.id.au +Signed-off-by: Sasha Levin +--- + arch/powerpc/include/asm/reg_fsl_emb.h | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h +index a21f529c43d96..8359c06d92d9f 100644 +--- a/arch/powerpc/include/asm/reg_fsl_emb.h ++++ b/arch/powerpc/include/asm/reg_fsl_emb.h +@@ -12,9 +12,16 @@ + #ifndef __ASSEMBLY__ + /* Performance Monitor Registers */ + #define mfpmr(rn) ({unsigned int rval; \ +- asm volatile("mfpmr %0," __stringify(rn) \ ++ asm volatile(".machine push; " \ ++ ".machine e300; " \ ++ "mfpmr %0," __stringify(rn) ";" \ ++ ".machine pop; " \ + : "=r" (rval)); rval;}) +-#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v)) ++#define mtpmr(rn, v) asm volatile(".machine push; " \ ++ ".machine e300; " \ ++ "mtpmr " __stringify(rn) ",%0; " \ ++ ".machine pop; " \ ++ : : "r" (v)) + #endif /* __ASSEMBLY__ */ + + /* Freescale Book E Performance Monitor APU Registers */ +-- +2.43.0 + diff --git a/queue-6.8/powerpc-smp-adjust-nr_cpu_ids-to-cover-all-threads-o.patch b/queue-6.8/powerpc-smp-adjust-nr_cpu_ids-to-cover-all-threads-o.patch new file mode 100644 index 00000000000..d1fcf3b2a88 --- /dev/null +++ b/queue-6.8/powerpc-smp-adjust-nr_cpu_ids-to-cover-all-threads-o.patch @@ -0,0 +1,41 @@ +From 8213fc299b681cad1f69ea7152ca6ed73d1b1832 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Feb 2024 00:14:04 +1100 +Subject: powerpc/smp: Adjust nr_cpu_ids to cover all threads of a core + +From: Michael Ellerman + +[ Upstream commit 5580e96dad5a439d561d9648ffcbccb739c2a120 ] + +If nr_cpu_ids is too low to include at least all the threads of a single +core adjust nr_cpu_ids upwards. This avoids triggering odd bugs in code +that assumes all threads of a core are available. 
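+
+A worked example with illustrative numbers: booting an SMT8 core with
+nr_cpus=4 on the command line gives nthreads = 8 and nr_cpu_ids = 4, so
+
+	4 % 8 != 0  ->  set_nr_cpu_ids(ALIGN(4, 8)) == 8
+
+and the boot core is covered in full instead of being cut in half.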
+ +Cc: stable@vger.kernel.org +Signed-off-by: Michael Ellerman +Link: https://msgid.link/20231229120107.2281153-1-mpe@ellerman.id.au +Signed-off-by: Sasha Levin +--- + arch/powerpc/kernel/prom.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c +index 0b5878c3125b1..58e80076bed5c 100644 +--- a/arch/powerpc/kernel/prom.c ++++ b/arch/powerpc/kernel/prom.c +@@ -375,6 +375,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node, + if (IS_ENABLED(CONFIG_PPC64)) + boot_cpu_hwid = be32_to_cpu(intserv[found_thread]); + ++ if (nr_cpu_ids % nthreads != 0) { ++ set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads)); ++ pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n", ++ nr_cpu_ids); ++ } ++ + /* + * PAPR defines "logical" PVR values for cpus that + * meet various levels of the architecture: +-- +2.43.0 + diff --git a/queue-6.8/powerpc-smp-increase-nr_cpu_ids-to-include-the-boot-.patch b/queue-6.8/powerpc-smp-increase-nr_cpu_ids-to-include-the-boot-.patch new file mode 100644 index 00000000000..4b8c129dc8e --- /dev/null +++ b/queue-6.8/powerpc-smp-increase-nr_cpu_ids-to-include-the-boot-.patch @@ -0,0 +1,41 @@ +From 4084a96f508e1370ffe4a469cfa0123f6e785059 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 Feb 2024 00:14:04 +1100 +Subject: powerpc/smp: Increase nr_cpu_ids to include the boot CPU + +From: Michael Ellerman + +[ Upstream commit 777f81f0a9c780a6443bcf2c7785f0cc2e87c1ef ] + +If nr_cpu_ids is too low to include the boot CPU adjust nr_cpu_ids +upward. Otherwise the kernel will BUG when trying to allocate a paca +for the boot CPU and fail to boot. + +Cc: stable@vger.kernel.org +Signed-off-by: Michael Ellerman +Link: https://msgid.link/20231229120107.2281153-2-mpe@ellerman.id.au +Signed-off-by: Sasha Levin +--- + arch/powerpc/kernel/prom.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c +index 58e80076bed5c..77364729a1b61 100644 +--- a/arch/powerpc/kernel/prom.c ++++ b/arch/powerpc/kernel/prom.c +@@ -381,6 +381,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node, + nr_cpu_ids); + } + ++ if (boot_cpuid >= nr_cpu_ids) { ++ set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads))); ++ pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n", ++ boot_cpuid, nr_cpu_ids); ++ } ++ + /* + * PAPR defines "logical" PVR values for cpus that + * meet various levels of the architecture: +-- +2.43.0 + diff --git a/queue-6.8/powerpc-xor_vmx-add-mhard-float-to-cflags.patch b/queue-6.8/powerpc-xor_vmx-add-mhard-float-to-cflags.patch new file mode 100644 index 00000000000..77451804cfe --- /dev/null +++ b/queue-6.8/powerpc-xor_vmx-add-mhard-float-to-cflags.patch @@ -0,0 +1,48 @@ +From 918a0e73aa6508ebafc5856961b5c57d82c5113e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 27 Jan 2024 11:07:43 -0700 +Subject: powerpc: xor_vmx: Add '-mhard-float' to CFLAGS + +From: Nathan Chancellor + +[ Upstream commit 35f20786c481d5ced9283ff42de5c69b65e5ed13 ] + +arch/powerpc/lib/xor_vmx.o is built with '-msoft-float' (from the main +powerpc Makefile) and '-maltivec' (from its CFLAGS), which causes an +error when building with clang after a recent change in main: + + error: option '-msoft-float' cannot be specified with '-maltivec' + make[6]: *** [scripts/Makefile.build:243: arch/powerpc/lib/xor_vmx.o] Error 1 + +Explicitly add '-mhard-float' before '-maltivec' in xor_vmx.o's CFLAGS +to override the previous 
inclusion of '-msoft-float' (as the last option +wins), which matches how other areas of the kernel use '-maltivec', such +as AMDGPU. + +Cc: stable@vger.kernel.org +Closes: https://github.com/ClangBuiltLinux/linux/issues/1986 +Link: https://github.com/llvm/llvm-project/commit/4792f912b232141ecba4cbae538873be3c28556c +Signed-off-by: Nathan Chancellor +Signed-off-by: Michael Ellerman +Link: https://msgid.link/20240127-ppc-xor_vmx-drop-msoft-float-v1-1-f24140e81376@kernel.org +Signed-off-by: Sasha Levin +--- + arch/powerpc/lib/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile +index 6eac63e79a899..0ab65eeb93ee3 100644 +--- a/arch/powerpc/lib/Makefile ++++ b/arch/powerpc/lib/Makefile +@@ -76,7 +76,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o + obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o + + obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o +-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec) ++CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) + # Enable + CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) + +-- +2.43.0 + diff --git a/queue-6.8/remoteproc-virtio-fix-wdg-cannot-recovery-remote-pro.patch b/queue-6.8/remoteproc-virtio-fix-wdg-cannot-recovery-remote-pro.patch new file mode 100644 index 00000000000..5995e5f2879 --- /dev/null +++ b/queue-6.8/remoteproc-virtio-fix-wdg-cannot-recovery-remote-pro.patch @@ -0,0 +1,63 @@ +From 3d43b95eabf73e1b65e5a4b937f787d760266b8b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 17 Dec 2023 13:36:59 +0800 +Subject: remoteproc: virtio: Fix wdg cannot recovery remote processor + +From: Joakim Zhang + +[ Upstream commit b327c72753d6a78de37aed6c35756f2ef62897ee ] + +Recovery remote processor failed when wdg irq received: +[ 0.842574] remoteproc remoteproc0: crash detected in cix-dsp-rproc: type watchdog +[ 0.842750] remoteproc remoteproc0: handling crash #1 in cix-dsp-rproc +[ 0.842824] remoteproc remoteproc0: recovering cix-dsp-rproc +[ 0.843342] remoteproc remoteproc0: stopped remote processor cix-dsp-rproc +[ 0.847901] rproc-virtio rproc-virtio.0.auto: Failed to associate buffer +[ 0.847979] remoteproc remoteproc0: failed to probe subdevices for cix-dsp-rproc: -16 + +The reason is that dma coherent mem would not be released when +recovering the remote processor, due to rproc_virtio_remove() +would not be called, where the mem released. It will fail when +it try to allocate and associate buffer again. + +Releasing reserved memory from rproc_virtio_dev_release(), instead of +rproc_virtio_remove(). 
+ +Fixes: 1d7b61c06dc3 ("remoteproc: virtio: Create platform device for the remoteproc_virtio") +Signed-off-by: Joakim Zhang +Acked-by: Arnaud Pouliquen +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20231217053659.3245745-1-joakim.zhang@cixtech.com +Signed-off-by: Mathieu Poirier +Signed-off-by: Sasha Levin +--- + drivers/remoteproc/remoteproc_virtio.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c +index 83d76915a6ad6..25b66b113b695 100644 +--- a/drivers/remoteproc/remoteproc_virtio.c ++++ b/drivers/remoteproc/remoteproc_virtio.c +@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev) + + kfree(vdev); + ++ of_reserved_mem_device_release(&rvdev->pdev->dev); ++ dma_release_coherent_memory(&rvdev->pdev->dev); ++ + put_device(&rvdev->pdev->dev); + } + +@@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev) + rproc_remove_subdev(rproc, &rvdev->subdev); + rproc_remove_rvdev(rvdev); + +- of_reserved_mem_device_release(&pdev->dev); +- dma_release_coherent_memory(&pdev->dev); +- + put_device(&rproc->dev); + } + +-- +2.43.0 + diff --git a/queue-6.8/revert-block-mq-deadline-use-correct-way-to-throttli.patch b/queue-6.8/revert-block-mq-deadline-use-correct-way-to-throttli.patch new file mode 100644 index 00000000000..a9821dbdf70 --- /dev/null +++ b/queue-6.8/revert-block-mq-deadline-use-correct-way-to-throttli.patch @@ -0,0 +1,48 @@ +From 75d7f318700e43e4c2d8b57522d545b2ffb533c9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 13 Mar 2024 14:42:18 -0700 +Subject: Revert "block/mq-deadline: use correct way to throttling write + requests" + +From: Bart Van Assche + +[ Upstream commit 256aab46e31683d76d45ccbedc287b4d3f3e322b ] + +The code "max(1U, 3 * (1U << shift) / 4)" comes from the Kyber I/O +scheduler. The Kyber I/O scheduler maintains one internal queue per hwq +and hence derives its async_depth from the number of hwq tags. Using +this approach for the mq-deadline scheduler is wrong since the +mq-deadline scheduler maintains one internal queue for all hwqs +combined. Hence this revert. 
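
To make the mismatch concrete, a small stand-alone illustration with made-up numbers (a shift of 5 and nr_requests of 256 are examples, not values from any particular device): deriving the depth from one hwq's tag space throttles far harder than intended once the scheduler queues requests for all hwqs together.

    #include <stdio.h>

    int main(void)
    {
            unsigned int shift = 5;          /* example sbitmap shift of a single hwq */
            unsigned long nr_requests = 256; /* example scheduler-wide queue depth */

            /* Kyber-style value, derived from one hwq's tags. */
            unsigned int per_hwq = 3 * (1U << shift) / 4;    /* 24 */

            /* mq-deadline after this revert: 3/4 of the whole queue depth. */
            unsigned long queue_wide = 3 * nr_requests / 4;  /* 192 */

            printf("per-hwq: %u, queue-wide: %lu\n", per_hwq, queue_wide);
            return 0;
    }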
+ +Cc: stable@vger.kernel.org +Cc: Damien Le Moal +Cc: Harshit Mogalapalli +Cc: Zhiguo Niu +Fixes: d47f9717e5cf ("block/mq-deadline: use correct way to throttling write requests") +Signed-off-by: Bart Van Assche +Link: https://lore.kernel.org/r/20240313214218.1736147-1-bvanassche@acm.org +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/mq-deadline.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/block/mq-deadline.c b/block/mq-deadline.c +index f958e79277b8b..02a916ba62ee7 100644 +--- a/block/mq-deadline.c ++++ b/block/mq-deadline.c +@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) + struct request_queue *q = hctx->queue; + struct deadline_data *dd = q->elevator->elevator_data; + struct blk_mq_tags *tags = hctx->sched_tags; +- unsigned int shift = tags->bitmap_tags.sb.shift; + +- dd->async_depth = max(1U, 3 * (1U << shift) / 4); ++ dd->async_depth = max(1UL, 3 * q->nr_requests / 4); + + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth); + } +-- +2.43.0 + diff --git a/queue-6.8/revert-revert-md-raid5-wait-for-md_sb_change_pending.patch b/queue-6.8/revert-revert-md-raid5-wait-for-md_sb_change_pending.patch new file mode 100644 index 00000000000..282e5d447e3 --- /dev/null +++ b/queue-6.8/revert-revert-md-raid5-wait-for-md_sb_change_pending.patch @@ -0,0 +1,72 @@ +From 146fb9ae9a07461e07feb3a77cb7b163a75c43c1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 Jan 2024 00:21:31 -0800 +Subject: Revert "Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d"" + +From: Song Liu + +[ Upstream commit 3445139e3a594be77eff48bc17eff67cf983daed ] + +This reverts commit bed9e27baf52a09b7ba2a3714f1e24e17ced386d. + +The original set [1][2] was expected to undo a suboptimal fix in [2], and +replace it with a better fix [1]. However, as reported by Dan Moulding [2] +causes an issue with raid5 with journal device. + +Revert [2] for now to close the issue. We will follow up on another issue +reported by Juxiao Bi, as [2] is expected to fix it. We believe this is a +good trade-off, because the latter issue happens less freqently. + +In the meanwhile, we will NOT revert [1], as it contains the right logic. + +[1] commit d6e035aad6c0 ("md: bypass block throttle for superblock update") +[2] commit bed9e27baf52 ("Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d"") + +Reported-by: Dan Moulding +Closes: https://lore.kernel.org/linux-raid/20240123005700.9302-1-dan@danm.net/ +Fixes: bed9e27baf52 ("Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d"") +Cc: stable@vger.kernel.org # v5.19+ +Cc: Junxiao Bi +Cc: Yu Kuai +Signed-off-by: Song Liu +Reviewed-by: Yu Kuai +Link: https://lore.kernel.org/r/20240125082131.788600-1-song@kernel.org +Signed-off-by: Sasha Levin +--- + drivers/md/raid5.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 6cddea04f942a..4357673bee269 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -36,6 +36,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -6773,7 +6774,18 @@ static void raid5d(struct md_thread *thread) + spin_unlock_irq(&conf->device_lock); + md_check_recovery(mddev); + spin_lock_irq(&conf->device_lock); ++ ++ /* ++ * Waiting on MD_SB_CHANGE_PENDING below may deadlock ++ * seeing md_check_recovery() is needed to clear ++ * the flag when using mdmon. 
++ */ ++ continue; + } ++ ++ wait_event_lock_irq(mddev->sb_wait, ++ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), ++ conf->device_lock); + } + pr_debug("%d stripes handled\n", handled); + +-- +2.43.0 + diff --git a/queue-6.8/ring-buffer-do-not-set-shortest_full-when-full-targe.patch b/queue-6.8/ring-buffer-do-not-set-shortest_full-when-full-targe.patch new file mode 100644 index 00000000000..acde90f6294 --- /dev/null +++ b/queue-6.8/ring-buffer-do-not-set-shortest_full-when-full-targe.patch @@ -0,0 +1,54 @@ +From 2fbd6b14cc7e1a3e180158c5f10d9776a2105d17 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 11:56:41 -0400 +Subject: ring-buffer: Do not set shortest_full when full target is hit + +From: Steven Rostedt (Google) + +[ Upstream commit 761d9473e27f0c8782895013a3e7b52a37c8bcfc ] + +The rb_watermark_hit() checks if the amount of data in the ring buffer is +above the percentage level passed in by the "full" variable. If it is, it +returns true. + +But it also sets the "shortest_full" field of the cpu_buffer that informs +writers that it needs to call the irq_work if the amount of data on the +ring buffer is above the requested amount. + +The rb_watermark_hit() always sets the shortest_full even if the amount in +the ring buffer is what it wants. As it is not going to wait, because it +has what it wants, there's no reason to set shortest_full. + +Link: https://lore.kernel.org/linux-trace-kernel/20240312115641.6aa8ba08@gandalf.local.home + +Cc: stable@vger.kernel.org +Cc: Mathieu Desnoyers +Fixes: 42fb0a1e84ff5 ("tracing/ring-buffer: Have polling block on watermark") +Reviewed-by: Masami Hiramatsu (Google) +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + kernel/trace/ring_buffer.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index aa332ace108b1..6ffbccb9bcf00 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -834,9 +834,10 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) + pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; + ret = !pagebusy && full_hit(buffer, cpu, full); + +- if (!cpu_buffer->shortest_full || +- cpu_buffer->shortest_full > full) +- cpu_buffer->shortest_full = full; ++ if (!ret && (!cpu_buffer->shortest_full || ++ cpu_buffer->shortest_full > full)) { ++ cpu_buffer->shortest_full = full; ++ } + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + } + return ret; +-- +2.43.0 + diff --git a/queue-6.8/ring-buffer-fix-full_waiters_pending-in-poll.patch b/queue-6.8/ring-buffer-fix-full_waiters_pending-in-poll.patch new file mode 100644 index 00000000000..afd31521acf --- /dev/null +++ b/queue-6.8/ring-buffer-fix-full_waiters_pending-in-poll.patch @@ -0,0 +1,138 @@ +From 9739d7bd5c6249916a9a10c94d6be67988967a35 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 09:19:20 -0400 +Subject: ring-buffer: Fix full_waiters_pending in poll + +From: Steven Rostedt (Google) + +[ Upstream commit 8145f1c35fa648da662078efab299c4467b85ad5 ] + +If a reader of the ring buffer is doing a poll, and waiting for the ring +buffer to hit a specific watermark, there could be a case where it gets +into an infinite ping-pong loop. 
+ +The poll code has: + + rbwork->full_waiters_pending = true; + if (!cpu_buffer->shortest_full || + cpu_buffer->shortest_full > full) + cpu_buffer->shortest_full = full; + +The writer will see full_waiters_pending and check if the ring buffer is +filled over the percentage of the shortest_full value. If it is, it calls +an irq_work to wake up all the waiters. + +But the code could get into a circular loop: + + CPU 0 CPU 1 + ----- ----- + [ Poll ] + [ shortest_full = 0 ] + rbwork->full_waiters_pending = true; + if (rbwork->full_waiters_pending && + [ buffer percent ] > shortest_full) { + rbwork->wakeup_full = true; + [ queue_irqwork ] + + cpu_buffer->shortest_full = full; + + [ IRQ work ] + if (rbwork->wakeup_full) { + cpu_buffer->shortest_full = 0; + wakeup poll waiters; + [woken] + if ([ buffer percent ] > full) + break; + rbwork->full_waiters_pending = true; + if (rbwork->full_waiters_pending && + [ buffer percent ] > shortest_full) { + rbwork->wakeup_full = true; + [ queue_irqwork ] + + cpu_buffer->shortest_full = full; + + [ IRQ work ] + if (rbwork->wakeup_full) { + cpu_buffer->shortest_full = 0; + wakeup poll waiters; + [woken] + + [ Wash, rinse, repeat! ] + +In the poll, the shortest_full needs to be set before the +full_pending_waiters, as once that is set, the writer will compare the +current shortest_full (which is incorrect) to decide to call the irq_work, +which will reset the shortest_full (expecting the readers to update it). + +Also move the setting of full_waiters_pending after the check if the ring +buffer has the required percentage filled. There's no reason to tell the +writer to wake up waiters if there are no waiters. + +Link: https://lore.kernel.org/linux-trace-kernel/20240312131952.630922155@goodmis.org + +Cc: stable@vger.kernel.org +Cc: Mark Rutland +Cc: Mathieu Desnoyers +Cc: Andrew Morton +Fixes: 42fb0a1e84ff5 ("tracing/ring-buffer: Have polling block on watermark") +Reviewed-by: Masami Hiramatsu (Google) +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + kernel/trace/ring_buffer.c | 27 ++++++++++++++++++++------- + 1 file changed, 20 insertions(+), 7 deletions(-) + +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 6ffbccb9bcf00..99fdda29ce4e9 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -965,16 +965,32 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + poll_wait(filp, &rbwork->full_waiters, poll_table); + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); +- rbwork->full_waiters_pending = true; + if (!cpu_buffer->shortest_full || + cpu_buffer->shortest_full > full) + cpu_buffer->shortest_full = full; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); +- } else { +- poll_wait(filp, &rbwork->waiters, poll_table); +- rbwork->waiters_pending = true; ++ if (full_hit(buffer, cpu, full)) ++ return EPOLLIN | EPOLLRDNORM; ++ /* ++ * Only allow full_waiters_pending update to be seen after ++ * the shortest_full is set. If the writer sees the ++ * full_waiters_pending flag set, it will compare the ++ * amount in the ring buffer to shortest_full. If the amount ++ * in the ring buffer is greater than the shortest_full ++ * percent, it will call the irq_work handler to wake up ++ * this list. The irq_handler will reset shortest_full ++ * back to zero. That's done under the reader_lock, but ++ * the below smp_mb() makes sure that the update to ++ * full_waiters_pending doesn't leak up into the above. 
++ */ ++ smp_mb(); ++ rbwork->full_waiters_pending = true; ++ return 0; + } + ++ poll_wait(filp, &rbwork->waiters, poll_table); ++ rbwork->waiters_pending = true; ++ + /* + * There's a tight race between setting the waiters_pending and + * checking if the ring buffer is empty. Once the waiters_pending bit +@@ -990,9 +1006,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + */ + smp_mb(); + +- if (full) +- return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; +- + if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || + (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) + return EPOLLIN | EPOLLRDNORM; +-- +2.43.0 + diff --git a/queue-6.8/ring-buffer-use-wait_event_interruptible-in-ring_buf.patch b/queue-6.8/ring-buffer-use-wait_event_interruptible-in-ring_buf.patch new file mode 100644 index 00000000000..20bb08e151f --- /dev/null +++ b/queue-6.8/ring-buffer-use-wait_event_interruptible-in-ring_buf.patch @@ -0,0 +1,203 @@ +From 7c948143d5a288defed1963933ca86d316ef2e03 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 08:15:07 -0400 +Subject: ring-buffer: Use wait_event_interruptible() in ring_buffer_wait() + +From: Steven Rostedt (Google) + +[ Upstream commit 7af9ded0c2caac0a95f33df5cb04706b0f502588 ] + +Convert ring_buffer_wait() over to wait_event_interruptible(). The default +condition is to execute the wait loop inside __wait_event() just once. + +This does not change the ring_buffer_wait() prototype yet, but +restructures the code so that it can take a "cond" and "data" parameter +and will call wait_event_interruptible() with a helper function as the +condition. + +The helper function (rb_wait_cond) takes the cond function and data +parameters. It will first check if the buffer hit the watermark defined by +the "full" parameter and then call the passed in condition parameter. If +either are true, it returns true. + +If rb_wait_cond() does not return true, it will set the appropriate +"waiters_pending" flag and returns false. 
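
For reference, the default condition described above, lifted from the hunk below and annotated; the only subtlety is that wait_event_interruptible() evaluates its condition twice before it actually sleeps, so a "wake up once" condition has to count past two calls rather than one.

    typedef bool (*ring_buffer_cond_fn)(void *data);

    /* Default condition: leave the wait loop after the first real wakeup. */
    static bool rb_wait_once(void *data)
    {
            long *once = data;

            /* wait_event_interruptible() calls this twice before sleeping. */
            if (*once > 1)
                    return true;

            (*once)++;
            return false;
    }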
+ +Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wgsNgewHFxZAJiAQznwPMqEtQmi1waeS2O1v6L4c_Um5A@mail.gmail.com/ +Link: https://lore.kernel.org/linux-trace-kernel/20240312121703.399598519@goodmis.org + +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mark Rutland +Cc: Mathieu Desnoyers +Cc: Andrew Morton +Cc: Linus Torvalds +Cc: linke li +Cc: Rabin Vincent +Fixes: f3ddb74ad0790 ("tracing: Wake up ring buffer waiters on closing of the file") +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + include/linux/ring_buffer.h | 1 + + kernel/trace/ring_buffer.c | 116 +++++++++++++++++++++--------------- + 2 files changed, 69 insertions(+), 48 deletions(-) + +diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h +index fa802db216f94..338a33db1577e 100644 +--- a/include/linux/ring_buffer.h ++++ b/include/linux/ring_buffer.h +@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k + __ring_buffer_alloc((size), (flags), &__key); \ + }) + ++typedef bool (*ring_buffer_cond_fn)(void *data); + int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); + __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + struct file *filp, poll_table *poll_table, int full); +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 99fdda29ce4e9..a75b644bdd351 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -843,43 +843,15 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) + return ret; + } + +-/** +- * ring_buffer_wait - wait for input to the ring buffer +- * @buffer: buffer to wait on +- * @cpu: the cpu buffer to wait on +- * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS +- * +- * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon +- * as data is added to any of the @buffer's cpu buffers. Otherwise +- * it will wait for data to be added to a specific cpu buffer. +- */ +-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) ++static inline bool ++rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer, ++ int cpu, int full, ring_buffer_cond_fn cond, void *data) + { +- struct ring_buffer_per_cpu *cpu_buffer; +- DEFINE_WAIT(wait); +- struct rb_irq_work *work; +- int ret = 0; +- +- /* +- * Depending on what the caller is waiting for, either any +- * data in any cpu buffer, or a specific buffer, put the +- * caller on the appropriate wait queue. +- */ +- if (cpu == RING_BUFFER_ALL_CPUS) { +- work = &buffer->irq_work; +- /* Full only makes sense on per cpu reads */ +- full = 0; +- } else { +- if (!cpumask_test_cpu(cpu, buffer->cpumask)) +- return -ENODEV; +- cpu_buffer = buffer->buffers[cpu]; +- work = &cpu_buffer->irq_work; +- } ++ if (rb_watermark_hit(buffer, cpu, full)) ++ return true; + +- if (full) +- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); +- else +- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); ++ if (cond(data)) ++ return true; + + /* + * The events can happen in critical sections where +@@ -902,27 +874,75 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) + * a task has been queued. It's OK for spurious wake ups. 
+ */ + if (full) +- work->full_waiters_pending = true; ++ rbwork->full_waiters_pending = true; + else +- work->waiters_pending = true; ++ rbwork->waiters_pending = true; + +- if (rb_watermark_hit(buffer, cpu, full)) +- goto out; ++ return false; ++} + +- if (signal_pending(current)) { +- ret = -EINTR; +- goto out; ++/* ++ * The default wait condition for ring_buffer_wait() is to just to exit the ++ * wait loop the first time it is woken up. ++ */ ++static bool rb_wait_once(void *data) ++{ ++ long *once = data; ++ ++ /* wait_event() actually calls this twice before scheduling*/ ++ if (*once > 1) ++ return true; ++ ++ (*once)++; ++ return false; ++} ++ ++/** ++ * ring_buffer_wait - wait for input to the ring buffer ++ * @buffer: buffer to wait on ++ * @cpu: the cpu buffer to wait on ++ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS ++ * ++ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon ++ * as data is added to any of the @buffer's cpu buffers. Otherwise ++ * it will wait for data to be added to a specific cpu buffer. ++ */ ++int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) ++{ ++ struct ring_buffer_per_cpu *cpu_buffer; ++ struct wait_queue_head *waitq; ++ ring_buffer_cond_fn cond; ++ struct rb_irq_work *rbwork; ++ void *data; ++ long once = 0; ++ int ret = 0; ++ ++ cond = rb_wait_once; ++ data = &once; ++ ++ /* ++ * Depending on what the caller is waiting for, either any ++ * data in any cpu buffer, or a specific buffer, put the ++ * caller on the appropriate wait queue. ++ */ ++ if (cpu == RING_BUFFER_ALL_CPUS) { ++ rbwork = &buffer->irq_work; ++ /* Full only makes sense on per cpu reads */ ++ full = 0; ++ } else { ++ if (!cpumask_test_cpu(cpu, buffer->cpumask)) ++ return -ENODEV; ++ cpu_buffer = buffer->buffers[cpu]; ++ rbwork = &cpu_buffer->irq_work; + } + +- schedule(); +- out: + if (full) +- finish_wait(&work->full_waiters, &wait); ++ waitq = &rbwork->full_waiters; + else +- finish_wait(&work->waiters, &wait); ++ waitq = &rbwork->waiters; + +- if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current)) +- ret = -EINTR; ++ ret = wait_event_interruptible((*waitq), ++ rb_wait_cond(rbwork, buffer, cpu, full, cond, data)); + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.8/s390-zcrypt-fix-reference-counting-on-zcrypt-card-ob.patch b/queue-6.8/s390-zcrypt-fix-reference-counting-on-zcrypt-card-ob.patch new file mode 100644 index 00000000000..cf90e6b5863 --- /dev/null +++ b/queue-6.8/s390-zcrypt-fix-reference-counting-on-zcrypt-card-ob.patch @@ -0,0 +1,124 @@ +From e9c5e0368397e22652ecccdd15655b5b00eec1a0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 15:20:09 +0100 +Subject: s390/zcrypt: fix reference counting on zcrypt card objects + +From: Harald Freudenberger + +[ Upstream commit 50ed48c80fecbe17218afed4f8bed005c802976c ] + +Tests with hot-plugging crytpo cards on KVM guests with debug +kernel build revealed an use after free for the load field of +the struct zcrypt_card. The reason was an incorrect reference +handling of the zcrypt card object which could lead to a free +of the zcrypt card object while it was still in use. + +This is an example of the slab message: + + kernel: 0x00000000885a7512-0x00000000885a7513 @offset=1298. 
First byte 0x68 instead of 0x6b + kernel: Allocated in zcrypt_card_alloc+0x36/0x70 [zcrypt] age=18046 cpu=3 pid=43 + kernel: kmalloc_trace+0x3f2/0x470 + kernel: zcrypt_card_alloc+0x36/0x70 [zcrypt] + kernel: zcrypt_cex4_card_probe+0x26/0x380 [zcrypt_cex4] + kernel: ap_device_probe+0x15c/0x290 + kernel: really_probe+0xd2/0x468 + kernel: driver_probe_device+0x40/0xf0 + kernel: __device_attach_driver+0xc0/0x140 + kernel: bus_for_each_drv+0x8c/0xd0 + kernel: __device_attach+0x114/0x198 + kernel: bus_probe_device+0xb4/0xc8 + kernel: device_add+0x4d2/0x6e0 + kernel: ap_scan_adapter+0x3d0/0x7c0 + kernel: ap_scan_bus+0x5a/0x3b0 + kernel: ap_scan_bus_wq_callback+0x40/0x60 + kernel: process_one_work+0x26e/0x620 + kernel: worker_thread+0x21c/0x440 + kernel: Freed in zcrypt_card_put+0x54/0x80 [zcrypt] age=9024 cpu=3 pid=43 + kernel: kfree+0x37e/0x418 + kernel: zcrypt_card_put+0x54/0x80 [zcrypt] + kernel: ap_device_remove+0x4c/0xe0 + kernel: device_release_driver_internal+0x1c4/0x270 + kernel: bus_remove_device+0x100/0x188 + kernel: device_del+0x164/0x3c0 + kernel: device_unregister+0x30/0x90 + kernel: ap_scan_adapter+0xc8/0x7c0 + kernel: ap_scan_bus+0x5a/0x3b0 + kernel: ap_scan_bus_wq_callback+0x40/0x60 + kernel: process_one_work+0x26e/0x620 + kernel: worker_thread+0x21c/0x440 + kernel: kthread+0x150/0x168 + kernel: __ret_from_fork+0x3c/0x58 + kernel: ret_from_fork+0xa/0x30 + kernel: Slab 0x00000372022169c0 objects=20 used=18 fp=0x00000000885a7c88 flags=0x3ffff00000000a00(workingset|slab|node=0|zone=1|lastcpupid=0x1ffff) + kernel: Object 0x00000000885a74b8 @offset=1208 fp=0x00000000885a7c88 + kernel: Redzone 00000000885a74b0: bb bb bb bb bb bb bb bb ........ + kernel: Object 00000000885a74b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk + kernel: Object 00000000885a74c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk + kernel: Object 00000000885a74d8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk + kernel: Object 00000000885a74e8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk + kernel: Object 00000000885a74f8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk + kernel: Object 00000000885a7508: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 68 4b 6b 6b 6b a5 kkkkkkkkkkhKkkk. + kernel: Redzone 00000000885a7518: bb bb bb bb bb bb bb bb ........ 
+ kernel: Padding 00000000885a756c: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZ + kernel: CPU: 0 PID: 387 Comm: systemd-udevd Not tainted 6.8.0-HF #2 + kernel: Hardware name: IBM 3931 A01 704 (KVM/Linux) + kernel: Call Trace: + kernel: [<00000000ca5ab5b8>] dump_stack_lvl+0x90/0x120 + kernel: [<00000000c99d78bc>] check_bytes_and_report+0x114/0x140 + kernel: [<00000000c99d53cc>] check_object+0x334/0x3f8 + kernel: [<00000000c99d820c>] alloc_debug_processing+0xc4/0x1f8 + kernel: [<00000000c99d852e>] get_partial_node.part.0+0x1ee/0x3e0 + kernel: [<00000000c99d94ec>] ___slab_alloc+0xaf4/0x13c8 + kernel: [<00000000c99d9e38>] __slab_alloc.constprop.0+0x78/0xb8 + kernel: [<00000000c99dc8dc>] __kmalloc+0x434/0x590 + kernel: [<00000000c9b4c0ce>] ext4_htree_store_dirent+0x4e/0x1c0 + kernel: [<00000000c9b908a2>] htree_dirblock_to_tree+0x17a/0x3f0 + kernel: [<00000000c9b919dc>] ext4_htree_fill_tree+0x134/0x400 + kernel: [<00000000c9b4b3d0>] ext4_dx_readdir+0x160/0x2f0 + kernel: [<00000000c9b4bedc>] ext4_readdir+0x5f4/0x760 + kernel: [<00000000c9a7efc4>] iterate_dir+0xb4/0x280 + kernel: [<00000000c9a7f1ea>] __do_sys_getdents64+0x5a/0x120 + kernel: [<00000000ca5d6946>] __do_syscall+0x256/0x310 + kernel: [<00000000ca5eea10>] system_call+0x70/0x98 + kernel: INFO: lockdep is turned off. + kernel: FIX kmalloc-96: Restoring Poison 0x00000000885a7512-0x00000000885a7513=0x6b + kernel: FIX kmalloc-96: Marking all objects used + +The fix is simple: Before use of the queue not only the queue object +but also the card object needs to increase it's reference count +with a call to zcrypt_card_get(). Similar after use of the queue +not only the queue but also the card object's reference count is +decreased with zcrypt_card_put(). + +Signed-off-by: Harald Freudenberger +Reviewed-by: Holger Dengler +Cc: stable@vger.kernel.org +Signed-off-by: Heiko Carstens +Signed-off-by: Sasha Levin +--- + drivers/s390/crypto/zcrypt_api.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c +index 74200f54dfff7..53ddae5ad890b 100644 +--- a/drivers/s390/crypto/zcrypt_api.c ++++ b/drivers/s390/crypto/zcrypt_api.c +@@ -579,6 +579,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, + { + if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) + return NULL; ++ zcrypt_card_get(zc); + zcrypt_queue_get(zq); + get_device(&zq->queue->ap_dev.device); + atomic_add(weight, &zc->load); +@@ -598,6 +599,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc, + atomic_sub(weight, &zq->load); + put_device(&zq->queue->ap_dev.device); + zcrypt_queue_put(zq); ++ zcrypt_card_put(zc); + module_put(mod); + } + +-- +2.43.0 + diff --git a/queue-6.8/selftests-mqueue-set-timeout-to-180-seconds.patch b/queue-6.8/selftests-mqueue-set-timeout-to-180-seconds.patch new file mode 100644 index 00000000000..b03914a504d --- /dev/null +++ b/queue-6.8/selftests-mqueue-set-timeout-to-180-seconds.patch @@ -0,0 +1,35 @@ +From 71bec925fe7f8611e1cde9f2ae20b07275714da4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 19 Feb 2024 16:08:02 -0800 +Subject: selftests/mqueue: Set timeout to 180 seconds + +From: SeongJae Park + +[ Upstream commit 85506aca2eb4ea41223c91c5fe25125953c19b13 ] + +While mq_perf_tests runs with the default kselftest timeout limit, which +is 45 seconds, the test takes about 60 seconds to complete on i3.metal +AWS instances. Hence, the test always times out. Increase the timeout +to 180 seconds. 
+ +Fixes: 852c8cbf34d3 ("selftests/kselftest/runner.sh: Add 45 second timeout per test") +Cc: # 5.4.x +Signed-off-by: SeongJae Park +Reviewed-by: Kees Cook +Signed-off-by: Shuah Khan +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/mqueue/setting | 1 + + 1 file changed, 1 insertion(+) + create mode 100644 tools/testing/selftests/mqueue/setting + +diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting +new file mode 100644 +index 0000000000000..a953c96aa16e1 +--- /dev/null ++++ b/tools/testing/selftests/mqueue/setting +@@ -0,0 +1 @@ ++timeout=180 +-- +2.43.0 + diff --git a/queue-6.8/serial-core-only-stop-transmit-when-hw-fifo-is-empty.patch b/queue-6.8/serial-core-only-stop-transmit-when-hw-fifo-is-empty.patch new file mode 100644 index 00000000000..b37aa0856ce --- /dev/null +++ b/queue-6.8/serial-core-only-stop-transmit-when-hw-fifo-is-empty.patch @@ -0,0 +1,46 @@ +From f2517cf23dc69e01c7effc8f2a243b24fd71b11e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 3 Mar 2024 16:08:07 +0100 +Subject: serial: core: only stop transmit when HW fifo is empty + +From: Jonas Gorski + +[ Upstream commit 7bfb915a597a301abb892f620fe5c283a9fdbd77 ] + +If the circular buffer is empty, it just means we fit all characters to +send into the HW fifo, but not that the hardware finished transmitting +them. + +So if we immediately call stop_tx() after that, this may abort any +pending characters in the HW fifo, and cause dropped characters on the +console. + +Fix this by only stopping tx when the tx HW fifo is actually empty. + +Fixes: 8275b48b2780 ("tty: serial: introduce transmit helpers") +Cc: stable@vger.kernel.org +Signed-off-by: Jonas Gorski +Link: https://lore.kernel.org/r/20240303150807.68117-1-jonas.gorski@gmail.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + include/linux/serial_core.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h +index 55b1f3ba48ac1..bb0f2d4ac62f6 100644 +--- a/include/linux/serial_core.h ++++ b/include/linux/serial_core.h +@@ -786,7 +786,8 @@ enum UART_TX_FLAGS { + if (pending < WAKEUP_CHARS) { \ + uart_write_wakeup(__port); \ + \ +- if (!((flags) & UART_TX_NOSTOP) && pending == 0) \ ++ if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \ ++ __port->ops->tx_empty(__port)) \ + __port->ops->stop_tx(__port); \ + } \ + \ +-- +2.43.0 + diff --git a/queue-6.8/serial-lock-console-when-calling-into-driver-before-.patch b/queue-6.8/serial-lock-console-when-calling-into-driver-before-.patch new file mode 100644 index 00000000000..ebe6b7ef3c0 --- /dev/null +++ b/queue-6.8/serial-lock-console-when-calling-into-driver-before-.patch @@ -0,0 +1,187 @@ +From 67ae2da3caf56ceedeee500326fde2cbdf672b71 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 4 Mar 2024 13:43:49 -0800 +Subject: serial: Lock console when calling into driver before registration + +From: Peter Collingbourne + +[ Upstream commit 801410b26a0e8b8a16f7915b2b55c9528b69ca87 ] + +During the handoff from earlycon to the real console driver, we have +two separate drivers operating on the same device concurrently. In the +case of the 8250 driver these concurrent accesses cause problems due +to the driver's use of banked registers, controlled by LCR.DLAB. 
It is +possible for the setup(), config_port(), pm() and set_mctrl() callbacks +to set DLAB, which can cause the earlycon code that intends to access +TX to instead access DLL, leading to missed output and corruption on +the serial line due to unintended modifications to the baud rate. + +In particular, for setup() we have: + +univ8250_console_setup() +-> serial8250_console_setup() +-> uart_set_options() +-> serial8250_set_termios() +-> serial8250_do_set_termios() +-> serial8250_do_set_divisor() + +For config_port() we have: + +serial8250_config_port() +-> autoconfig() + +For pm() we have: + +serial8250_pm() +-> serial8250_do_pm() +-> serial8250_set_sleep() + +For set_mctrl() we have (for some devices): + +serial8250_set_mctrl() +-> omap8250_set_mctrl() +-> __omap8250_set_mctrl() + +To avoid such problems, let's make it so that the console is locked +during pre-registration calls to these callbacks, which will prevent +the earlycon driver from running concurrently. + +Remove the partial solution to this problem in the 8250 driver +that locked the console only during autoconfig_irq(), as this would +result in a deadlock with the new approach. The console continues +to be locked during autoconfig_irq() because it can only be called +through uart_configure_port(). + +Although this patch introduces more locking than strictly necessary +(and in particular it also locks during the call to rs485_config() +which is not affected by this issue as far as I can tell), it follows +the principle that it is the responsibility of the generic console +code to manage the earlycon handoff by ensuring that earlycon and real +console driver code cannot run concurrently, and not the individual +drivers. + +Signed-off-by: Peter Collingbourne +Reviewed-by: John Ogness +Link: https://linux-review.googlesource.com/id/I7cf8124dcebf8618e6b2ee543fa5b25532de55d8 +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240304214350.501253-1-pcc@google.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/tty/serial/8250/8250_port.c | 6 ------ + drivers/tty/serial/serial_core.c | 12 ++++++++++++ + kernel/printk/printk.c | 21 ++++++++++++++++++--- + 3 files changed, 30 insertions(+), 9 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 8ca061d3bbb92..1d65055dde276 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -1329,9 +1329,6 @@ static void autoconfig_irq(struct uart_8250_port *up) + inb_p(ICP); + } + +- if (uart_console(port)) +- console_lock(); +- + /* forget possible initially masked and pending IRQ */ + probe_irq_off(probe_irq_on()); + save_mcr = serial8250_in_MCR(up); +@@ -1371,9 +1368,6 @@ static void autoconfig_irq(struct uart_8250_port *up) + if (port->flags & UPF_FOURPORT) + outb_p(save_ICP, ICP); + +- if (uart_console(port)) +- console_unlock(); +- + port->irq = (irq > 0) ? irq : 0; + } + +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index d6a58a9e072a1..ff85ebd3a007d 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -2608,7 +2608,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + port->type = PORT_UNKNOWN; + flags |= UART_CONFIG_TYPE; + } ++ /* Synchronize with possible boot console. 
*/ ++ if (uart_console(port)) ++ console_lock(); + port->ops->config_port(port, flags); ++ if (uart_console(port)) ++ console_unlock(); + } + + if (port->type != PORT_UNKNOWN) { +@@ -2616,6 +2621,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + + uart_report_port(drv, port); + ++ /* Synchronize with possible boot console. */ ++ if (uart_console(port)) ++ console_lock(); ++ + /* Power up port for set_mctrl() */ + uart_change_pm(state, UART_PM_STATE_ON); + +@@ -2632,6 +2641,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + + uart_rs485_config(port); + ++ if (uart_console(port)) ++ console_unlock(); ++ + /* + * If this driver supports console, and it hasn't been + * successfully registered yet, try to re-register it. +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 72f6a564e832f..a11e1b6f29c04 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -3295,6 +3295,21 @@ static int __init keep_bootcon_setup(char *str) + + early_param("keep_bootcon", keep_bootcon_setup); + ++static int console_call_setup(struct console *newcon, char *options) ++{ ++ int err; ++ ++ if (!newcon->setup) ++ return 0; ++ ++ /* Synchronize with possible boot console. */ ++ console_lock(); ++ err = newcon->setup(newcon, options); ++ console_unlock(); ++ ++ return err; ++} ++ + /* + * This is called by register_console() to try to match + * the newly registered console with any of the ones selected +@@ -3330,8 +3345,8 @@ static int try_enable_preferred_console(struct console *newcon, + if (_braille_register_console(newcon, c)) + return 0; + +- if (newcon->setup && +- (err = newcon->setup(newcon, c->options)) != 0) ++ err = console_call_setup(newcon, c->options); ++ if (err) + return err; + } + newcon->flags |= CON_ENABLED; +@@ -3357,7 +3372,7 @@ static void try_enable_default_console(struct console *newcon) + if (newcon->index < 0) + newcon->index = 0; + +- if (newcon->setup && newcon->setup(newcon, NULL) != 0) ++ if (console_call_setup(newcon, NULL) != 0) + return; + + newcon->flags |= CON_ENABLED; +-- +2.43.0 + diff --git a/queue-6.8/serial-max310x-fix-null-pointer-dereference-in-i2c-i.patch b/queue-6.8/serial-max310x-fix-null-pointer-dereference-in-i2c-i.patch new file mode 100644 index 00000000000..0f0b1c86aa3 --- /dev/null +++ b/queue-6.8/serial-max310x-fix-null-pointer-dereference-in-i2c-i.patch @@ -0,0 +1,62 @@ +From 53b5d7a5dfd70e93b147d7fe7f1b040ce4d776e7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 18 Jan 2024 10:21:57 -0500 +Subject: serial: max310x: fix NULL pointer dereference in I2C instantiation + +From: Hugo Villeneuve + +[ Upstream commit 0d27056c24efd3d63a03f3edfbcfc4827086b110 ] + +When trying to instantiate a max14830 device from userspace: + + echo max14830 0x60 > /sys/bus/i2c/devices/i2c-2/new_device + +we get the following error: + + Unable to handle kernel NULL pointer dereference at virtual address... + ... + Call trace: + max310x_i2c_probe+0x48/0x170 [max310x] + i2c_device_probe+0x150/0x2a0 + ... + +Add check for validity of devtype to prevent the error, and abort probe +with a meaningful error message. 
+ +Fixes: 2e1f2d9a9bdb ("serial: max310x: implement I2C support") +Cc: stable@vger.kernel.org +Reviewed-by: Andy Shevchenko +Signed-off-by: Hugo Villeneuve +Link: https://lore.kernel.org/r/20240118152213.2644269-2-hugo@hugovil.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/tty/serial/max310x.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c +index f70e2c277ab7e..198413df091b4 100644 +--- a/drivers/tty/serial/max310x.c ++++ b/drivers/tty/serial/max310x.c +@@ -1635,13 +1635,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr, + + static int max310x_i2c_probe(struct i2c_client *client) + { +- const struct max310x_devtype *devtype = +- device_get_match_data(&client->dev); ++ const struct max310x_devtype *devtype; + struct i2c_client *port_client; + struct regmap *regmaps[4]; + unsigned int i; + u8 port_addr; + ++ devtype = device_get_match_data(&client->dev); ++ if (!devtype) ++ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n"); ++ + if (client->addr < devtype->slave_addr.min || + client->addr > devtype->slave_addr.max) + return dev_err_probe(&client->dev, -EINVAL, +-- +2.43.0 + diff --git a/queue-6.8/series b/queue-6.8/series index e69de29bb2d..6fb98eed556 100644 --- a/queue-6.8/series +++ b/queue-6.8/series @@ -0,0 +1,241 @@ +drm-vmwgfx-unmap-the-surface-before-resetting-it-on-.patch +wifi-brcmfmac-fix-use-after-free-bug-in-brcmf_cfg802.patch +wifi-brcmfmac-avoid-invalid-list-operation-when-vend.patch +media-staging-ipu3-imgu-set-fields-before-media_enti.patch +arm64-dts-qcom-sc7280-add-additional-msi-interrupts.patch +remoteproc-virtio-fix-wdg-cannot-recovery-remote-pro.patch +clk-qcom-gcc-sdm845-add-soft-dependency-on-rpmhpd.patch +smack-set-smack64transmute-only-for-dirs-in-smack_in.patch +smack-handle-smack64transmute-in-smack_inode_setsecu.patch +arm-dts-marvell-fix-maxium-maxim-typo-in-brownstone-.patch +drm-vmwgfx-fix-possible-null-pointer-derefence-with-.patch +arm64-dts-qcom-sm8450-hdk-correct-amic4-and-amic5-mi.patch +serial-max310x-fix-null-pointer-dereference-in-i2c-i.patch +drm-vmwgfx-fix-the-lifetime-of-the-bo-cursor-memory.patch +pci_iounmap-fix-mmio-mapping-leak.patch +media-xc4000-fix-atomicity-violation-in-xc4000_get_f.patch +media-mc-add-local-pad-to-pipeline-regardless-of-the.patch +media-mc-fix-flags-handling-when-creating-pad-links.patch +media-nxp-imx8-isi-check-whether-crossbar-pad-is-non.patch +media-mc-add-num_links-flag-to-media_pad.patch +media-mc-rename-pad-variable-to-clarify-intent.patch +media-mc-expand-must_connect-flag-to-always-require-.patch +media-nxp-imx8-isi-mark-all-crossbar-sink-pads-as-mu.patch +md-use-rcu-lock-to-protect-traversal-in-md_spares_ne.patch +kvm-always-flush-async-pf-workqueue-when-vcpu-is-bei.patch +arm64-dts-qcom-sm8550-qrd-correct-wcd9385-tx-port-ma.patch +arm64-dts-qcom-sm8550-mtp-correct-wcd9385-tx-port-ma.patch +cpufreq-amd-pstate-fix-min_perf-assignment-in-amd_ps.patch +thermal-intel-fix-intel_tcc_get_temp-to-support-nega.patch +powercap-intel_rapl-fix-a-null-pointer-dereference.patch +powercap-intel_rapl-fix-locking-in-tpmi-rapl.patch +powercap-intel_rapl_tpmi-fix-a-register-bug.patch +powercap-intel_rapl_tpmi-fix-system-domain-probing.patch +powerpc-smp-adjust-nr_cpu_ids-to-cover-all-threads-o.patch +powerpc-smp-increase-nr_cpu_ids-to-include-the-boot-.patch +sparc64-nmi-watchdog-fix-return-value-of-__setup-han.patch 
+sparc-vdso-fix-return-value-of-__setup-handler.patch +crypto-qat-change-slas-cleanup-flow-at-shutdown.patch +crypto-qat-resolve-race-condition-during-aer-recover.patch +selftests-mqueue-set-timeout-to-180-seconds.patch +pinctrl-qcom-sm8650-lpass-lpi-correct-kconfig-name.patch +ext4-correct-best-extent-lstart-adjustment-logic.patch +drm-amdgpu-display-address-kdoc-for-is_psr_su-in-fil.patch +block-clear-zone-limits-for-a-non-zoned-stacked-queu.patch +kasan-test-avoid-gcc-warning-for-intentional-overflo.patch +bounds-support-non-power-of-two-config_nr_cpus.patch +fat-fix-uninitialized-field-in-nostale-filehandles.patch +fuse-fix-vm_mayshare-and-direct_io_allow_mmap.patch +mfd-twl-select-mfd_core.patch +ubifs-set-page-uptodate-in-the-correct-place.patch +ubi-check-for-too-small-leb-size-in-vtbl-code.patch +ubi-correct-the-calculation-of-fastmap-size.patch +ubifs-ubifs_symlink-fix-memleak-of-inode-i_link-in-e.patch +mtd-rawnand-meson-fix-scrambling-mode-value-in-comma.patch +md-md-bitmap-fix-incorrect-usage-for-sb_index.patch +x86-nmi-fix-the-inverse-in-nmi-handler-check.patch +parisc-unaligned-rewrite-64-bit-inline-assembly-of-e.patch +parisc-avoid-clobbering-the-c-b-bits-in-the-psw-with.patch +parisc-fix-ip_fast_csum.patch +parisc-fix-csum_ipv6_magic-on-32-bit-systems.patch +parisc-fix-csum_ipv6_magic-on-64-bit-systems.patch +parisc-strip-upper-32-bit-of-sum-in-csum_ipv6_magic-.patch +md-raid5-fix-atomicity-violation-in-raid5_cache_coun.patch +iio-adc-rockchip_saradc-fix-bitmask-for-channels-on-.patch +iio-adc-rockchip_saradc-use-mask-for-write_enable-bi.patch +docs-restore-smart-quotes-for-quotes.patch +cpufreq-limit-resolving-a-frequency-to-policy-min-ma.patch +pm-suspend-set-mem_sleep_current-during-kernel-comma.patch +vfio-pds-always-clear-the-save-restore-fds-on-reset.patch +clk-qcom-gcc-ipq5018-fix-terminating-of-frequency-ta.patch +clk-qcom-gcc-ipq6018-fix-terminating-of-frequency-ta.patch +clk-qcom-gcc-ipq8074-fix-terminating-of-frequency-ta.patch +clk-qcom-gcc-ipq9574-fix-terminating-of-frequency-ta.patch +clk-qcom-camcc-sc8280xp-fix-terminating-of-frequency.patch +clk-qcom-mmcc-apq8084-fix-terminating-of-frequency-t.patch +clk-qcom-mmcc-msm8974-fix-terminating-of-frequency-t.patch +usb-xhci-add-error-handling-in-xhci_map_urb_for_dma.patch +powerpc-fsl-fix-mfpmr-build-errors-with-newer-binuti.patch +usb-serial-ftdi_sio-add-support-for-gmc-z216c-adapte.patch +usb-serial-add-device-id-for-verifone-adapter.patch +usb-serial-cp210x-add-id-for-mgp-instruments-pds100.patch +wifi-mac80211-track-capability-opmode-nss-separately.patch +usb-serial-option-add-meig-smart-slm320-product.patch +kvm-x86-xen-inject-vcpu-upcall-vector-when-local-api.patch +usb-serial-cp210x-add-pid-vid-for-tdk-nc0110013m-and.patch +pm-sleep-wakeirq-fix-wake-irq-warning-in-system-susp.patch +mmc-tmio-avoid-concurrent-runs-of-mmc_request_done.patch +fuse-replace-remaining-make_bad_inode-with-fuse_make.patch +fuse-fix-root-lookup-with-nonzero-generation.patch +fuse-don-t-unhash-root.patch +usb-typec-ucsi-clean-up-ucsi_cable_prop-macros.patch +usb-dwc3-am62-fix-module-unload-reload-behavior.patch +usb-dwc3-am62-disable-wakeup-at-remove.patch +serial-core-only-stop-transmit-when-hw-fifo-is-empty.patch +serial-lock-console-when-calling-into-driver-before-.patch +btrfs-qgroup-always-free-reserved-space-for-extent-r.patch +btrfs-fix-off-by-one-chunk-length-calculation-at-con.patch +wifi-rtw88-add-missing-vid-pids-for-8811cu-and-8821c.patch +docs-makefile-add-dependency-to-ynl_index-for-target.patch 
+pci-pm-drain-runtime-idle-callbacks-before-driver-re.patch +pci-dpc-quirk-pio-log-size-for-intel-raptor-lake-roo.patch +acpi-cppc-use-access_width-over-bit_width-for-system.patch +revert-revert-md-raid5-wait-for-md_sb_change_pending.patch +md-don-t-clear-md_recovery_frozen-for-new-dm-raid-un.patch +md-export-helpers-to-stop-sync_thread.patch +md-export-helper-md_is_rdwr.patch +md-add-a-new-helper-reshape_interrupted.patch +dm-raid-really-frozen-sync_thread-during-suspend.patch +md-dm-raid-don-t-call-md_reap_sync_thread-directly.patch +dm-raid-add-a-new-helper-prepare_suspend-in-md_perso.patch +dm-raid456-md-raid456-fix-a-deadlock-for-dm-raid456-.patch +dm-raid-fix-lockdep-waring-in-pers-hot_add_disk.patch +powerpc-xor_vmx-add-mhard-float-to-cflags.patch +block-fix-page-refcounts-for-unaligned-buffers-in-__.patch +mac802154-fix-llsec-key-resources-release-in-mac8021.patch +mm-swap-fix-race-between-free_swap_and_cache-and-swa.patch +mmc-core-fix-switch-on-gp3-partition.patch +bluetooth-btnxpuart-fix-btnxpuart_close.patch +leds-trigger-netdev-fix-kernel-panic-on-interface-re.patch +drm-etnaviv-restore-some-id-values.patch +landlock-warn-once-if-a-landlock-action-is-requested.patch +io_uring-fix-mshot-read-defer-taskrun-cqe-posting.patch +hwmon-amc6821-add-of_match-table.patch +io_uring-fix-io_queue_proc-modifying-req-flags.patch +ext4-fix-corruption-during-on-line-resize.patch +nvmem-meson-efuse-fix-function-pointer-type-mismatch.patch +slimbus-core-remove-usage-of-the-deprecated-ida_simp.patch +phy-tegra-xusb-add-api-to-retrieve-the-port-number-o.patch +usb-gadget-tegra-xudc-fix-usb3-phy-retrieval-logic.patch +speakup-fix-8bit-characters-from-direct-synth.patch +debugfs-fix-wait-cancellation-handling-during-remove.patch +pci-aer-block-runtime-suspend-when-handling-errors.patch +io_uring-net-correctly-handle-multishot-recvmsg-retr.patch +io_uring-fix-mshot-io-wq-checks.patch +pci-qcom-disable-aspm-l0s-for-sc8280xp-sa8540p-and-s.patch +sparc32-fix-parport-build-with-sparc32.patch +nfs-fix-uaf-in-direct-writes.patch +nfs-read-unlock-folio-on-nfs_page_create_from_folio-.patch +kbuild-move-wenum-compare-conditional-enum-conversio.patch +pci-qcom-enable-bdf-to-sid-translation-properly.patch +pci-dwc-endpoint-fix-advertised-resizable-bar-size.patch +pci-hv-fix-ring-buffer-size-calculation.patch +cifs-prevent-updating-file-size-from-server-if-we-ha.patch +cifs-allow-changing-password-during-remount.patch +thermal-drivers-mediatek-fix-control-buffer-enableme.patch +vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch +vfio-pci-lock-external-intx-masking-ops.patch +vfio-platform-disable-virqfds-on-cleanup.patch +vfio-platform-create-persistent-irq-handlers.patch +vfio-fsl-mc-block-calling-interrupt-handler-without-.patch +tpm-tpm_tis-avoid-warning-splat-at-shutdown.patch +ksmbd-replace-generic_fillattr-with-vfs_getattr.patch +ksmbd-retrieve-number-of-blocks-using-vfs_getattr-in.patch +platform-x86-intel-tpmi-change-vsec-offset-to-u64.patch +io_uring-rw-return-iou_issue_skip_complete-for-multi.patch +io_uring-clean-rings-on-no_mmap-alloc-fail.patch +ring-buffer-do-not-set-shortest_full-when-full-targe.patch +ring-buffer-fix-full_waiters_pending-in-poll.patch +ring-buffer-use-wait_event_interruptible-in-ring_buf.patch +tracing-ring-buffer-fix-wait_on_pipe-race.patch +dlm-fix-user-space-lkb-refcounting.patch +soc-fsl-qbman-always-disable-interrupts-when-taking-.patch +soc-fsl-qbman-use-raw-spinlock-for-cgr_lock.patch +s390-zcrypt-fix-reference-counting-on-zcrypt-card-ob.patch 
+drm-probe-helper-warn-about-negative-.get_modes.patch +drm-panel-do-not-return-negative-error-codes-from-dr.patch +drm-exynos-do-not-return-negative-values-from-.get_m.patch +drm-imx-ipuv3-do-not-return-negative-values-from-.ge.patch +drm-vc4-hdmi-do-not-return-negative-values-from-.get.patch +clocksource-drivers-timer-riscv-clear-timer-interrup.patch +memtest-use-read-write-_once-in-memory-scanning.patch +revert-block-mq-deadline-use-correct-way-to-throttli.patch +lsm-use-32-bit-compatible-data-types-in-lsm-syscalls.patch +lsm-handle-the-null-buffer-case-in-lsm_fill_user_ctx.patch +f2fs-mark-inode-dirty-for-fi_atomic_committed-flag.patch +f2fs-truncate-page-cache-before-clearing-flags-when-.patch +nilfs2-fix-failure-to-detect-dat-corruption-in-btree.patch +nilfs2-prevent-kernel-bug-at-submit_bh_wbc.patch +cifs-make-sure-server-interfaces-are-requested-only-.patch +cifs-reduce-warning-log-level-for-server-not-adverti.patch +cifs-open_cached_dir-add-file_read_ea-to-desired-acc.patch +mtd-rawnand-fix-and-simplify-again-the-continuous-re.patch +mtd-rawnand-add-a-helper-for-calculating-a-page-inde.patch +mtd-rawnand-ensure-all-continuous-terms-are-always-i.patch +mtd-rawnand-constrain-even-more-when-continuous-read.patch +cpufreq-dt-always-allocate-zeroed-cpumask.patch +io_uring-futex-always-remove-futex-entry-for-cancel-.patch +io_uring-waitid-always-remove-waitid-entry-for-cance.patch +x86-cpu-amd-update-the-zenbleed-microcode-revisions.patch +ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf.patch +net-esp-fix-bad-handling-of-pages-from-page_pool.patch +nfsd-fix-nfsd_clid_class-use-of-__string_len-macro.patch +drm-i915-add-missing-to-__assign_str-macros-in-trace.patch +net-hns3-tracing-fix-hclgevf-trace-event-strings.patch +cxl-trace-properly-initialize-cxl_poison-region-name.patch +ksmbd-fix-potencial-out-of-bounds-when-buffer-offset.patch +virtio-reenable-config-if-freezing-device-failed.patch +loongarch-change-__my_cpu_offset-definition-to-avoid.patch +loongarch-define-the-__io_aw-hook-as-mmiowb.patch +loongarch-crypto-clean-up-useless-assignment-operati.patch +wireguard-netlink-check-for-dangling-peer-via-is_dea.patch +wireguard-netlink-access-device-through-ctx-instead-.patch +wireguard-selftests-set-riscv_isa_fallback-on-riscv-.patch +ahci-asm1064-asm1166-don-t-limit-reported-ports.patch +drm-amd-display-change-default-size-for-dummy-plane-.patch +drm-amdgpu-amdgpu_ttm_gart_bind-set-gtt-bound-flag.patch +drm-amdgpu-pm-fix-null-pointer-dereference-when-get-.patch +drm-amdgpu-pm-check-the-validity-of-overdiver-power-.patch +drm-amd-display-override-min-required-dcfclk-in-dml1.patch +drm-amd-display-allow-dirty-rects-to-be-sent-to-dmub.patch +drm-amd-display-init-dppclk-from-smu-on-dcn32.patch +drm-amd-display-update-odm-when-odm-combine-is-chang.patch +drm-amd-display-fix-idle-check-for-shared-firmware-s.patch +drm-amd-display-amend-coasting-vtotal-for-replay-low.patch +drm-amd-display-lock-all-enabled-otg-pipes-even-with.patch +drm-amd-display-implement-wait_for_odm_update_pendin.patch +drm-amd-display-return-the-correct-hdcp-error-code.patch +drm-amd-display-add-a-dc_state-null-check-in-dc_stat.patch +drm-amd-display-fix-noise-issue-on-hdmi-av-mute.patch +dm-snapshot-fix-lockup-in-dm_exception_table_exit.patch +x86-pm-work-around-false-positive-kmemleak-report-in.patch +wifi-brcmfmac-add-per-vendor-feature-detection-callb.patch +wifi-brcmfmac-cfg80211-use-wsec-to-set-sae-password.patch +wifi-brcmfmac-demote-vendor-specific-attach-detach-m.patch 
+drm-ttm-make-sure-the-mapped-tt-pages-are-decrypted-.patch +drm-amd-display-unify-optimize_required-flags-and-vr.patch +drm-amd-display-add-more-checks-for-exiting-idle-in-.patch +btrfs-add-set_folio_extent_mapped-helper.patch +btrfs-replace-sb-s_blocksize-by-fs_info-sectorsize.patch +btrfs-add-helpers-to-get-inode-from-page-folio-point.patch +btrfs-add-helpers-to-get-fs_info-from-page-folio-poi.patch +btrfs-add-helper-to-get-fs_info-from-struct-inode-po.patch +btrfs-qgroup-validate-btrfs_qgroup_inherit-parameter.patch +vfio-introduce-interface-to-flush-virqfd-inject-work.patch +vfio-pci-create-persistent-intx-handler.patch +drm-bridge-add-edid_read-hook-and-drm_bridge_edid_re.patch +drm-bridge-lt8912b-use-drm_bridge_edid_read.patch +drm-bridge-lt8912b-clear-the-edid-property-on-failur.patch +drm-bridge-lt8912b-do-not-return-negative-values-fro.patch +drm-amd-display-remove-pixle-rate-limit-for-subvp.patch +drm-amd-display-revert-remove-pixle-rate-limit-for-s.patch diff --git a/queue-6.8/slimbus-core-remove-usage-of-the-deprecated-ida_simp.patch b/queue-6.8/slimbus-core-remove-usage-of-the-deprecated-ida_simp.patch new file mode 100644 index 00000000000..2d36233327d --- /dev/null +++ b/queue-6.8/slimbus-core-remove-usage-of-the-deprecated-ida_simp.patch @@ -0,0 +1,45 @@ +From 55767f1e743cad36b8a5cd47d3d0d06dd5046e08 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 24 Feb 2024 11:41:37 +0000 +Subject: slimbus: core: Remove usage of the deprecated ida_simple_xx() API + +From: Christophe JAILLET + +[ Upstream commit 89ffa4cccec54467446f141a79b9e36893079fb8 ] + +ida_alloc() and ida_free() should be preferred to the deprecated +ida_simple_get() and ida_simple_remove(). + +Note that the upper limit of ida_simple_get() is exclusive, but the one of +ida_alloc_range() is inclusive. So change this change allows one more +device. Previously address 0xFE was never used. 
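
Spelling out the off-by-one (assuming SLIM_LA_MANAGER is 0xFF, which is what the 0xFE remark above implies):

    /* Old: 'end' is exclusive, so only 0x00 .. 0xFD could be handed out. */
    ret = ida_simple_get(&ctrl->laddr_ida, 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);

    /* New: 'max' is inclusive, so 0xFE becomes a valid logical address too. */
    ret = ida_alloc_max(&ctrl->laddr_ida, SLIM_LA_MANAGER - 1, GFP_KERNEL);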
+ +Fixes: 46a2bb5a7f7e ("slimbus: core: Add slim controllers support") +Cc: Stable@vger.kernel.org +Signed-off-by: Christophe JAILLET +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20240224114137.85781-2-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/slimbus/core.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c +index d43873bb5fe6d..01cbd46219810 100644 +--- a/drivers/slimbus/core.c ++++ b/drivers/slimbus/core.c +@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev, + if (ret < 0) + goto err; + } else if (report_present) { +- ret = ida_simple_get(&ctrl->laddr_ida, +- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL); ++ ret = ida_alloc_max(&ctrl->laddr_ida, ++ SLIM_LA_MANAGER - 1, GFP_KERNEL); + if (ret < 0) + goto err; + +-- +2.43.0 + diff --git a/queue-6.8/smack-handle-smack64transmute-in-smack_inode_setsecu.patch b/queue-6.8/smack-handle-smack64transmute-in-smack_inode_setsecu.patch new file mode 100644 index 00000000000..d2e56d54257 --- /dev/null +++ b/queue-6.8/smack-handle-smack64transmute-in-smack_inode_setsecu.patch @@ -0,0 +1,44 @@ +From fe6037f80c541bc1daec4586c14fb22f58f4a3e7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 16 Nov 2023 10:01:22 +0100 +Subject: smack: Handle SMACK64TRANSMUTE in smack_inode_setsecurity() + +From: Roberto Sassu + +[ Upstream commit ac02f007d64eb2769d0bde742aac4d7a5fc6e8a5 ] + +If the SMACK64TRANSMUTE xattr is provided, and the inode is a directory, +update the in-memory inode flags by setting SMK_INODE_TRANSMUTE. + +Cc: stable@vger.kernel.org +Fixes: 5c6d1125f8db ("Smack: Transmute labels on specified directories") # v2.6.38.x +Signed-off-by: Roberto Sassu +Signed-off-by: Casey Schaufler +Signed-off-by: Sasha Levin +--- + security/smack/smack_lsm.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 72b371812a001..6e5f74813c101 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -2856,6 +2856,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, + if (value == NULL || size > SMK_LONGLABEL || size == 0) + return -EINVAL; + ++ if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) { ++ if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE || ++ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) ++ return -EINVAL; ++ ++ nsp->smk_flags |= SMK_INODE_TRANSMUTE; ++ return 0; ++ } ++ + skp = smk_import_entry(value, size); + if (IS_ERR(skp)) + return PTR_ERR(skp); +-- +2.43.0 + diff --git a/queue-6.8/smack-set-smack64transmute-only-for-dirs-in-smack_in.patch b/queue-6.8/smack-set-smack64transmute-only-for-dirs-in-smack_in.patch new file mode 100644 index 00000000000..2eaf6431230 --- /dev/null +++ b/queue-6.8/smack-set-smack64transmute-only-for-dirs-in-smack_in.patch @@ -0,0 +1,38 @@ +From 38ee712e352f29a46f8a0e64b3abddf563c5eebe Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 16 Nov 2023 10:01:21 +0100 +Subject: smack: Set SMACK64TRANSMUTE only for dirs in smack_inode_setxattr() + +From: Roberto Sassu + +[ Upstream commit 9c82169208dde516510aaba6bbd8b13976690c5d ] + +Since the SMACK64TRANSMUTE xattr makes sense only for directories, enforce +this restriction in smack_inode_setxattr(). 
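
Taken together with the SMACK64TRANSMUTE handling above, the rule both Smack patches enforce can be sketched as a single check (the helper name is illustrative; TRANS_TRUE and TRANS_TRUE_SIZE are Smack's real constants):

    /* SMACK64TRANSMUTE is only valid on directories and only as "TRUE". */
    static int transmute_xattr_ok(const struct inode *inode,
                                  const void *value, size_t size)
    {
            if (!S_ISDIR(inode->i_mode) ||
                size != TRANS_TRUE_SIZE ||
                strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
                    return -EINVAL;

            return 0;
    }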
+ +Cc: stable@vger.kernel.org +Fixes: 5c6d1125f8db ("Smack: Transmute labels on specified directories") # v2.6.38.x +Signed-off-by: Roberto Sassu +Signed-off-by: Casey Schaufler +Signed-off-by: Sasha Levin +--- + security/smack/smack_lsm.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 0fdbf04cc2583..72b371812a001 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -1314,7 +1314,8 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap, + check_star = 1; + } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { + check_priv = 1; +- if (size != TRANS_TRUE_SIZE || ++ if (!S_ISDIR(d_backing_inode(dentry)->i_mode) || ++ size != TRANS_TRUE_SIZE || + strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) + rc = -EINVAL; + } else +-- +2.43.0 + diff --git a/queue-6.8/soc-fsl-qbman-always-disable-interrupts-when-taking-.patch b/queue-6.8/soc-fsl-qbman-always-disable-interrupts-when-taking-.patch new file mode 100644 index 00000000000..7abe577afc5 --- /dev/null +++ b/queue-6.8/soc-fsl-qbman-always-disable-interrupts-when-taking-.patch @@ -0,0 +1,73 @@ +From ecdeb937626013a73de809e316fedee3d23f3e9d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Mar 2024 12:38:29 -0400 +Subject: soc: fsl: qbman: Always disable interrupts when taking cgr_lock + +From: Sean Anderson + +[ Upstream commit 584c2a9184a33a40fceee838f856de3cffa19be3 ] + +smp_call_function_single disables IRQs when executing the callback. To +prevent deadlocks, we must disable IRQs when taking cgr_lock elsewhere. +This is already done by qman_update_cgr and qman_delete_cgr; fix the +other lockers. + +Fixes: 96f413f47677 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()") +CC: stable@vger.kernel.org +Signed-off-by: Sean Anderson +Reviewed-by: Camelia Groza +Tested-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/soc/fsl/qbman/qman.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c +index 739e4eee6b75c..1bf1f1ea67f00 100644 +--- a/drivers/soc/fsl/qbman/qman.c ++++ b/drivers/soc/fsl/qbman/qman.c +@@ -1456,11 +1456,11 @@ static void qm_congestion_task(struct work_struct *work) + union qm_mc_result *mcr; + struct qman_cgr *cgr; + +- spin_lock(&p->cgr_lock); ++ spin_lock_irq(&p->cgr_lock); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + if (!qm_mc_result_timeout(&p->p, &mcr)) { +- spin_unlock(&p->cgr_lock); ++ spin_unlock_irq(&p->cgr_lock); + dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); + qman_p_irqsource_add(p, QM_PIRQ_CSCI); + return; +@@ -1476,7 +1476,7 @@ static void qm_congestion_task(struct work_struct *work) + list_for_each_entry(cgr, &p->cgr_cbs, node) + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); +- spin_unlock(&p->cgr_lock); ++ spin_unlock_irq(&p->cgr_lock); + qman_p_irqsource_add(p, QM_PIRQ_CSCI); + } + +@@ -2440,7 +2440,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + preempt_enable(); + + cgr->chan = p->config->channel; +- spin_lock(&p->cgr_lock); ++ spin_lock_irq(&p->cgr_lock); + + if (opts) { + struct qm_mcc_initcgr local_opts = *opts; +@@ -2477,7 +2477,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) + cgr->cb(p, cgr, 1); + out: +- spin_unlock(&p->cgr_lock); ++ spin_unlock_irq(&p->cgr_lock); + put_affine_portal(); + return ret; + } +-- +2.43.0 + diff --git a/queue-6.8/soc-fsl-qbman-use-raw-spinlock-for-cgr_lock.patch b/queue-6.8/soc-fsl-qbman-use-raw-spinlock-for-cgr_lock.patch new file mode 100644 index 00000000000..3284986f88c --- /dev/null +++ b/queue-6.8/soc-fsl-qbman-use-raw-spinlock-for-cgr_lock.patch @@ -0,0 +1,132 @@ +From 053db4de264e7c68761f603d0ec6028326c37474 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 11 Mar 2024 12:38:30 -0400 +Subject: soc: fsl: qbman: Use raw spinlock for cgr_lock + +From: Sean Anderson + +[ Upstream commit fbec4e7fed89b579f2483041fabf9650fb0dd6bc ] + +smp_call_function always runs its callback in hard IRQ context, even on +PREEMPT_RT, where spinlocks can sleep. So we need to use a raw spinlock +for cgr_lock to ensure we aren't waiting on a sleeping task. + +Although this bug has existed for a while, it was not apparent until +commit ef2a8d5478b9 ("net: dpaa: Adjust queue depth on rate change") +which invokes smp_call_function_single via qman_update_cgr_safe every +time a link goes up or down. + +Fixes: 96f413f47677 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()") +CC: stable@vger.kernel.org +Reported-by: Vladimir Oltean +Closes: https://lore.kernel.org/all/20230323153935.nofnjucqjqnz34ej@skbuf/ +Reported-by: Steffen Trumtrar +Closes: https://lore.kernel.org/linux-arm-kernel/87wmsyvclu.fsf@pengutronix.de/ +Signed-off-by: Sean Anderson +Reviewed-by: Camelia Groza +Tested-by: Vladimir Oltean +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/soc/fsl/qbman/qman.c | 25 ++++++++++++++----------- + 1 file changed, 14 insertions(+), 11 deletions(-) + +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c +index 1bf1f1ea67f00..7e9074519ad22 100644 +--- a/drivers/soc/fsl/qbman/qman.c ++++ b/drivers/soc/fsl/qbman/qman.c +@@ -991,7 +991,7 @@ struct qman_portal { + /* linked-list of CSCN handlers. 
*/ + struct list_head cgr_cbs; + /* list lock */ +- spinlock_t cgr_lock; ++ raw_spinlock_t cgr_lock; + struct work_struct congestion_work; + struct work_struct mr_work; + char irqname[MAX_IRQNAME]; +@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal, + /* if the given mask is NULL, assume all CGRs can be seen */ + qman_cgrs_fill(&portal->cgrs[0]); + INIT_LIST_HEAD(&portal->cgr_cbs); +- spin_lock_init(&portal->cgr_lock); ++ raw_spin_lock_init(&portal->cgr_lock); + INIT_WORK(&portal->congestion_work, qm_congestion_task); + INIT_WORK(&portal->mr_work, qm_mr_process_task); + portal->bits = 0; +@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work) + union qm_mc_result *mcr; + struct qman_cgr *cgr; + +- spin_lock_irq(&p->cgr_lock); ++ /* ++ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock! ++ */ ++ raw_spin_lock_irq(&p->cgr_lock); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + if (!qm_mc_result_timeout(&p->p, &mcr)) { +- spin_unlock_irq(&p->cgr_lock); ++ raw_spin_unlock_irq(&p->cgr_lock); + dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); + qman_p_irqsource_add(p, QM_PIRQ_CSCI); + return; +@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work) + list_for_each_entry(cgr, &p->cgr_cbs, node) + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); +- spin_unlock_irq(&p->cgr_lock); ++ raw_spin_unlock_irq(&p->cgr_lock); + qman_p_irqsource_add(p, QM_PIRQ_CSCI); + } + +@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + preempt_enable(); + + cgr->chan = p->config->channel; +- spin_lock_irq(&p->cgr_lock); ++ raw_spin_lock_irq(&p->cgr_lock); + + if (opts) { + struct qm_mcc_initcgr local_opts = *opts; +@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) + cgr->cb(p, cgr, 1); + out: +- spin_unlock_irq(&p->cgr_lock); ++ raw_spin_unlock_irq(&p->cgr_lock); + put_affine_portal(); + return ret; + } +@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) + return -EINVAL; + + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); +- spin_lock_irqsave(&p->cgr_lock, irqflags); ++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags); + list_del(&cgr->node); + /* + * If there are no other CGR objects for this CGRID in the list, +@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); + release_lock: +- spin_unlock_irqrestore(&p->cgr_lock, irqflags); ++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags); + put_affine_portal(); + return ret; + } +@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts) + if (!p) + return -EINVAL; + +- spin_lock_irqsave(&p->cgr_lock, irqflags); ++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags); + ret = qm_modify_cgr(cgr, 0, opts); +- spin_unlock_irqrestore(&p->cgr_lock, irqflags); ++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags); + put_affine_portal(); + return ret; + } +-- +2.43.0 + diff --git a/queue-6.8/sparc-vdso-fix-return-value-of-__setup-handler.patch b/queue-6.8/sparc-vdso-fix-return-value-of-__setup-handler.patch new file mode 100644 index 00000000000..8d7f6fb1cda --- /dev/null +++ b/queue-6.8/sparc-vdso-fix-return-value-of-__setup-handler.patch @@ -0,0 +1,57 @@ +From e2ec3556473786a83664731da8fe474a59395fc4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Feb 2024 21:28:08 -0800 
+Subject: sparc: vDSO: fix return value of __setup handler + +From: Randy Dunlap + +[ Upstream commit 5378f00c935bebb846b1fdb0e79cb76c137c56b5 ] + +__setup() handlers should return 1 to obsolete_checksetup() in +init/main.c to indicate that the boot option has been handled. +A return of 0 causes the boot option/value to be listed as an Unknown +kernel parameter and added to init's (limited) argument or environment +strings. Also, error return codes don't mean anything to +obsolete_checksetup() -- only non-zero (usually 1) or zero. +So return 1 from vdso_setup(). + +Fixes: 9a08862a5d2e ("vDSO for sparc") +Signed-off-by: Randy Dunlap +Reported-by: Igor Zhbanov +Link: lore.kernel.org/r/64644a2f-4a20-bab3-1e15-3b2cdd0defe3@omprussia.ru +Cc: "David S. Miller" +Cc: sparclinux@vger.kernel.org +Cc: Dan Carpenter +Cc: Nick Alcock +Cc: Sam Ravnborg +Cc: Andrew Morton +Cc: stable@vger.kernel.org +Cc: Arnd Bergmann +Cc: Andreas Larsson +Signed-off-by: Andreas Larsson +Link: https://lore.kernel.org/r/20240211052808.22635-1-rdunlap@infradead.org +Signed-off-by: Sasha Levin +--- + arch/sparc/vdso/vma.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c +index 136c78f28f8ba..1bbf4335de454 100644 +--- a/arch/sparc/vdso/vma.c ++++ b/arch/sparc/vdso/vma.c +@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s) + unsigned long val; + + err = kstrtoul(s, 10, &val); +- if (err) +- return err; +- vdso_enabled = val; +- return 0; ++ if (!err) ++ vdso_enabled = val; ++ return 1; + } + __setup("vdso=", vdso_setup); +-- +2.43.0 + diff --git a/queue-6.8/sparc32-fix-parport-build-with-sparc32.patch b/queue-6.8/sparc32-fix-parport-build-with-sparc32.patch new file mode 100644 index 00000000000..73851344278 --- /dev/null +++ b/queue-6.8/sparc32-fix-parport-build-with-sparc32.patch @@ -0,0 +1,572 @@ +From 2c4842d42cad823e3927c9a54d8a7c30158d27d2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 24 Feb 2024 18:42:27 +0100 +Subject: sparc32: Fix parport build with sparc32 + +From: Sam Ravnborg + +[ Upstream commit 91d3ff922c346d6d8cb8de5ff8d504fe0ca9e17e ] + +include/asm/parport.h is sparc64 specific. +Rename it to parport_64.h and use the generic version for sparc32. + +This fixed all{mod,yes}config build errors like: + +parport_pc.c:(.text):undefined-reference-to-ebus_dma_enable +parport_pc.c:(.text):undefined-reference-to-ebus_dma_irq_enable +parport_pc.c:(.text):undefined-reference-to-ebus_dma_register + +The errors occur as the sparc32 build references sparc64 symbols. + +Signed-off-by: Sam Ravnborg +Cc: "David S. Miller" +Cc: Andreas Larsson +Cc: Randy Dunlap +Cc: Maciej W. 
Rozycki +Closes: https://lore.kernel.org/r/20230406160548.25721-1-rdunlap@infradead.org/ +Fixes: 66bcd06099bb ("parport_pc: Also enable driver for PCI systems") +Cc: stable@vger.kernel.org # v5.18+ +Tested-by: Randy Dunlap # build-tested +Reviewed-by: Andreas Larsson +Signed-off-by: Andreas Larsson +Link: https://lore.kernel.org/r/20240224-sam-fix-sparc32-all-builds-v2-6-1f186603c5c4@ravnborg.org +Signed-off-by: Sasha Levin +--- + arch/sparc/include/asm/parport.h | 259 +--------------------------- + arch/sparc/include/asm/parport_64.h | 256 +++++++++++++++++++++++++++ + 2 files changed, 263 insertions(+), 252 deletions(-) + create mode 100644 arch/sparc/include/asm/parport_64.h + +diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h +index 0a7ffcfd59cda..e2eed8f97665f 100644 +--- a/arch/sparc/include/asm/parport.h ++++ b/arch/sparc/include/asm/parport.h +@@ -1,256 +1,11 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* parport.h: sparc64 specific parport initialization and dma. +- * +- * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be) +- */ ++#ifndef ___ASM_SPARC_PARPORT_H ++#define ___ASM_SPARC_PARPORT_H + +-#ifndef _ASM_SPARC64_PARPORT_H +-#define _ASM_SPARC64_PARPORT_H 1 +- +-#include +-#include +- +-#include +-#include +-#include +- +-#define PARPORT_PC_MAX_PORTS PARPORT_MAX +- +-/* +- * While sparc64 doesn't have an ISA DMA API, we provide something that looks +- * close enough to make parport_pc happy +- */ +-#define HAS_DMA +- +-#ifdef CONFIG_PARPORT_PC_FIFO +-static DEFINE_SPINLOCK(dma_spin_lock); +- +-#define claim_dma_lock() \ +-({ unsigned long flags; \ +- spin_lock_irqsave(&dma_spin_lock, flags); \ +- flags; \ +-}) +- +-#define release_dma_lock(__flags) \ +- spin_unlock_irqrestore(&dma_spin_lock, __flags); ++#if defined(__sparc__) && defined(__arch64__) ++#include ++#else ++#include ++#endif + #endif + +-static struct sparc_ebus_info { +- struct ebus_dma_info info; +- unsigned int addr; +- unsigned int count; +- int lock; +- +- struct parport *port; +-} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; +- +-static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); +- +-static inline int request_dma(unsigned int dmanr, const char *device_id) +-{ +- if (dmanr >= PARPORT_PC_MAX_PORTS) +- return -EINVAL; +- if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) +- return -EBUSY; +- return 0; +-} +- +-static inline void free_dma(unsigned int dmanr) +-{ +- if (dmanr >= PARPORT_PC_MAX_PORTS) { +- printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); +- return; +- } +- if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { +- printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); +- return; +- } +-} +- +-static inline void enable_dma(unsigned int dmanr) +-{ +- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); +- +- if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, +- sparc_ebus_dmas[dmanr].addr, +- sparc_ebus_dmas[dmanr].count)) +- BUG(); +-} +- +-static inline void disable_dma(unsigned int dmanr) +-{ +- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); +-} +- +-static inline void clear_dma_ff(unsigned int dmanr) +-{ +- /* nothing */ +-} +- +-static inline void set_dma_mode(unsigned int dmanr, char mode) +-{ +- ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); +-} +- +-static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) +-{ +- sparc_ebus_dmas[dmanr].addr = addr; +-} +- +-static inline void set_dma_count(unsigned int dmanr, unsigned int count) +-{ +- sparc_ebus_dmas[dmanr].count = count; +-} +- +-static inline unsigned int 
get_dma_residue(unsigned int dmanr) +-{ +- return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); +-} +- +-static int ecpp_probe(struct platform_device *op) +-{ +- unsigned long base = op->resource[0].start; +- unsigned long config = op->resource[1].start; +- unsigned long d_base = op->resource[2].start; +- unsigned long d_len; +- struct device_node *parent; +- struct parport *p; +- int slot, err; +- +- parent = op->dev.of_node->parent; +- if (of_node_name_eq(parent, "dma")) { +- p = parport_pc_probe_port(base, base + 0x400, +- op->archdata.irqs[0], PARPORT_DMA_NOFIFO, +- op->dev.parent->parent, 0); +- if (!p) +- return -ENOMEM; +- dev_set_drvdata(&op->dev, p); +- return 0; +- } +- +- for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { +- if (!test_and_set_bit(slot, dma_slot_map)) +- break; +- } +- err = -ENODEV; +- if (slot >= PARPORT_PC_MAX_PORTS) +- goto out_err; +- +- spin_lock_init(&sparc_ebus_dmas[slot].info.lock); +- +- d_len = (op->resource[2].end - d_base) + 1UL; +- sparc_ebus_dmas[slot].info.regs = +- of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); +- +- if (!sparc_ebus_dmas[slot].info.regs) +- goto out_clear_map; +- +- sparc_ebus_dmas[slot].info.flags = 0; +- sparc_ebus_dmas[slot].info.callback = NULL; +- sparc_ebus_dmas[slot].info.client_cookie = NULL; +- sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; +- strcpy(sparc_ebus_dmas[slot].info.name, "parport"); +- if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) +- goto out_unmap_regs; +- +- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); +- +- /* Configure IRQ to Push Pull, Level Low */ +- /* Enable ECP, set bit 2 of the CTR first */ +- outb(0x04, base + 0x02); +- ns87303_modify(config, PCR, +- PCR_EPP_ENABLE | +- PCR_IRQ_ODRAIN, +- PCR_ECP_ENABLE | +- PCR_ECP_CLK_ENA | +- PCR_IRQ_POLAR); +- +- /* CTR bit 5 controls direction of port */ +- ns87303_modify(config, PTR, +- 0, PTR_LPT_REG_DIR); +- +- p = parport_pc_probe_port(base, base + 0x400, +- op->archdata.irqs[0], +- slot, +- op->dev.parent, +- 0); +- err = -ENOMEM; +- if (!p) +- goto out_disable_irq; +- +- dev_set_drvdata(&op->dev, p); +- +- return 0; +- +-out_disable_irq: +- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); +- ebus_dma_unregister(&sparc_ebus_dmas[slot].info); +- +-out_unmap_regs: +- of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); +- +-out_clear_map: +- clear_bit(slot, dma_slot_map); +- +-out_err: +- return err; +-} +- +-static int ecpp_remove(struct platform_device *op) +-{ +- struct parport *p = dev_get_drvdata(&op->dev); +- int slot = p->dma; +- +- parport_pc_unregister_port(p); +- +- if (slot != PARPORT_DMA_NOFIFO) { +- unsigned long d_base = op->resource[2].start; +- unsigned long d_len; +- +- d_len = (op->resource[2].end - d_base) + 1UL; +- +- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); +- ebus_dma_unregister(&sparc_ebus_dmas[slot].info); +- of_iounmap(&op->resource[2], +- sparc_ebus_dmas[slot].info.regs, +- d_len); +- clear_bit(slot, dma_slot_map); +- } +- +- return 0; +-} +- +-static const struct of_device_id ecpp_match[] = { +- { +- .name = "ecpp", +- }, +- { +- .name = "parallel", +- .compatible = "ecpp", +- }, +- { +- .name = "parallel", +- .compatible = "ns87317-ecpp", +- }, +- { +- .name = "parallel", +- .compatible = "pnpALI,1533,3", +- }, +- {}, +-}; +- +-static struct platform_driver ecpp_driver = { +- .driver = { +- .name = "ecpp", +- .of_match_table = ecpp_match, +- }, +- .probe = ecpp_probe, +- .remove = ecpp_remove, +-}; +- +-static int parport_pc_find_nonpci_ports(int autoirq, int autodma) +-{ +- 
return platform_driver_register(&ecpp_driver); +-} +- +-#endif /* !(_ASM_SPARC64_PARPORT_H */ +diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h +new file mode 100644 +index 0000000000000..0a7ffcfd59cda +--- /dev/null ++++ b/arch/sparc/include/asm/parport_64.h +@@ -0,0 +1,256 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* parport.h: sparc64 specific parport initialization and dma. ++ * ++ * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be) ++ */ ++ ++#ifndef _ASM_SPARC64_PARPORT_H ++#define _ASM_SPARC64_PARPORT_H 1 ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define PARPORT_PC_MAX_PORTS PARPORT_MAX ++ ++/* ++ * While sparc64 doesn't have an ISA DMA API, we provide something that looks ++ * close enough to make parport_pc happy ++ */ ++#define HAS_DMA ++ ++#ifdef CONFIG_PARPORT_PC_FIFO ++static DEFINE_SPINLOCK(dma_spin_lock); ++ ++#define claim_dma_lock() \ ++({ unsigned long flags; \ ++ spin_lock_irqsave(&dma_spin_lock, flags); \ ++ flags; \ ++}) ++ ++#define release_dma_lock(__flags) \ ++ spin_unlock_irqrestore(&dma_spin_lock, __flags); ++#endif ++ ++static struct sparc_ebus_info { ++ struct ebus_dma_info info; ++ unsigned int addr; ++ unsigned int count; ++ int lock; ++ ++ struct parport *port; ++} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; ++ ++static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); ++ ++static inline int request_dma(unsigned int dmanr, const char *device_id) ++{ ++ if (dmanr >= PARPORT_PC_MAX_PORTS) ++ return -EINVAL; ++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) ++ return -EBUSY; ++ return 0; ++} ++ ++static inline void free_dma(unsigned int dmanr) ++{ ++ if (dmanr >= PARPORT_PC_MAX_PORTS) { ++ printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); ++ return; ++ } ++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { ++ printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); ++ return; ++ } ++} ++ ++static inline void enable_dma(unsigned int dmanr) ++{ ++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); ++ ++ if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, ++ sparc_ebus_dmas[dmanr].addr, ++ sparc_ebus_dmas[dmanr].count)) ++ BUG(); ++} ++ ++static inline void disable_dma(unsigned int dmanr) ++{ ++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); ++} ++ ++static inline void clear_dma_ff(unsigned int dmanr) ++{ ++ /* nothing */ ++} ++ ++static inline void set_dma_mode(unsigned int dmanr, char mode) ++{ ++ ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); ++} ++ ++static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) ++{ ++ sparc_ebus_dmas[dmanr].addr = addr; ++} ++ ++static inline void set_dma_count(unsigned int dmanr, unsigned int count) ++{ ++ sparc_ebus_dmas[dmanr].count = count; ++} ++ ++static inline unsigned int get_dma_residue(unsigned int dmanr) ++{ ++ return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); ++} ++ ++static int ecpp_probe(struct platform_device *op) ++{ ++ unsigned long base = op->resource[0].start; ++ unsigned long config = op->resource[1].start; ++ unsigned long d_base = op->resource[2].start; ++ unsigned long d_len; ++ struct device_node *parent; ++ struct parport *p; ++ int slot, err; ++ ++ parent = op->dev.of_node->parent; ++ if (of_node_name_eq(parent, "dma")) { ++ p = parport_pc_probe_port(base, base + 0x400, ++ op->archdata.irqs[0], PARPORT_DMA_NOFIFO, ++ op->dev.parent->parent, 0); ++ if (!p) ++ return -ENOMEM; ++ dev_set_drvdata(&op->dev, p); ++ return 0; ++ } ++ ++ for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { ++ if 
(!test_and_set_bit(slot, dma_slot_map)) ++ break; ++ } ++ err = -ENODEV; ++ if (slot >= PARPORT_PC_MAX_PORTS) ++ goto out_err; ++ ++ spin_lock_init(&sparc_ebus_dmas[slot].info.lock); ++ ++ d_len = (op->resource[2].end - d_base) + 1UL; ++ sparc_ebus_dmas[slot].info.regs = ++ of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); ++ ++ if (!sparc_ebus_dmas[slot].info.regs) ++ goto out_clear_map; ++ ++ sparc_ebus_dmas[slot].info.flags = 0; ++ sparc_ebus_dmas[slot].info.callback = NULL; ++ sparc_ebus_dmas[slot].info.client_cookie = NULL; ++ sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; ++ strcpy(sparc_ebus_dmas[slot].info.name, "parport"); ++ if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) ++ goto out_unmap_regs; ++ ++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); ++ ++ /* Configure IRQ to Push Pull, Level Low */ ++ /* Enable ECP, set bit 2 of the CTR first */ ++ outb(0x04, base + 0x02); ++ ns87303_modify(config, PCR, ++ PCR_EPP_ENABLE | ++ PCR_IRQ_ODRAIN, ++ PCR_ECP_ENABLE | ++ PCR_ECP_CLK_ENA | ++ PCR_IRQ_POLAR); ++ ++ /* CTR bit 5 controls direction of port */ ++ ns87303_modify(config, PTR, ++ 0, PTR_LPT_REG_DIR); ++ ++ p = parport_pc_probe_port(base, base + 0x400, ++ op->archdata.irqs[0], ++ slot, ++ op->dev.parent, ++ 0); ++ err = -ENOMEM; ++ if (!p) ++ goto out_disable_irq; ++ ++ dev_set_drvdata(&op->dev, p); ++ ++ return 0; ++ ++out_disable_irq: ++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); ++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info); ++ ++out_unmap_regs: ++ of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); ++ ++out_clear_map: ++ clear_bit(slot, dma_slot_map); ++ ++out_err: ++ return err; ++} ++ ++static int ecpp_remove(struct platform_device *op) ++{ ++ struct parport *p = dev_get_drvdata(&op->dev); ++ int slot = p->dma; ++ ++ parport_pc_unregister_port(p); ++ ++ if (slot != PARPORT_DMA_NOFIFO) { ++ unsigned long d_base = op->resource[2].start; ++ unsigned long d_len; ++ ++ d_len = (op->resource[2].end - d_base) + 1UL; ++ ++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); ++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info); ++ of_iounmap(&op->resource[2], ++ sparc_ebus_dmas[slot].info.regs, ++ d_len); ++ clear_bit(slot, dma_slot_map); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id ecpp_match[] = { ++ { ++ .name = "ecpp", ++ }, ++ { ++ .name = "parallel", ++ .compatible = "ecpp", ++ }, ++ { ++ .name = "parallel", ++ .compatible = "ns87317-ecpp", ++ }, ++ { ++ .name = "parallel", ++ .compatible = "pnpALI,1533,3", ++ }, ++ {}, ++}; ++ ++static struct platform_driver ecpp_driver = { ++ .driver = { ++ .name = "ecpp", ++ .of_match_table = ecpp_match, ++ }, ++ .probe = ecpp_probe, ++ .remove = ecpp_remove, ++}; ++ ++static int parport_pc_find_nonpci_ports(int autoirq, int autodma) ++{ ++ return platform_driver_register(&ecpp_driver); ++} ++ ++#endif /* !(_ASM_SPARC64_PARPORT_H */ +-- +2.43.0 + diff --git a/queue-6.8/sparc64-nmi-watchdog-fix-return-value-of-__setup-han.patch b/queue-6.8/sparc64-nmi-watchdog-fix-return-value-of-__setup-han.patch new file mode 100644 index 00000000000..376edc28b80 --- /dev/null +++ b/queue-6.8/sparc64-nmi-watchdog-fix-return-value-of-__setup-han.patch @@ -0,0 +1,51 @@ +From eb7ef149ac9138c51076e3f1fd4974231d9c58e8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 10 Feb 2024 21:28:02 -0800 +Subject: sparc64: NMI watchdog: fix return value of __setup handler + +From: Randy Dunlap + +[ Upstream commit 3ed7c61e49d65dacb96db798c0ab6fcd55a1f20f ] + +__setup() handlers should return 1 to 
obsolete_checksetup() in +init/main.c to indicate that the boot option has been handled. +A return of 0 causes the boot option/value to be listed as an Unknown +kernel parameter and added to init's (limited) argument or environment +strings. Also, error return codes don't mean anything to +obsolete_checksetup() -- only non-zero (usually 1) or zero. +So return 1 from setup_nmi_watchdog(). + +Fixes: e5553a6d0442 ("sparc64: Implement NMI watchdog on capable cpus.") +Signed-off-by: Randy Dunlap +Reported-by: Igor Zhbanov +Link: lore.kernel.org/r/64644a2f-4a20-bab3-1e15-3b2cdd0defe3@omprussia.ru +Cc: "David S. Miller" +Cc: sparclinux@vger.kernel.org +Cc: Sam Ravnborg +Cc: Andrew Morton +Cc: stable@vger.kernel.org +Cc: Arnd Bergmann +Cc: Andreas Larsson +Signed-off-by: Andreas Larsson +Link: https://lore.kernel.org/r/20240211052802.22612-1-rdunlap@infradead.org +Signed-off-by: Sasha Levin +--- + arch/sparc/kernel/nmi.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c +index 17cdfdbf1f3b7..149adc0947530 100644 +--- a/arch/sparc/kernel/nmi.c ++++ b/arch/sparc/kernel/nmi.c +@@ -279,7 +279,7 @@ static int __init setup_nmi_watchdog(char *str) + if (!strncmp(str, "panic", 5)) + panic_on_timeout = 1; + +- return 0; ++ return 1; + } + __setup("nmi_watchdog=", setup_nmi_watchdog); + +-- +2.43.0 + diff --git a/queue-6.8/speakup-fix-8bit-characters-from-direct-synth.patch b/queue-6.8/speakup-fix-8bit-characters-from-direct-synth.patch new file mode 100644 index 00000000000..f516ba60d4b --- /dev/null +++ b/queue-6.8/speakup-fix-8bit-characters-from-direct-synth.patch @@ -0,0 +1,49 @@ +From 6db1db8eb5bcd91e0f4619536bb502589ff31e15 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Feb 2024 16:57:36 +0100 +Subject: speakup: Fix 8bit characters from direct synth + +From: Samuel Thibault + +[ Upstream commit b6c8dafc9d86eb77e502bb018ec4105e8d2fbf78 ] + +When userland echoes 8bit characters to /dev/synth with e.g. + +echo -e '\xe9' > /dev/synth + +synth_write would get characters beyond 0x7f, and thus negative when +char is signed. When given to synth_buffer_add which takes a u16, this +would sign-extend and produce a U+ffxy character rather than U+xy. +Users thus get garbled text instead of accents in their output. + +Let's fix this by making sure that we read unsigned characters. 
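+
+A self-contained userspace sketch of the same sign-extension effect
+(assuming plain char is signed, as on x86; the fix amounts to reading the
+buffer through an unsigned pointer):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char buf[] = "\xe9";                        /* latin-1 0xe9 */
+          uint16_t wrong = (uint16_t)buf[0];                /* sign-extends: 0xffe9 */
+          uint16_t right = (uint16_t)(unsigned char)buf[0]; /* 0x00e9 */
+
+          printf("signed read:   U+%04X\n", wrong);
+          printf("unsigned read: U+%04X\n", right);
+          return 0;
+  }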
+ +Signed-off-by: Samuel Thibault +Fixes: 89fc2ae80bb1 ("speakup: extend synth buffer to 16bit unicode characters") +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240204155736.2oh4ot7tiaa2wpbh@begin +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/accessibility/speakup/synth.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c +index eea2a2fa4f015..45f9061031338 100644 +--- a/drivers/accessibility/speakup/synth.c ++++ b/drivers/accessibility/speakup/synth.c +@@ -208,8 +208,10 @@ void spk_do_flush(void) + wake_up_process(speakup_task); + } + +-void synth_write(const char *buf, size_t count) ++void synth_write(const char *_buf, size_t count) + { ++ const unsigned char *buf = (const unsigned char *) _buf; ++ + while (count--) + synth_buffer_add(*buf++); + synth_start(); +-- +2.43.0 + diff --git a/queue-6.8/thermal-drivers-mediatek-fix-control-buffer-enableme.patch b/queue-6.8/thermal-drivers-mediatek-fix-control-buffer-enableme.patch new file mode 100644 index 00000000000..88d616051b3 --- /dev/null +++ b/queue-6.8/thermal-drivers-mediatek-fix-control-buffer-enableme.patch @@ -0,0 +1,48 @@ +From 7aff024e9796df85fab41d22f805d57197a04331 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Sep 2023 13:20:18 +0200 +Subject: thermal/drivers/mediatek: Fix control buffer enablement on MT7896 + +From: Frank Wunderlich + +[ Upstream commit 371ed6263e2403068b359f0c07188548c2d70827 ] + +Reading thermal sensor on mt7986 devices returns invalid temperature: + +bpi-r3 ~ # cat /sys/class/thermal/thermal_zone0/temp + -274000 + +Fix this by adding missing members in mtk_thermal_data struct which were +used in mtk_thermal_turn_on_buffer after commit 33140e668b10. 
+ +Cc: stable@vger.kernel.org +Fixes: 33140e668b10 ("thermal/drivers/mediatek: Control buffer enablement tweaks") +Signed-off-by: Frank Wunderlich +Reviewed-by: Markus Schneider-Pargmann +Reviewed-by: Daniel Golle +Tested-by: Daniel Golle +Reviewed-by: AngeloGioacchino Del Regno +Signed-off-by: Daniel Lezcano +Link: https://lore.kernel.org/r/20230907112018.52811-1-linux@fw-web.de +Signed-off-by: Sasha Levin +--- + drivers/thermal/mediatek/auxadc_thermal.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c +index 8b0edb2048443..9ee2e7283435a 100644 +--- a/drivers/thermal/mediatek/auxadc_thermal.c ++++ b/drivers/thermal/mediatek/auxadc_thermal.c +@@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = { + .adcpnp = mt7986_adcpnp, + .sensor_mux_values = mt7986_mux_values, + .version = MTK_THERMAL_V3, ++ .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1, ++ .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3), ++ .apmixed_buffer_ctl_set = BIT(0), + }; + + static bool mtk_thermal_temp_is_valid(int temp) +-- +2.43.0 + diff --git a/queue-6.8/thermal-intel-fix-intel_tcc_get_temp-to-support-nega.patch b/queue-6.8/thermal-intel-fix-intel_tcc_get_temp-to-support-nega.patch new file mode 100644 index 00000000000..174f03df11b --- /dev/null +++ b/queue-6.8/thermal-intel-fix-intel_tcc_get_temp-to-support-nega.patch @@ -0,0 +1,126 @@ +From 8fe0a43d99885dcbed199dadf1bf0df3ae717c3d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 09:54:09 +0800 +Subject: thermal/intel: Fix intel_tcc_get_temp() to support negative CPU + temperature + +From: Zhang Rui + +[ Upstream commit 7251b9e8a007ddd834aa81f8c7ea338884629fec ] + +CPU temperature can be negative in some cases. Thus the negative CPU +temperature should not be considered as a failure. + +Fix intel_tcc_get_temp() and its users to support negative CPU +temperature. + +Fixes: a3c1f066e1c5 ("thermal/intel: Introduce Intel TCC library") +Signed-off-by: Zhang Rui +Reviewed-by: Stanislaw Gruszka +Cc: 6.3+ # 6.3+ +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + .../intel/int340x_thermal/processor_thermal_device.c | 8 ++++---- + drivers/thermal/intel/intel_tcc.c | 12 ++++++------ + drivers/thermal/intel/x86_pkg_temp_thermal.c | 8 ++++---- + include/linux/intel_tcc.h | 2 +- + 4 files changed, 15 insertions(+), 15 deletions(-) + +diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +index 649f67fdf3454..d75fae7b7ed22 100644 +--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c ++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +@@ -176,14 +176,14 @@ static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone, + int *temp) + { + int cpu; +- int curr_temp; ++ int curr_temp, ret; + + *temp = 0; + + for_each_online_cpu(cpu) { +- curr_temp = intel_tcc_get_temp(cpu, false); +- if (curr_temp < 0) +- return curr_temp; ++ ret = intel_tcc_get_temp(cpu, &curr_temp, false); ++ if (ret < 0) ++ return ret; + if (!*temp || curr_temp > *temp) + *temp = curr_temp; + } +diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c +index 2e5c741c41ca0..5e8b7f34b3951 100644 +--- a/drivers/thermal/intel/intel_tcc.c ++++ b/drivers/thermal/intel/intel_tcc.c +@@ -103,18 +103,19 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC); + /** + * intel_tcc_get_temp() - returns the current temperature + * @cpu: cpu that the MSR should be run on, nagative value means any cpu. ++ * @temp: pointer to the memory for saving cpu temperature. + * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor. + * + * Get the current temperature returned by the CPU core/package level + * thermal sensor, in degrees C. + * +- * Return: Temperature in degrees C on success, negative error code otherwise. ++ * Return: 0 on success, negative error code otherwise. + */ +-int intel_tcc_get_temp(int cpu, bool pkg) ++int intel_tcc_get_temp(int cpu, int *temp, bool pkg) + { + u32 low, high; + u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; +- int tjmax, temp, err; ++ int tjmax, err; + + tjmax = intel_tcc_get_tjmax(cpu); + if (tjmax < 0) +@@ -131,9 +132,8 @@ int intel_tcc_get_temp(int cpu, bool pkg) + if (!(low & BIT(31))) + return -ENODATA; + +- temp = tjmax - ((low >> 16) & 0x7f); ++ *temp = tjmax - ((low >> 16) & 0x7f); + +- /* Do not allow negative CPU temperature */ +- return temp >= 0 ? 
temp : -ENODATA; ++ return 0; + } + EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC); +diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c +index 11a7f8108bbbf..61c3d450ee605 100644 +--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c +@@ -108,11 +108,11 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu) + static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) + { + struct zone_device *zonedev = thermal_zone_device_priv(tzd); +- int val; ++ int val, ret; + +- val = intel_tcc_get_temp(zonedev->cpu, true); +- if (val < 0) +- return val; ++ ret = intel_tcc_get_temp(zonedev->cpu, &val, true); ++ if (ret < 0) ++ return ret; + + *temp = val * 1000; + pr_debug("sys_get_curr_temp %d\n", *temp); +diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h +index f422612c28d6b..8ff8eabb4a987 100644 +--- a/include/linux/intel_tcc.h ++++ b/include/linux/intel_tcc.h +@@ -13,6 +13,6 @@ + int intel_tcc_get_tjmax(int cpu); + int intel_tcc_get_offset(int cpu); + int intel_tcc_set_offset(int cpu, int offset); +-int intel_tcc_get_temp(int cpu, bool pkg); ++int intel_tcc_get_temp(int cpu, int *temp, bool pkg); + + #endif /* __INTEL_TCC_H__ */ +-- +2.43.0 + diff --git a/queue-6.8/tpm-tpm_tis-avoid-warning-splat-at-shutdown.patch b/queue-6.8/tpm-tpm_tis-avoid-warning-splat-at-shutdown.patch new file mode 100644 index 00000000000..7efbb98fa65 --- /dev/null +++ b/queue-6.8/tpm-tpm_tis-avoid-warning-splat-at-shutdown.patch @@ -0,0 +1,50 @@ +From b97f0aff52e28f973595090773c4f3afc158dcea Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Feb 2024 12:36:45 +0100 +Subject: tpm,tpm_tis: Avoid warning splat at shutdown + +From: Lino Sanfilippo + +[ Upstream commit b7ab4bbd0188f3985b821fa09456b11105a8dedf ] + +If interrupts are not activated the work struct 'free_irq_work' is not +initialized. This results in a warning splat at module shutdown. + +Fix this by always initializing the work regardless of whether interrupts +are activated or not. 
+ +cc: stable@vger.kernel.org +Fixes: 481c2d14627d ("tpm,tpm_tis: Disable interrupts after 1000 unhandled IRQs") +Reported-by: Jarkko Sakkinen +Closes: https://lore.kernel.org/all/CX32RFOMJUQ0.3R4YCL9MDCB96@kernel.org/ +Signed-off-by: Lino Sanfilippo +Signed-off-by: Jarkko Sakkinen +Signed-off-by: Sasha Levin +--- + drivers/char/tpm/tpm_tis_core.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c +index 1b350412d8a6b..64c875657687d 100644 +--- a/drivers/char/tpm/tpm_tis_core.c ++++ b/drivers/char/tpm/tpm_tis_core.c +@@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, + int rc; + u32 int_status; + +- INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); +- + rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, + tis_int_handler, IRQF_ONESHOT | flags, + dev_name(&chip->dev), chip); +@@ -1132,6 +1130,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, + priv->phy_ops = phy_ops; + priv->locality_count = 0; + mutex_init(&priv->locality_count_mutex); ++ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); + + dev_set_drvdata(&chip->dev, priv); + +-- +2.43.0 + diff --git a/queue-6.8/tracing-ring-buffer-fix-wait_on_pipe-race.patch b/queue-6.8/tracing-ring-buffer-fix-wait_on_pipe-race.patch new file mode 100644 index 00000000000..684bb6765f5 --- /dev/null +++ b/queue-6.8/tracing-ring-buffer-fix-wait_on_pipe-race.patch @@ -0,0 +1,257 @@ +From e48e1cf76d3cddb2e8bfd4db8361bf32e8349260 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 12 Mar 2024 08:15:08 -0400 +Subject: tracing/ring-buffer: Fix wait_on_pipe() race + +From: Steven Rostedt (Google) + +[ Upstream commit 2aa043a55b9a764c9cbde5a8c654eeaaffe224cf ] + +When the trace_pipe_raw file is closed, there should be no new readers on +the file descriptor. This is mostly handled with the waking and wait_index +fields of the iterator. But there's still a slight race. + + CPU 0 CPU 1 + ----- ----- + wait_index++; + index = wait_index; + ring_buffer_wake_waiters(); + wait_on_pipe() + ring_buffer_wait(); + +The ring_buffer_wait() will miss the wakeup from CPU 1. The problem is +that the ring_buffer_wait() needs the logic of: + + prepare_to_wait(); + if (!condition) + schedule(); + +Where the missing condition check is the iter->wait_index update. + +Have the ring_buffer_wait() take a conditional callback function and a +data parameter that can be used within the wait_event_interruptible() of +the ring_buffer_wait() function. + +In wait_on_pipe(), pass a condition function that will check if the +wait_index has been updated, if it has, it will return true to break out +of the wait_event_interruptible() loop. + +Create a new field "closed" in the trace_iterator and set it in the +.flush() callback before calling ring_buffer_wake_waiters(). +This will keep any new readers from waiting on a closed file descriptor. + +Have the wait_on_pipe() condition callback also check the closed field. + +Change the wait_index field of the trace_iterator to atomic_t. There's no +reason it needs to be 'long' and making it atomic and using +atomic_read_acquire() and atomic_fetch_inc_release() will provide the +necessary memory barriers. + +Add a "woken" flag to tracing_buffers_splice_read() to exit the loop after +one more try to fetch data. That is, if it waited for data and something +woke it up, it should try to collect any new data and then exit back to +user space. 
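+
+The "re-check a caller-supplied condition before every sleep" shape of the
+fix can be sketched in userspace C with pthreads (this is only an analogue
+of the pattern, not the ring-buffer code; the names are made up):
+
+  #include <pthread.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+  static int wait_index;
+
+  static bool index_changed(void *data)
+  {
+          return wait_index != *(int *)data;        /* checked under lock */
+  }
+
+  static void wait_on(bool (*cond_fn)(void *), void *data)
+  {
+          pthread_mutex_lock(&lock);
+          while (!cond_fn(data))                    /* no missed wakeups */
+                  pthread_cond_wait(&cond, &lock);
+          pthread_mutex_unlock(&lock);
+  }
+
+  static void *waker(void *arg)
+  {
+          (void)arg;
+          pthread_mutex_lock(&lock);
+          wait_index++;                             /* update ... */
+          pthread_cond_broadcast(&cond);            /* ... then wake */
+          pthread_mutex_unlock(&lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          int seen = wait_index;
+          pthread_t t;
+
+          pthread_create(&t, NULL, waker, NULL);
+          wait_on(index_changed, &seen);
+          pthread_join(t, NULL);
+          puts("woken after wait_index moved on");
+          return 0;
+  }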
+ +Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wgsNgewHFxZAJiAQznwPMqEtQmi1waeS2O1v6L4c_Um5A@mail.gmail.com/ +Link: https://lore.kernel.org/linux-trace-kernel/20240312121703.557950713@goodmis.org + +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mark Rutland +Cc: Mathieu Desnoyers +Cc: Andrew Morton +Cc: Linus Torvalds +Cc: linke li +Cc: Rabin Vincent +Fixes: f3ddb74ad0790 ("tracing: Wake up ring buffer waiters on closing of the file") +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + include/linux/ring_buffer.h | 3 ++- + include/linux/trace_events.h | 5 ++++- + kernel/trace/ring_buffer.c | 13 ++++++----- + kernel/trace/trace.c | 43 ++++++++++++++++++++++++++---------- + 4 files changed, 45 insertions(+), 19 deletions(-) + +diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h +index 338a33db1577e..dc5ae4e96aee0 100644 +--- a/include/linux/ring_buffer.h ++++ b/include/linux/ring_buffer.h +@@ -99,7 +99,8 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k + }) + + typedef bool (*ring_buffer_cond_fn)(void *data); +-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); ++int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, ++ ring_buffer_cond_fn cond, void *data); + __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + struct file *filp, poll_table *poll_table, int full); + void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu); +diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h +index d68ff9b1247f9..fc6d0af56bb17 100644 +--- a/include/linux/trace_events.h ++++ b/include/linux/trace_events.h +@@ -103,13 +103,16 @@ struct trace_iterator { + unsigned int temp_size; + char *fmt; /* modified format holder */ + unsigned int fmt_size; +- long wait_index; ++ atomic_t wait_index; + + /* trace_seq for __print_flags() and __print_symbolic() etc. */ + struct trace_seq tmp_seq; + + cpumask_var_t started; + ++ /* Set when the file is closed to prevent new waiters */ ++ bool closed; ++ + /* it's true when current open file is snapshot */ + bool snapshot; + +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index a75b644bdd351..ad0d475d1f570 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -902,23 +902,26 @@ static bool rb_wait_once(void *data) + * @buffer: buffer to wait on + * @cpu: the cpu buffer to wait on + * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS ++ * @cond: condition function to break out of wait (NULL to run once) ++ * @data: the data to pass to @cond. + * + * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon + * as data is added to any of the @buffer's cpu buffers. Otherwise + * it will wait for data to be added to a specific cpu buffer. 
+ */ +-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) ++int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, ++ ring_buffer_cond_fn cond, void *data) + { + struct ring_buffer_per_cpu *cpu_buffer; + struct wait_queue_head *waitq; +- ring_buffer_cond_fn cond; + struct rb_irq_work *rbwork; +- void *data; + long once = 0; + int ret = 0; + +- cond = rb_wait_once; +- data = &once; ++ if (!cond) { ++ cond = rb_wait_once; ++ data = &once; ++ } + + /* + * Depending on what the caller is waiting for, either any +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c9c8983073485..d390fea3a6a52 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1955,15 +1955,36 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) + + #endif /* CONFIG_TRACER_MAX_TRACE */ + ++struct pipe_wait { ++ struct trace_iterator *iter; ++ int wait_index; ++}; ++ ++static bool wait_pipe_cond(void *data) ++{ ++ struct pipe_wait *pwait = data; ++ struct trace_iterator *iter = pwait->iter; ++ ++ if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) ++ return true; ++ ++ return iter->closed; ++} ++ + static int wait_on_pipe(struct trace_iterator *iter, int full) + { ++ struct pipe_wait pwait; + int ret; + + /* Iterators are static, they should be filled or empty */ + if (trace_buffer_iter(iter, iter->cpu_file)) + return 0; + +- ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full); ++ pwait.wait_index = atomic_read_acquire(&iter->wait_index); ++ pwait.iter = iter; ++ ++ ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, ++ wait_pipe_cond, &pwait); + + #ifdef CONFIG_TRACER_MAX_TRACE + /* +@@ -8398,9 +8419,9 @@ static int tracing_buffers_flush(struct file *file, fl_owner_t id) + struct ftrace_buffer_info *info = file->private_data; + struct trace_iterator *iter = &info->iter; + +- iter->wait_index++; ++ iter->closed = true; + /* Make sure the waiters see the new wait_index */ +- smp_wmb(); ++ (void)atomic_fetch_inc_release(&iter->wait_index); + + ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); + +@@ -8500,6 +8521,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + .spd_release = buffer_spd_release, + }; + struct buffer_ref *ref; ++ bool woken = false; + int page_size; + int entries, i; + ssize_t ret = 0; +@@ -8573,17 +8595,17 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + + /* did we read anything? */ + if (!spd.nr_pages) { +- long wait_index; + + if (ret) + goto out; + ++ if (woken) ++ goto out; ++ + ret = -EAGAIN; + if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) + goto out; + +- wait_index = READ_ONCE(iter->wait_index); +- + ret = wait_on_pipe(iter, iter->snapshot ? 
0 : iter->tr->buffer_percent); + if (ret) + goto out; +@@ -8592,10 +8614,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + if (!tracer_tracing_is_on(iter->tr)) + goto out; + +- /* Make sure we see the new wait_index */ +- smp_rmb(); +- if (wait_index != iter->wait_index) +- goto out; ++ /* Iterate one more time to collect any new data then exit */ ++ woken = true; + + goto again; + } +@@ -8618,9 +8638,8 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned + + mutex_lock(&trace_types_lock); + +- iter->wait_index++; + /* Make sure the waiters see the new wait_index */ +- smp_wmb(); ++ (void)atomic_fetch_inc_release(&iter->wait_index); + + ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); + +-- +2.43.0 + diff --git a/queue-6.8/ubi-check-for-too-small-leb-size-in-vtbl-code.patch b/queue-6.8/ubi-check-for-too-small-leb-size-in-vtbl-code.patch new file mode 100644 index 00000000000..2e13ed5be21 --- /dev/null +++ b/queue-6.8/ubi-check-for-too-small-leb-size-in-vtbl-code.patch @@ -0,0 +1,45 @@ +From ce6765ce8eca037aff6dab20b69c598ac51c8d56 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 Jan 2024 07:37:02 +0100 +Subject: ubi: Check for too small LEB size in VTBL code + +From: Richard Weinberger + +[ Upstream commit 68a24aba7c593eafa8fd00f2f76407b9b32b47a9 ] + +If the LEB size is smaller than a volume table record we cannot +have volumes. +In this case abort attaching. + +Cc: Chenyuan Yang +Cc: stable@vger.kernel.org +Fixes: 801c135ce73d ("UBI: Unsorted Block Images") +Reported-by: Chenyuan Yang +Closes: https://lore.kernel.org/linux-mtd/1433EB7A-FC89-47D6-8F47-23BE41B263B3@illinois.edu/ +Signed-off-by: Richard Weinberger +Reviewed-by: Zhihao Cheng +Signed-off-by: Sasha Levin +--- + drivers/mtd/ubi/vtbl.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c +index f700f0e4f2ec4..6e5489e233dd2 100644 +--- a/drivers/mtd/ubi/vtbl.c ++++ b/drivers/mtd/ubi/vtbl.c +@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) + * The number of supported volumes is limited by the eraseblock size + * and by the UBI_MAX_VOLUMES constant. + */ ++ ++ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) { ++ ubi_err(ubi, "LEB size too small for a volume record"); ++ return -EINVAL; ++ } ++ + ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; + if (ubi->vtbl_slots > UBI_MAX_VOLUMES) + ubi->vtbl_slots = UBI_MAX_VOLUMES; +-- +2.43.0 + diff --git a/queue-6.8/ubi-correct-the-calculation-of-fastmap-size.patch b/queue-6.8/ubi-correct-the-calculation-of-fastmap-size.patch new file mode 100644 index 00000000000..7df9733503f --- /dev/null +++ b/queue-6.8/ubi-correct-the-calculation-of-fastmap-size.patch @@ -0,0 +1,43 @@ +From 0b5616302e136d37f74930e78b72d113297c23cd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 20 Feb 2024 10:49:03 +0800 +Subject: ubi: correct the calculation of fastmap size + +From: Zhang Yi + +[ Upstream commit 7f174ae4f39e8475adcc09d26c5a43394689ad6c ] + +Now that the calculation of fastmap size in ubi_calc_fm_size() is +incorrect since it miss each user volume's ubi_fm_eba structure and the +Internal UBI volume info. Let's correct the calculation. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Zhang Yi +Reviewed-by: Zhihao Cheng +Signed-off-by: Richard Weinberger +Signed-off-by: Sasha Levin +--- + drivers/mtd/ubi/fastmap.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c +index 2a728c31e6b85..9a4940874be5b 100644 +--- a/drivers/mtd/ubi/fastmap.c ++++ b/drivers/mtd/ubi/fastmap.c +@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi) + sizeof(struct ubi_fm_scan_pool) + + sizeof(struct ubi_fm_scan_pool) + + (ubi->peb_count * sizeof(struct ubi_fm_ec)) + +- (sizeof(struct ubi_fm_eba) + +- (ubi->peb_count * sizeof(__be32))) + +- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; ++ ((sizeof(struct ubi_fm_eba) + ++ sizeof(struct ubi_fm_volhdr)) * ++ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) + ++ (ubi->peb_count * sizeof(__be32)); + return roundup(size, ubi->leb_size); + } + +-- +2.43.0 + diff --git a/queue-6.8/ubifs-set-page-uptodate-in-the-correct-place.patch b/queue-6.8/ubifs-set-page-uptodate-in-the-correct-place.patch new file mode 100644 index 00000000000..40b393e8251 --- /dev/null +++ b/queue-6.8/ubifs-set-page-uptodate-in-the-correct-place.patch @@ -0,0 +1,74 @@ +From 3a0dcde990d79b65c09ed9c6b56d0bf1163c699d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 Jan 2024 17:52:44 +0000 +Subject: ubifs: Set page uptodate in the correct place + +From: Matthew Wilcox (Oracle) + +[ Upstream commit 723012cab779eee8228376754e22c6594229bf8f ] + +Page cache reads are lockless, so setting the freshly allocated page +uptodate before we've overwritten it with the data it's supposed to have +in it will allow a simultaneous reader to see old data. Move the call +to SetPageUptodate into ubifs_write_end(), which is after we copied the +new data into the page. + +Fixes: 1e51764a3c2a ("UBIFS: add new flash file system") +Cc: stable@vger.kernel.org +Signed-off-by: Matthew Wilcox (Oracle) +Reviewed-by: Zhihao Cheng +Signed-off-by: Richard Weinberger +Signed-off-by: Sasha Levin +--- + fs/ubifs/file.c | 13 ++++--------- + 1 file changed, 4 insertions(+), 9 deletions(-) + +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c +index 5029eb3390a56..d0694b83dd02c 100644 +--- a/fs/ubifs/file.c ++++ b/fs/ubifs/file.c +@@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping, + return err; + } + } +- +- SetPageUptodate(page); +- ClearPageError(page); + } + + if (PagePrivate(page)) +@@ -463,9 +460,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, + return err; + } + } +- +- SetPageUptodate(page); +- ClearPageError(page); + } + + err = allocate_budget(c, page, ui, appending); +@@ -475,10 +469,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, + * If we skipped reading the page because we were going to + * write all of it, then it is not up to date. 
+ */ +- if (skipped_read) { ++ if (skipped_read) + ClearPageChecked(page); +- ClearPageUptodate(page); +- } + /* + * Budgeting failed which means it would have to force + * write-back but didn't, because we set the @fast flag in the +@@ -569,6 +561,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, + goto out; + } + ++ if (len == PAGE_SIZE) ++ SetPageUptodate(page); ++ + if (!PagePrivate(page)) { + attach_page_private(page, (void *)1); + atomic_long_inc(&c->dirty_pg_cnt); +-- +2.43.0 + diff --git a/queue-6.8/ubifs-ubifs_symlink-fix-memleak-of-inode-i_link-in-e.patch b/queue-6.8/ubifs-ubifs_symlink-fix-memleak-of-inode-i_link-in-e.patch new file mode 100644 index 00000000000..ee63f9b205f --- /dev/null +++ b/queue-6.8/ubifs-ubifs_symlink-fix-memleak-of-inode-i_link-in-e.patch @@ -0,0 +1,61 @@ +From ffc5fd19d74dd2d2de2c3da43b1046c3449846af Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 8 Jan 2024 10:41:05 +0800 +Subject: ubifs: ubifs_symlink: Fix memleak of inode->i_link in error path + +From: Zhihao Cheng + +[ Upstream commit 6379b44cdcd67f5f5d986b73953e99700591edfa ] + +For error handling path in ubifs_symlink(), inode will be marked as +bad first, then iput() is invoked. If inode->i_link is initialized by +fscrypt_encrypt_symlink() in encryption scenario, inode->i_link won't +be freed by callchain ubifs_free_inode -> fscrypt_free_inode in error +handling path, because make_bad_inode() has changed 'inode->i_mode' as +'S_IFREG'. +Following kmemleak is easy to be reproduced by injecting error in +ubifs_jnl_update() when doing symlink in encryption scenario: + unreferenced object 0xffff888103da3d98 (size 8): + comm "ln", pid 1692, jiffies 4294914701 (age 12.045s) + backtrace: + kmemdup+0x32/0x70 + __fscrypt_encrypt_symlink+0xed/0x1c0 + ubifs_symlink+0x210/0x300 [ubifs] + vfs_symlink+0x216/0x360 + do_symlinkat+0x11a/0x190 + do_syscall_64+0x3b/0xe0 +There are two ways fixing it: + 1. Remove make_bad_inode() in error handling path. We can do that + because ubifs_evict_inode() will do same processes for good + symlink inode and bad symlink inode, for inode->i_nlink checking + is before is_bad_inode(). + 2. Free inode->i_link before marking inode bad. +Method 2 is picked, it has less influence, personally, I think. + +Cc: stable@vger.kernel.org +Fixes: 2c58d548f570 ("fscrypt: cache decrypted symlink target in ->i_link") +Signed-off-by: Zhihao Cheng +Suggested-by: Eric Biggers +Reviewed-by: Eric Biggers +Signed-off-by: Richard Weinberger +Signed-off-by: Sasha Levin +--- + fs/ubifs/dir.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c +index e413a9cf8ee38..6b3db00d9b121 100644 +--- a/fs/ubifs/dir.c ++++ b/fs/ubifs/dir.c +@@ -1134,6 +1134,8 @@ static int ubifs_mknod(struct mnt_idmap *idmap, struct inode *dir, + dir_ui->ui_size = dir->i_size; + mutex_unlock(&dir_ui->ui_mutex); + out_inode: ++ /* Free inode->i_link before inode is marked as bad. 
*/ ++ fscrypt_free_inode(inode); + make_bad_inode(inode); + iput(inode); + out_fname: +-- +2.43.0 + diff --git a/queue-6.8/usb-dwc3-am62-disable-wakeup-at-remove.patch b/queue-6.8/usb-dwc3-am62-disable-wakeup-at-remove.patch new file mode 100644 index 00000000000..37aa4f8bfa0 --- /dev/null +++ b/queue-6.8/usb-dwc3-am62-disable-wakeup-at-remove.patch @@ -0,0 +1,40 @@ +From bb986915a54cfb639bd943cc17cf3b2d01a2f729 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 11:23:49 +0200 +Subject: usb: dwc3-am62: Disable wakeup at remove + +From: Roger Quadros + +[ Upstream commit 4ead695e6b3cac06543d7bc7241ab75aee4ea6a6 ] + +Disable wakeup at remove. +Fixes the below warnings on module unload and reload. + +> dwc3-am62 f900000.dwc3-usb: couldn't enable device as a wakeup source: -17 +> dwc3-am62 f910000.dwc3-usb: couldn't enable device as a wakeup source: -17 + +Fixes: 4e3972b589da ("usb: dwc3-am62: Enable as a wakeup source by default") +Cc: stable@vger.kernel.org # v6.4+ +Signed-off-by: Roger Quadros +Link: https://lore.kernel.org/r/20240227-for-v6-9-am62-usb-errata-3-0-v4-2-0ada8ddb0767@kernel.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/dwc3/dwc3-am62.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c +index f85603b7f7c5e..ea6e29091c0c9 100644 +--- a/drivers/usb/dwc3/dwc3-am62.c ++++ b/drivers/usb/dwc3/dwc3-am62.c +@@ -274,6 +274,7 @@ static void dwc3_ti_remove(struct platform_device *pdev) + u32 reg; + + pm_runtime_get_sync(dev); ++ device_init_wakeup(dev, false); + of_platform_depopulate(dev); + + /* Clear mode valid bit */ +-- +2.43.0 + diff --git a/queue-6.8/usb-dwc3-am62-fix-module-unload-reload-behavior.patch b/queue-6.8/usb-dwc3-am62-fix-module-unload-reload-behavior.patch new file mode 100644 index 00000000000..24eaadcc0d2 --- /dev/null +++ b/queue-6.8/usb-dwc3-am62-fix-module-unload-reload-behavior.patch @@ -0,0 +1,78 @@ +From 857d2125aee9ad72fd547ba125f8080fd415b618 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 11:23:48 +0200 +Subject: usb: dwc3-am62: fix module unload/reload behavior + +From: Roger Quadros + +[ Upstream commit 6661befe41009c210efa2c1bcd16a5cc4cff8a06 ] + +As runtime PM is enabled, the module can be runtime +suspended when .remove() is called. + +Do a pm_runtime_get_sync() to make sure module is active +before doing any register operations. + +Doing a pm_runtime_put_sync() should disable the refclk +so no need to disable it again. + +Fixes the below warning at module removel. + +[ 39.705310] ------------[ cut here ]------------ +[ 39.710004] clk:162:3 already disabled +[ 39.713941] WARNING: CPU: 0 PID: 921 at drivers/clk/clk.c:1090 clk_core_disable+0xb0/0xb8 + +We called of_platform_populate() in .probe() so call the +cleanup function of_platform_depopulate() in .remove(). +Get rid of the now unnnecessary dwc3_ti_remove_core(). +Without this, module re-load doesn't work properly. 
+ +Fixes: e8784c0aec03 ("drivers: usb: dwc3: Add AM62 USB wrapper driver") +Cc: stable@vger.kernel.org # v5.19+ +Signed-off-by: Roger Quadros +Link: https://lore.kernel.org/r/20240227-for-v6-9-am62-usb-errata-3-0-v4-1-0ada8ddb0767@kernel.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/dwc3/dwc3-am62.c | 12 ++---------- + 1 file changed, 2 insertions(+), 10 deletions(-) + +diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c +index 90a587bc29b74..f85603b7f7c5e 100644 +--- a/drivers/usb/dwc3/dwc3-am62.c ++++ b/drivers/usb/dwc3/dwc3-am62.c +@@ -267,21 +267,14 @@ static int dwc3_ti_probe(struct platform_device *pdev) + return ret; + } + +-static int dwc3_ti_remove_core(struct device *dev, void *c) +-{ +- struct platform_device *pdev = to_platform_device(dev); +- +- platform_device_unregister(pdev); +- return 0; +-} +- + static void dwc3_ti_remove(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct dwc3_am62 *am62 = platform_get_drvdata(pdev); + u32 reg; + +- device_for_each_child(dev, NULL, dwc3_ti_remove_core); ++ pm_runtime_get_sync(dev); ++ of_platform_depopulate(dev); + + /* Clear mode valid bit */ + reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); +@@ -289,7 +282,6 @@ static void dwc3_ti_remove(struct platform_device *pdev) + dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg); + + pm_runtime_put_sync(dev); +- clk_disable_unprepare(am62->usb2_refclk); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + } +-- +2.43.0 + diff --git a/queue-6.8/usb-gadget-tegra-xudc-fix-usb3-phy-retrieval-logic.patch b/queue-6.8/usb-gadget-tegra-xudc-fix-usb3-phy-retrieval-logic.patch new file mode 100644 index 00000000000..1e15b4138a2 --- /dev/null +++ b/queue-6.8/usb-gadget-tegra-xudc-fix-usb3-phy-retrieval-logic.patch @@ -0,0 +1,112 @@ +From 9b807619eec2b12dcc7c2dcb0217b682669bf445 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 7 Mar 2024 11:03:28 +0800 +Subject: usb: gadget: tegra-xudc: Fix USB3 PHY retrieval logic + +From: Wayne Chang + +[ Upstream commit 84fa943d93c31ee978355e6c6c69592dae3c9f59 ] + +This commit resolves an issue in the tegra-xudc USB gadget driver that +incorrectly fetched USB3 PHY instances. The problem stemmed from the +assumption of a one-to-one correspondence between USB2 and USB3 PHY +names and their association with physical USB ports in the device tree. + +Previously, the driver associated USB3 PHY names directly with the USB3 +instance number, leading to mismatches when mapping the physical USB +ports. For instance, if using USB3-1 PHY, the driver expect the +corresponding PHY name as 'usb3-1'. However, the physical USB ports in +the device tree were designated as USB2-0 and USB3-0 as we only have +one device controller, causing a misalignment. + +This commit rectifies the issue by adjusting the PHY naming logic. +Now, the driver correctly correlates the USB2 and USB3 PHY instances, +allowing the USB2-0 and USB3-1 PHYs to form a physical USB port pair +while accurately reflecting their configuration in the device tree by +naming them USB2-0 and USB3-0, respectively. + +The change ensures that the PHY and PHY names align appropriately, +resolving the mismatch between physical USB ports and their associated +names in the device tree. 
+ +Fixes: b4e19931c98a ("usb: gadget: tegra-xudc: Support multiple device modes") +Cc: stable@vger.kernel.org +Signed-off-by: Wayne Chang +Reviewed-by: Jon Hunter +Tested-by: Jon Hunter +Link: https://lore.kernel.org/r/20240307030328.1487748-3-waynec@nvidia.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/gadget/udc/tegra-xudc.c | 39 ++++++++++++++++++----------- + 1 file changed, 25 insertions(+), 14 deletions(-) + +diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c +index cb85168fd00c2..7aa46d426f31b 100644 +--- a/drivers/usb/gadget/udc/tegra-xudc.c ++++ b/drivers/usb/gadget/udc/tegra-xudc.c +@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc) + + static int tegra_xudc_phy_get(struct tegra_xudc *xudc) + { +- int err = 0, usb3; +- unsigned int i; ++ int err = 0, usb3_companion_port; ++ unsigned int i, j; + + xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, + sizeof(*xudc->utmi_phy), GFP_KERNEL); +@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) + if (IS_ERR(xudc->utmi_phy[i])) { + err = PTR_ERR(xudc->utmi_phy[i]); + dev_err_probe(xudc->dev, err, +- "failed to get usb2-%d PHY\n", i); ++ "failed to get PHY for phy-name usb2-%d\n", i); + goto clean_up; + } else if (xudc->utmi_phy[i]) { + /* Get usb-phy, if utmi phy is available */ +@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) + } + + /* Get USB3 phy */ +- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); +- if (usb3 < 0) ++ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); ++ if (usb3_companion_port < 0) + continue; + +- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3); +- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); +- if (IS_ERR(xudc->usb3_phy[i])) { +- err = PTR_ERR(xudc->usb3_phy[i]); +- dev_err_probe(xudc->dev, err, +- "failed to get usb3-%d PHY\n", usb3); +- goto clean_up; +- } else if (xudc->usb3_phy[i]) +- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3); ++ for (j = 0; j < xudc->soc->num_phys; j++) { ++ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j); ++ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); ++ if (IS_ERR(xudc->usb3_phy[i])) { ++ err = PTR_ERR(xudc->usb3_phy[i]); ++ dev_err_probe(xudc->dev, err, ++ "failed to get PHY for phy-name usb3-%d\n", j); ++ goto clean_up; ++ } else if (xudc->usb3_phy[i]) { ++ int usb2_port = ++ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]); ++ int usb3_port = ++ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]); ++ if (usb3_port == usb3_companion_port) { ++ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n", ++ usb2_port, usb3_port, i); ++ break; ++ } ++ } ++ } + } + + return err; +-- +2.43.0 + diff --git a/queue-6.8/usb-serial-add-device-id-for-verifone-adapter.patch b/queue-6.8/usb-serial-add-device-id-for-verifone-adapter.patch new file mode 100644 index 00000000000..d309dc5cab0 --- /dev/null +++ b/queue-6.8/usb-serial-add-device-id-for-verifone-adapter.patch @@ -0,0 +1,94 @@ +From 7ea10d5a96cbe662072fc8702118f7e3aace66b9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Feb 2024 21:53:29 +0000 +Subject: USB: serial: add device ID for VeriFone adapter + +From: Cameron Williams + +[ Upstream commit cda704809797a8a86284f9df3eef5e62ec8a3175 ] + +Add device ID for a (probably fake) CP2102 UART device. 
+ +lsusb -v output: + +Device Descriptor: + bLength 18 + bDescriptorType 1 + bcdUSB 1.10 + bDeviceClass 0 [unknown] + bDeviceSubClass 0 [unknown] + bDeviceProtocol 0 + bMaxPacketSize0 64 + idVendor 0x11ca VeriFone Inc + idProduct 0x0212 Verifone USB to Printer + bcdDevice 1.00 + iManufacturer 1 Silicon Labs + iProduct 2 Verifone USB to Printer + iSerial 3 0001 + bNumConfigurations 1 + Configuration Descriptor: + bLength 9 + bDescriptorType 2 + wTotalLength 0x0020 + bNumInterfaces 1 + bConfigurationValue 1 + iConfiguration 0 + bmAttributes 0x80 + (Bus Powered) + MaxPower 100mA + Interface Descriptor: + bLength 9 + bDescriptorType 4 + bInterfaceNumber 0 + bAlternateSetting 0 + bNumEndpoints 2 + bInterfaceClass 255 Vendor Specific Class + bInterfaceSubClass 0 [unknown] + bInterfaceProtocol 0 + iInterface 2 Verifone USB to Printer + Endpoint Descriptor: + bLength 7 + bDescriptorType 5 + bEndpointAddress 0x81 EP 1 IN + bmAttributes 2 + Transfer Type Bulk + Synch Type None + Usage Type Data + wMaxPacketSize 0x0040 1x 64 bytes + bInterval 0 + Endpoint Descriptor: + bLength 7 + bDescriptorType 5 + bEndpointAddress 0x01 EP 1 OUT + bmAttributes 2 + Transfer Type Bulk + Synch Type None + Usage Type Data + wMaxPacketSize 0x0040 1x 64 bytes + bInterval 0 +Device Status: 0x0000 + (Bus Powered) + +Signed-off-by: Cameron Williams +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Signed-off-by: Sasha Levin +--- + drivers/usb/serial/cp210x.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 923e0ed85444b..d339d81f6e8cf 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ + { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ + { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ ++ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */ + { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ + { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ + { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ +-- +2.43.0 + diff --git a/queue-6.8/usb-serial-cp210x-add-id-for-mgp-instruments-pds100.patch b/queue-6.8/usb-serial-cp210x-add-id-for-mgp-instruments-pds100.patch new file mode 100644 index 00000000000..4c1d20e3f38 --- /dev/null +++ b/queue-6.8/usb-serial-cp210x-add-id-for-mgp-instruments-pds100.patch @@ -0,0 +1,44 @@ +From f231419bce7b8ae81bfdfa673aed7f51ca00f57f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Feb 2024 11:47:29 +0100 +Subject: USB: serial: cp210x: add ID for MGP Instruments PDS100 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Christian Häggström + +[ Upstream commit a0d9d868491a362d421521499d98308c8e3a0398 ] + +The radiation meter has the text MGP Instruments PDS-100G or PDS-100GN +produced by Mirion Technologies. Tested by forcing the driver +association with + + echo 10c4 863c > /sys/bus/usb-serial/drivers/cp210x/new_id + +and then setting the serial port in 115200 8N1 mode. 
The device +announces ID_USB_VENDOR_ENC=Silicon\x20Labs and ID_USB_MODEL_ENC=PDS100 + +Signed-off-by: Christian Häggström +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Signed-off-by: Sasha Levin +--- + drivers/usb/serial/cp210x.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index d339d81f6e8cf..2169b6549a260 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -144,6 +144,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ ++ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */ +-- +2.43.0 + diff --git a/queue-6.8/usb-serial-cp210x-add-pid-vid-for-tdk-nc0110013m-and.patch b/queue-6.8/usb-serial-cp210x-add-pid-vid-for-tdk-nc0110013m-and.patch new file mode 100644 index 00000000000..0424662f726 --- /dev/null +++ b/queue-6.8/usb-serial-cp210x-add-pid-vid-for-tdk-nc0110013m-and.patch @@ -0,0 +1,36 @@ +From 0b278c7cd2978ae584559cd6f5b7fdfccb7cf9dd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 08:46:14 +0900 +Subject: USB: serial: cp210x: add pid/vid for TDK NC0110013M and MM0110113M + +From: Toru Katagiri + +[ Upstream commit b1a8da9ff1395c4879b4bd41e55733d944f3d613 ] + +TDK NC0110013M and MM0110113M have custom USB IDs for CP210x, +so we need to add them to the driver. + +Signed-off-by: Toru Katagiri +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Signed-off-by: Sasha Levin +--- + drivers/usb/serial/cp210x.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 2169b6549a260..21fd26609252b 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ + { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ ++ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */ ++ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */ + { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ + { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ +-- +2.43.0 + diff --git a/queue-6.8/usb-serial-ftdi_sio-add-support-for-gmc-z216c-adapte.patch b/queue-6.8/usb-serial-ftdi_sio-add-support-for-gmc-z216c-adapte.patch new file mode 100644 index 00000000000..593cebed43c --- /dev/null +++ b/queue-6.8/usb-serial-ftdi_sio-add-support-for-gmc-z216c-adapte.patch @@ -0,0 +1,53 @@ +From 40f529ee3c48106b65b7a55ee8f7301acec4db6d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 11 Feb 2024 15:42:46 +0100 +Subject: USB: serial: ftdi_sio: add support for GMC Z216C Adapter IR-USB + +From: Daniel Vogelbacher + +[ Upstream commit 3fb7bc4f3a98c48981318b87cf553c5f115fd5ca ] + +The GMC IR-USB adapter cable utilizes a FTDI FT232R chip. 
+ +Add VID/PID for this adapter so it can be used as serial device via +ftdi_sio. + +Signed-off-by: Daniel Vogelbacher +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Signed-off-by: Sasha Levin +--- + drivers/usb/serial/ftdi_sio.c | 2 ++ + drivers/usb/serial/ftdi_sio_ids.h | 6 ++++++ + 2 files changed, 8 insertions(+) + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 13a56783830df..22d01a0f10fbc 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = { + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, ++ /* GMC devices */ ++ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 21a2b5a25fc09..5ee60ba2a73cd 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -1606,3 +1606,9 @@ + #define UBLOX_VID 0x1546 + #define UBLOX_C099F9P_ZED_PID 0x0502 + #define UBLOX_C099F9P_ODIN_PID 0x0503 ++ ++/* ++ * GMC devices ++ */ ++#define GMC_VID 0x1cd7 ++#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ +-- +2.43.0 + diff --git a/queue-6.8/usb-serial-option-add-meig-smart-slm320-product.patch b/queue-6.8/usb-serial-option-add-meig-smart-slm320-product.patch new file mode 100644 index 00000000000..9fbed43257a --- /dev/null +++ b/queue-6.8/usb-serial-option-add-meig-smart-slm320-product.patch @@ -0,0 +1,85 @@ +From 696ffaa57245aeafec05820856b648ccfc1078c9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 18:49:17 +0100 +Subject: USB: serial: option: add MeiG Smart SLM320 product +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Aurélien Jacobs + +[ Upstream commit 46809c51565b83881aede6cdf3b0d25254966a41 ] + +Update the USB serial option driver to support MeiG Smart SLM320. + +ID 2dee:4d41 UNISOC UNISOC-8910 + +T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 9 Spd=480 MxCh= 0 +D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 +P: Vendor=2dee ProdID=4d41 Rev=00.00 +S: Manufacturer=UNISOC +S: Product=UNISOC-8910 +C: #Ifs= 8 Cfg#= 1 Atr=e0 MxPwr=400mA +I: If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 6 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option +E: Ad=07(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms +I: If#= 7 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=00 Prot=00 Driver=option +E: Ad=08(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms +E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms + +Tested successfully a PPP LTE connection using If#= 0. +Not sure of the purpose of every other serial interfaces. + +Signed-off-by: Aurélien Jacobs +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Signed-off-by: Sasha Levin +--- + drivers/usb/serial/option.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 2ae124c49d448..55a65d941ccbf 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb); + /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ + #define LUAT_PRODUCT_AIR720U 0x4e00 + ++/* MeiG Smart Technology products */ ++#define MEIGSMART_VENDOR_ID 0x2dee ++/* MeiG Smart SLM320 based on UNISOC UIS8910 */ ++#define MEIGSMART_PRODUCT_SLM320 0x4d41 ++ + /* Device flags */ + + /* Highest interface number which can be used with NCTRL() and RSVD() */ +@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) }, + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +-- +2.43.0 + diff --git a/queue-6.8/usb-typec-ucsi-clean-up-ucsi_cable_prop-macros.patch b/queue-6.8/usb-typec-ucsi-clean-up-ucsi_cable_prop-macros.patch new file mode 100644 index 00000000000..5d690dd8e6e --- /dev/null +++ b/queue-6.8/usb-typec-ucsi-clean-up-ucsi_cable_prop-macros.patch @@ -0,0 +1,47 @@ +From bd7a7e0391d08eb014bdfd4195f26210f587597e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Mar 2024 02:58:01 +0000 +Subject: usb: typec: ucsi: Clean up UCSI_CABLE_PROP macros + +From: Jameson Thies + +[ Upstream commit 4d0a5a9915793377c0fe1a8d78de6bcd92cea963 ] + +Clean up UCSI_CABLE_PROP macros by fixing a bitmask shifting error for +plug type and updating the modal support macro for consistent naming. 
+ +Fixes: 3cf657f07918 ("usb: typec: ucsi: Remove all bit-fields") +Cc: stable@vger.kernel.org +Reviewed-by: Benson Leung +Reviewed-by: Prashant Malani +Reviewed-by: Dmitry Baryshkov +Signed-off-by: Jameson Thies +Link: https://lore.kernel.org/r/20240305025804.1290919-2-jthies@google.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/typec/ucsi/ucsi.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 6478016d5cb8b..4550f3e8cfe9c 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -221,12 +221,12 @@ struct ucsi_cable_property { + #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0) + #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1) + #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2) +-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0)) ++#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3) + #define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0 + #define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1 + #define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2 + #define UCSI_CABLE_PROPERTY_PLUG_OTHER 3 +-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5) ++#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5) + u8 latency; + } __packed; + +-- +2.43.0 + diff --git a/queue-6.8/usb-xhci-add-error-handling-in-xhci_map_urb_for_dma.patch b/queue-6.8/usb-xhci-add-error-handling-in-xhci_map_urb_for_dma.patch new file mode 100644 index 00000000000..1e2b2123dc6 --- /dev/null +++ b/queue-6.8/usb-xhci-add-error-handling-in-xhci_map_urb_for_dma.patch @@ -0,0 +1,43 @@ +From 582a42e521e906ed0b1fa946d71441663ab29600 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Feb 2024 16:14:38 +0200 +Subject: usb: xhci: Add error handling in xhci_map_urb_for_dma + +From: Prashanth K + +[ Upstream commit be95cc6d71dfd0cba66e3621c65413321b398052 ] + +Currently xhci_map_urb_for_dma() creates a temporary buffer and copies +the SG list to the new linear buffer. But if the kzalloc_node() fails, +then the following sg_pcopy_to_buffer() can lead to crash since it +tries to memcpy to NULL pointer. + +So return -ENOMEM if kzalloc returns null pointer. 
+ +Cc: stable@vger.kernel.org # 5.11 +Fixes: 2017a1e58472 ("usb: xhci: Use temporary buffer to consolidate SG") +Signed-off-by: Prashanth K +Signed-off-by: Mathias Nyman +Link: https://lore.kernel.org/r/20240229141438.619372-10-mathias.nyman@linux.intel.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/host/xhci.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 0886829d53e51..afccd58c9a75a 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1220,6 +1220,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb) + + temp = kzalloc_node(buf_len, GFP_ATOMIC, + dev_to_node(hcd->self.sysdev)); ++ if (!temp) ++ return -ENOMEM; + + if (usb_urb_dir_out(urb)) + sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +-- +2.43.0 + diff --git a/queue-6.8/vfio-fsl-mc-block-calling-interrupt-handler-without-.patch b/queue-6.8/vfio-fsl-mc-block-calling-interrupt-handler-without-.patch new file mode 100644 index 00000000000..c7af85b5f16 --- /dev/null +++ b/queue-6.8/vfio-fsl-mc-block-calling-interrupt-handler-without-.patch @@ -0,0 +1,58 @@ +From fd5ef1142b219f2fe419816093a6fda1b5ab505f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:28 -0700 +Subject: vfio/fsl-mc: Block calling interrupt handler without trigger + +From: Alex Williamson + +[ Upstream commit 7447d911af699a15f8d050dfcb7c680a86f87012 ] + +The eventfd_ctx trigger pointer of the vfio_fsl_mc_irq object is +initially NULL and may become NULL if the user sets the trigger +eventfd to -1. The interrupt handler itself is guaranteed that +trigger is always valid between request_irq() and free_irq(), but +the loopback testing mechanisms to invoke the handler function +need to test the trigger. The triggering and setting ioctl paths +both make use of igate and are therefore mutually exclusive. + +The vfio-fsl-mc driver does not make use of irqfds, nor does it +support any sort of masking operations, therefore unlike vfio-pci +and vfio-platform, the flow can remain essentially unchanged. 
+ +Cc: Diana Craciun +Cc: +Fixes: cc0ee20bd969 ("vfio/fsl-mc: trigger an interrupt via eventfd") +Reviewed-by: Kevin Tian +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-8-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +index d62fbfff20b82..82b2afa9b7e31 100644 +--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c ++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +@@ -141,13 +141,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, + irq = &vdev->mc_irqs[index]; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { +- vfio_fsl_mc_irq_handler(hwirq, irq); ++ if (irq->trigger) ++ eventfd_signal(irq->trigger); + + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + u8 trigger = *(u8 *)data; + +- if (trigger) +- vfio_fsl_mc_irq_handler(hwirq, irq); ++ if (trigger && irq->trigger) ++ eventfd_signal(irq->trigger); + } + + return 0; +-- +2.43.0 + diff --git a/queue-6.8/vfio-introduce-interface-to-flush-virqfd-inject-work.patch b/queue-6.8/vfio-introduce-interface-to-flush-virqfd-inject-work.patch new file mode 100644 index 00000000000..5a99d3976b2 --- /dev/null +++ b/queue-6.8/vfio-introduce-interface-to-flush-virqfd-inject-work.patch @@ -0,0 +1,94 @@ +From 57805a44ab6bd22ffe34bb46c97b2e80c0e12044 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:24 -0700 +Subject: vfio: Introduce interface to flush virqfd inject workqueue + +From: Alex Williamson + +[ Upstream commit b620ecbd17a03cacd06f014a5d3f3a11285ce053 ] + +In order to synchronize changes that can affect the thread callback, +introduce an interface to force a flush of the inject workqueue. The +irqfd pointer is only valid under spinlock, but the workqueue cannot +be flushed under spinlock. Therefore the flush work for the irqfd is +queued under spinlock. The vfio_irqfd_cleanup_wq workqueue is re-used +for queuing this work such that flushing the workqueue is also ordered +relative to shutdown. 
+ +Reviewed-by: Kevin Tian +Reviewed-by: Reinette Chatre +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-4-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Stable-dep-of: 18c198c96a81 ("vfio/pci: Create persistent INTx handler") +Signed-off-by: Sasha Levin +--- + drivers/vfio/virqfd.c | 21 +++++++++++++++++++++ + include/linux/vfio.h | 2 ++ + 2 files changed, 23 insertions(+) + +diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c +index 29c564b7a6e13..5322691338019 100644 +--- a/drivers/vfio/virqfd.c ++++ b/drivers/vfio/virqfd.c +@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work) + virqfd->thread(virqfd->opaque, virqfd->data); + } + ++static void virqfd_flush_inject(struct work_struct *work) ++{ ++ struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject); ++ ++ flush_work(&virqfd->inject); ++} ++ + int vfio_virqfd_enable(void *opaque, + int (*handler)(void *, void *), + void (*thread)(void *, void *), +@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque, + + INIT_WORK(&virqfd->shutdown, virqfd_shutdown); + INIT_WORK(&virqfd->inject, virqfd_inject); ++ INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject); + + irqfd = fdget(fd); + if (!irqfd.file) { +@@ -213,3 +221,16 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd) + flush_workqueue(vfio_irqfd_cleanup_wq); + } + EXPORT_SYMBOL_GPL(vfio_virqfd_disable); ++ ++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&virqfd_lock, flags); ++ if (*pvirqfd && (*pvirqfd)->thread) ++ queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject); ++ spin_unlock_irqrestore(&virqfd_lock, flags); ++ ++ flush_workqueue(vfio_irqfd_cleanup_wq); ++} ++EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread); +diff --git a/include/linux/vfio.h b/include/linux/vfio.h +index 89b265bc6ec31..8b1a298204091 100644 +--- a/include/linux/vfio.h ++++ b/include/linux/vfio.h +@@ -356,6 +356,7 @@ struct virqfd { + wait_queue_entry_t wait; + poll_table pt; + struct work_struct shutdown; ++ struct work_struct flush_inject; + struct virqfd **pvirqfd; + }; + +@@ -363,5 +364,6 @@ int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *), + void (*thread)(void *, void *), void *data, + struct virqfd **pvirqfd, int fd); + void vfio_virqfd_disable(struct virqfd **pvirqfd); ++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd); + + #endif /* VFIO_H */ +-- +2.43.0 + diff --git a/queue-6.8/vfio-pci-create-persistent-intx-handler.patch b/queue-6.8/vfio-pci-create-persistent-intx-handler.patch new file mode 100644 index 00000000000..15e105ecea7 --- /dev/null +++ b/queue-6.8/vfio-pci-create-persistent-intx-handler.patch @@ -0,0 +1,271 @@ +From 386e973124d85413a457ea119186004427129114 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:25 -0700 +Subject: vfio/pci: Create persistent INTx handler + +From: Alex Williamson + +[ Upstream commit 18c198c96a815c962adc2b9b77909eec0be7df4d ] + +A vulnerability exists where the eventfd for INTx signaling can be +deconfigured, which unregisters the IRQ handler but still allows +eventfds to be signaled with a NULL context through the SET_IRQS ioctl +or through unmask irqfd if the device interrupt is pending. + +Ideally this could be solved with some additional locking; the igate +mutex serializes the ioctl and config space accesses, and the interrupt +handler is unregistered relative to the trigger, but the irqfd path +runs asynchronous to those. 
The igate mutex cannot be acquired from the +atomic context of the eventfd wake function. Disabling the irqfd +relative to the eventfd registration is potentially incompatible with +existing userspace. + +As a result, the solution implemented here moves configuration of the +INTx interrupt handler to track the lifetime of the INTx context object +and irq_type configuration, rather than registration of a particular +trigger eventfd. Synchronization is added between the ioctl path and +eventfd_signal() wrapper such that the eventfd trigger can be +dynamically updated relative to in-flight interrupts or irqfd callbacks. + +Cc: +Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver") +Reported-by: Reinette Chatre +Reviewed-by: Kevin Tian +Reviewed-by: Reinette Chatre +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-5-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/pci/vfio_pci_intrs.c | 145 ++++++++++++++++-------------- + 1 file changed, 78 insertions(+), 67 deletions(-) + +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 75c85eec21b3c..fb5392b749fff 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -90,11 +90,15 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) + + if (likely(is_intx(vdev) && !vdev->virq_disabled)) { + struct vfio_pci_irq_ctx *ctx; ++ struct eventfd_ctx *trigger; + + ctx = vfio_irq_ctx_get(vdev, 0); + if (WARN_ON_ONCE(!ctx)) + return; +- eventfd_signal(ctx->trigger); ++ ++ trigger = READ_ONCE(ctx->trigger); ++ if (likely(trigger)) ++ eventfd_signal(trigger); + } + } + +@@ -253,100 +257,100 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id) + return ret; + } + +-static int vfio_intx_enable(struct vfio_pci_core_device *vdev) ++static int vfio_intx_enable(struct vfio_pci_core_device *vdev, ++ struct eventfd_ctx *trigger) + { ++ struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_irq_ctx *ctx; ++ unsigned long irqflags; ++ char *name; ++ int ret; + + if (!is_irq_none(vdev)) + return -EINVAL; + +- if (!vdev->pdev->irq) ++ if (!pdev->irq) + return -ENODEV; + ++ name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); ++ if (!name) ++ return -ENOMEM; ++ + ctx = vfio_irq_ctx_alloc(vdev, 0); + if (!ctx) + return -ENOMEM; + ++ ctx->name = name; ++ ctx->trigger = trigger; ++ + /* +- * If the virtual interrupt is masked, restore it. Devices +- * supporting DisINTx can be masked at the hardware level +- * here, non-PCI-2.3 devices will have to wait until the +- * interrupt is enabled. ++ * Fill the initial masked state based on virq_disabled. After ++ * enable, changing the DisINTx bit in vconfig directly changes INTx ++ * masking. igate prevents races during setup, once running masked ++ * is protected via irqlock. ++ * ++ * Devices supporting DisINTx also reflect the current mask state in ++ * the physical DisINTx bit, which is not affected during IRQ setup. ++ * ++ * Devices without DisINTx support require an exclusive interrupt. ++ * IRQ masking is performed at the IRQ chip. Again, igate protects ++ * against races during setup and IRQ handlers and irqfds are not ++ * yet active, therefore masked is stable and can be used to ++ * conditionally auto-enable the IRQ. ++ * ++ * irq_type must be stable while the IRQ handler is registered, ++ * therefore it must be set before request_irq(). 
+ */ + ctx->masked = vdev->virq_disabled; +- if (vdev->pci_2_3) +- pci_intx(vdev->pdev, !ctx->masked); ++ if (vdev->pci_2_3) { ++ pci_intx(pdev, !ctx->masked); ++ irqflags = IRQF_SHARED; ++ } else { ++ irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0; ++ } + + vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + ++ ret = request_irq(pdev->irq, vfio_intx_handler, ++ irqflags, ctx->name, vdev); ++ if (ret) { ++ vdev->irq_type = VFIO_PCI_NUM_IRQS; ++ kfree(name); ++ vfio_irq_ctx_free(vdev, ctx, 0); ++ return ret; ++ } ++ + return 0; + } + +-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) ++static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, ++ struct eventfd_ctx *trigger) + { + struct pci_dev *pdev = vdev->pdev; +- unsigned long irqflags = IRQF_SHARED; + struct vfio_pci_irq_ctx *ctx; +- struct eventfd_ctx *trigger; +- unsigned long flags; +- int ret; ++ struct eventfd_ctx *old; + + ctx = vfio_irq_ctx_get(vdev, 0); + if (WARN_ON_ONCE(!ctx)) + return -EINVAL; + +- if (ctx->trigger) { +- free_irq(pdev->irq, vdev); +- kfree(ctx->name); +- eventfd_ctx_put(ctx->trigger); +- ctx->trigger = NULL; +- } +- +- if (fd < 0) /* Disable only */ +- return 0; +- +- ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", +- pci_name(pdev)); +- if (!ctx->name) +- return -ENOMEM; +- +- trigger = eventfd_ctx_fdget(fd); +- if (IS_ERR(trigger)) { +- kfree(ctx->name); +- return PTR_ERR(trigger); +- } ++ old = ctx->trigger; + +- ctx->trigger = trigger; ++ WRITE_ONCE(ctx->trigger, trigger); + +- /* +- * Devices without DisINTx support require an exclusive interrupt, +- * IRQ masking is performed at the IRQ chip. The masked status is +- * protected by vdev->irqlock. Setup the IRQ without auto-enable and +- * unmask as necessary below under lock. DisINTx is unmodified by +- * the IRQ configuration and may therefore use auto-enable. 
+- */ +- if (!vdev->pci_2_3) +- irqflags = IRQF_NO_AUTOEN; +- +- ret = request_irq(pdev->irq, vfio_intx_handler, +- irqflags, ctx->name, vdev); +- if (ret) { +- ctx->trigger = NULL; +- kfree(ctx->name); +- eventfd_ctx_put(trigger); +- return ret; ++ /* Releasing an old ctx requires synchronizing in-flight users */ ++ if (old) { ++ synchronize_irq(pdev->irq); ++ vfio_virqfd_flush_thread(&ctx->unmask); ++ eventfd_ctx_put(old); + } + +- spin_lock_irqsave(&vdev->irqlock, flags); +- if (!vdev->pci_2_3 && !ctx->masked) +- enable_irq(pdev->irq); +- spin_unlock_irqrestore(&vdev->irqlock, flags); +- + return 0; + } + + static void vfio_intx_disable(struct vfio_pci_core_device *vdev) + { ++ struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_irq_ctx *ctx; + + ctx = vfio_irq_ctx_get(vdev, 0); +@@ -354,10 +358,13 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev) + if (ctx) { + vfio_virqfd_disable(&ctx->unmask); + vfio_virqfd_disable(&ctx->mask); ++ free_irq(pdev->irq, vdev); ++ if (ctx->trigger) ++ eventfd_ctx_put(ctx->trigger); ++ kfree(ctx->name); ++ vfio_irq_ctx_free(vdev, ctx, 0); + } +- vfio_intx_set_signal(vdev, -1); + vdev->irq_type = VFIO_PCI_NUM_IRQS; +- vfio_irq_ctx_free(vdev, ctx, 0); + } + + /* +@@ -641,19 +648,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev, + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { ++ struct eventfd_ctx *trigger = NULL; + int32_t fd = *(int32_t *)data; + int ret; + +- if (is_intx(vdev)) +- return vfio_intx_set_signal(vdev, fd); ++ if (fd >= 0) { ++ trigger = eventfd_ctx_fdget(fd); ++ if (IS_ERR(trigger)) ++ return PTR_ERR(trigger); ++ } + +- ret = vfio_intx_enable(vdev); +- if (ret) +- return ret; ++ if (is_intx(vdev)) ++ ret = vfio_intx_set_signal(vdev, trigger); ++ else ++ ret = vfio_intx_enable(vdev, trigger); + +- ret = vfio_intx_set_signal(vdev, fd); +- if (ret) +- vfio_intx_disable(vdev); ++ if (ret && trigger) ++ eventfd_ctx_put(trigger); + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.8/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch b/queue-6.8/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch new file mode 100644 index 00000000000..488a9830083 --- /dev/null +++ b/queue-6.8/vfio-pci-disable-auto-enable-of-exclusive-intx-irq.patch @@ -0,0 +1,71 @@ +From 144c648a90dcd34b476ca052b2fa3fecbd82e3b9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:22 -0700 +Subject: vfio/pci: Disable auto-enable of exclusive INTx IRQ + +From: Alex Williamson + +[ Upstream commit fe9a7082684eb059b925c535682e68c34d487d43 ] + +Currently for devices requiring masking at the irqchip for INTx, ie. +devices without DisINTx support, the IRQ is enabled in request_irq() +and subsequently disabled as necessary to align with the masked status +flag. This presents a window where the interrupt could fire between +these events, resulting in the IRQ incrementing the disable depth twice. +This would be unrecoverable for a user since the masked flag prevents +nested enables through vfio. + +Instead, invert the logic using IRQF_NO_AUTOEN such that exclusive INTx +is never auto-enabled, then unmask as required. 
+ +Cc: +Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver") +Reviewed-by: Kevin Tian +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-2-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/pci/vfio_pci_intrs.c | 17 ++++++++++------- + 1 file changed, 10 insertions(+), 7 deletions(-) + +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 237beac838097..136101179fcbd 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -296,8 +296,15 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) + + ctx->trigger = trigger; + ++ /* ++ * Devices without DisINTx support require an exclusive interrupt, ++ * IRQ masking is performed at the IRQ chip. The masked status is ++ * protected by vdev->irqlock. Setup the IRQ without auto-enable and ++ * unmask as necessary below under lock. DisINTx is unmodified by ++ * the IRQ configuration and may therefore use auto-enable. ++ */ + if (!vdev->pci_2_3) +- irqflags = 0; ++ irqflags = IRQF_NO_AUTOEN; + + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, ctx->name, vdev); +@@ -308,13 +315,9 @@ static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) + return ret; + } + +- /* +- * INTx disable will stick across the new irq setup, +- * disable_irq won't. +- */ + spin_lock_irqsave(&vdev->irqlock, flags); +- if (!vdev->pci_2_3 && ctx->masked) +- disable_irq_nosync(pdev->irq); ++ if (!vdev->pci_2_3 && !ctx->masked) ++ enable_irq(pdev->irq); + spin_unlock_irqrestore(&vdev->irqlock, flags); + + return 0; +-- +2.43.0 + diff --git a/queue-6.8/vfio-pci-lock-external-intx-masking-ops.patch b/queue-6.8/vfio-pci-lock-external-intx-masking-ops.patch new file mode 100644 index 00000000000..93c964f08a0 --- /dev/null +++ b/queue-6.8/vfio-pci-lock-external-intx-masking-ops.patch @@ -0,0 +1,128 @@ +From b786f7ee8ddb155d14c40424546cde6a41bff455 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:23 -0700 +Subject: vfio/pci: Lock external INTx masking ops + +From: Alex Williamson + +[ Upstream commit 810cd4bb53456d0503cc4e7934e063835152c1b7 ] + +Mask operations through config space changes to DisINTx may race INTx +configuration changes via ioctl. Create wrappers that add locking for +paths outside of the core interrupt code. + +In particular, irq_type is updated holding igate, therefore testing +is_intx() requires holding igate. For example clearing DisINTx from +config space can otherwise race changes of the interrupt configuration. + +This aligns interfaces which may trigger the INTx eventfd into two +camps, one side serialized by igate and the other only enabled while +INTx is configured. A subsequent patch introduces synchronization for +the latter flows. 
+ +Cc: +Fixes: 89e1f7d4c66d ("vfio: Add PCI device driver") +Reported-by: Reinette Chatre +Reviewed-by: Kevin Tian +Reviewed-by: Reinette Chatre +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-3-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/pci/vfio_pci_intrs.c | 34 +++++++++++++++++++++++++------ + 1 file changed, 28 insertions(+), 6 deletions(-) + +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 136101179fcbd..75c85eec21b3c 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -99,13 +99,15 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) + } + + /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */ +-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) ++static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) + { + struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_irq_ctx *ctx; + unsigned long flags; + bool masked_changed = false; + ++ lockdep_assert_held(&vdev->igate); ++ + spin_lock_irqsave(&vdev->irqlock, flags); + + /* +@@ -143,6 +145,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) + return masked_changed; + } + ++bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) ++{ ++ bool mask_changed; ++ ++ mutex_lock(&vdev->igate); ++ mask_changed = __vfio_pci_intx_mask(vdev); ++ mutex_unlock(&vdev->igate); ++ ++ return mask_changed; ++} ++ + /* + * If this is triggered by an eventfd, we can't call eventfd_signal + * or else we'll deadlock on the eventfd wait queue. Return >0 when +@@ -194,12 +207,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) + return ret; + } + +-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) ++static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) + { ++ lockdep_assert_held(&vdev->igate); ++ + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) + vfio_send_intx_eventfd(vdev, NULL); + } + ++void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) ++{ ++ mutex_lock(&vdev->igate); ++ __vfio_pci_intx_unmask(vdev); ++ mutex_unlock(&vdev->igate); ++} ++ + static irqreturn_t vfio_intx_handler(int irq, void *dev_id) + { + struct vfio_pci_core_device *vdev = dev_id; +@@ -563,11 +585,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev, + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { +- vfio_pci_intx_unmask(vdev); ++ __vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t unmask = *(uint8_t *)data; + if (unmask) +- vfio_pci_intx_unmask(vdev); ++ __vfio_pci_intx_unmask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0); + int32_t fd = *(int32_t *)data; +@@ -594,11 +616,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev, + return -EINVAL; + + if (flags & VFIO_IRQ_SET_DATA_NONE) { +- vfio_pci_intx_mask(vdev); ++ __vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { + uint8_t mask = *(uint8_t *)data; + if (mask) +- vfio_pci_intx_mask(vdev); ++ __vfio_pci_intx_mask(vdev); + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + return -ENOTTY; /* XXX implement me */ + } +-- +2.43.0 + diff --git a/queue-6.8/vfio-pds-always-clear-the-save-restore-fds-on-reset.patch b/queue-6.8/vfio-pds-always-clear-the-save-restore-fds-on-reset.patch new file mode 100644 index 00000000000..8d0514110b6 --- /dev/null +++ 
b/queue-6.8/vfio-pds-always-clear-the-save-restore-fds-on-reset.patch @@ -0,0 +1,48 @@ +From 096cd7e37d0cf288bd57567ed7c87457d31508f7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 16:32:04 -0800 +Subject: vfio/pds: Always clear the save/restore FDs on reset + +From: Brett Creeley + +[ Upstream commit 8512ed256334f6637fc0699ce794792c357544ec ] + +After reset the VFIO device state will always be put in +VFIO_DEVICE_STATE_RUNNING, but the save/restore files will only be +cleared if the previous state was VFIO_DEVICE_STATE_ERROR. This +can/will cause the restore/save files to be leaked if/when the +migration state machine transitions through the states that +re-allocates these files. Fix this by always clearing the +restore/save files for resets. + +Fixes: 7dabb1bcd177 ("vfio/pds: Add support for firmware recovery") +Cc: stable@vger.kernel.org +Signed-off-by: Brett Creeley +Reviewed-by: Shannon Nelson +Reviewed-by: Kevin Tian +Link: https://lore.kernel.org/r/20240228003205.47311-2-brett.creeley@amd.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/pci/pds/vfio_dev.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c +index 4c351c59d05a9..a286ebcc71126 100644 +--- a/drivers/vfio/pci/pds/vfio_dev.c ++++ b/drivers/vfio/pci/pds/vfio_dev.c +@@ -32,9 +32,9 @@ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio) + mutex_lock(&pds_vfio->reset_mutex); + if (pds_vfio->deferred_reset) { + pds_vfio->deferred_reset = false; ++ pds_vfio_put_restore_file(pds_vfio); ++ pds_vfio_put_save_file(pds_vfio); + if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) { +- pds_vfio_put_restore_file(pds_vfio); +- pds_vfio_put_save_file(pds_vfio); + pds_vfio_dirty_disable(pds_vfio, false); + } + pds_vfio->state = pds_vfio->deferred_reset_state; +-- +2.43.0 + diff --git a/queue-6.8/vfio-platform-create-persistent-irq-handlers.patch b/queue-6.8/vfio-platform-create-persistent-irq-handlers.patch new file mode 100644 index 00000000000..8b0728afe31 --- /dev/null +++ b/queue-6.8/vfio-platform-create-persistent-irq-handlers.patch @@ -0,0 +1,254 @@ +From f927a0db0539af1c5cc4b3aee884c5b99ef0bc01 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:27 -0700 +Subject: vfio/platform: Create persistent IRQ handlers + +From: Alex Williamson + +[ Upstream commit 675daf435e9f8e5a5eab140a9864dfad6668b375 ] + +The vfio-platform SET_IRQS ioctl currently allows loopback triggering of +an interrupt before a signaling eventfd has been configured by the user, +which thereby allows a NULL pointer dereference. + +Rather than register the IRQ relative to a valid trigger, register all +IRQs in a disabled state in the device open path. This allows mask +operations on the IRQ to nest within the overall enable state governed +by a valid eventfd signal. This decouples @masked, protected by the +@locked spinlock from @trigger, protected via the @igate mutex. + +In doing so, it's guaranteed that changes to @trigger cannot race the +IRQ handlers because the IRQ handler is synchronously disabled before +modifying the trigger, and loopback triggering of the IRQ via ioctl is +safe due to serialization with trigger changes via igate. + +For compatibility, request_irq() failures are maintained to be local to +the SET_IRQS ioctl rather than a fatal error in the open device path. 
+This allows, for example, a userspace driver with polling mode support +to continue to work regardless of moving the request_irq() call site. +This necessarily blocks all SET_IRQS access to the failed index. + +Cc: Eric Auger +Cc: +Fixes: 57f972e2b341 ("vfio/platform: trigger an interrupt via eventfd") +Reviewed-by: Kevin Tian +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-7-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/platform/vfio_platform_irq.c | 100 +++++++++++++++------- + 1 file changed, 68 insertions(+), 32 deletions(-) + +diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c +index e5dcada9e86c4..ef41ecef83af1 100644 +--- a/drivers/vfio/platform/vfio_platform_irq.c ++++ b/drivers/vfio/platform/vfio_platform_irq.c +@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, + return 0; + } + ++/* ++ * The trigger eventfd is guaranteed valid in the interrupt path ++ * and protected by the igate mutex when triggered via ioctl. ++ */ ++static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx) ++{ ++ if (likely(irq_ctx->trigger)) ++ eventfd_signal(irq_ctx->trigger); ++} ++ + static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) + { + struct vfio_platform_irq *irq_ctx = dev_id; +@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) + spin_unlock_irqrestore(&irq_ctx->lock, flags); + + if (ret == IRQ_HANDLED) +- eventfd_signal(irq_ctx->trigger); ++ vfio_send_eventfd(irq_ctx); + + return ret; + } +@@ -164,52 +174,40 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id) + { + struct vfio_platform_irq *irq_ctx = dev_id; + +- eventfd_signal(irq_ctx->trigger); ++ vfio_send_eventfd(irq_ctx); + + return IRQ_HANDLED; + } + + static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, +- int fd, irq_handler_t handler) ++ int fd) + { + struct vfio_platform_irq *irq = &vdev->irqs[index]; + struct eventfd_ctx *trigger; +- int ret; + + if (irq->trigger) { +- irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN); +- free_irq(irq->hwirq, irq); +- kfree(irq->name); ++ disable_irq(irq->hwirq); + eventfd_ctx_put(irq->trigger); + irq->trigger = NULL; + } + + if (fd < 0) /* Disable only */ + return 0; +- irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)", +- irq->hwirq, vdev->name); +- if (!irq->name) +- return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); +- if (IS_ERR(trigger)) { +- kfree(irq->name); ++ if (IS_ERR(trigger)) + return PTR_ERR(trigger); +- } + + irq->trigger = trigger; + +- irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN); +- ret = request_irq(irq->hwirq, handler, 0, irq->name, irq); +- if (ret) { +- kfree(irq->name); +- eventfd_ctx_put(trigger); +- irq->trigger = NULL; +- return ret; +- } +- +- if (!irq->masked) +- enable_irq(irq->hwirq); ++ /* ++ * irq->masked effectively provides nested disables within the overall ++ * enable relative to trigger. Specifically request_irq() is called ++ * with NO_AUTOEN, therefore the IRQ is initially disabled. The user ++ * may only further disable the IRQ with a MASK operations because ++ * irq->masked is initially false. 
++ */ ++ enable_irq(irq->hwirq); + + return 0; + } +@@ -228,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, + handler = vfio_irq_handler; + + if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) +- return vfio_set_trigger(vdev, index, -1, handler); ++ return vfio_set_trigger(vdev, index, -1); + + if (start != 0 || count != 1) + return -EINVAL; +@@ -236,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd = *(int32_t *)data; + +- return vfio_set_trigger(vdev, index, fd, handler); ++ return vfio_set_trigger(vdev, index, fd); + } + + if (flags & VFIO_IRQ_SET_DATA_NONE) { +@@ -260,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, + unsigned start, unsigned count, uint32_t flags, + void *data) = NULL; + ++ /* ++ * For compatibility, errors from request_irq() are local to the ++ * SET_IRQS path and reflected in the name pointer. This allows, ++ * for example, polling mode fallback for an exclusive IRQ failure. ++ */ ++ if (IS_ERR(vdev->irqs[index].name)) ++ return PTR_ERR(vdev->irqs[index].name); ++ + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + func = vfio_platform_set_irq_mask; +@@ -280,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, + + int vfio_platform_irq_init(struct vfio_platform_device *vdev) + { +- int cnt = 0, i; ++ int cnt = 0, i, ret = 0; + + while (vdev->get_irq(vdev, cnt) >= 0) + cnt++; +@@ -292,29 +298,54 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev) + + for (i = 0; i < cnt; i++) { + int hwirq = vdev->get_irq(vdev, i); ++ irq_handler_t handler = vfio_irq_handler; + +- if (hwirq < 0) ++ if (hwirq < 0) { ++ ret = -EINVAL; + goto err; ++ } + + spin_lock_init(&vdev->irqs[i].lock); + + vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; + +- if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) ++ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) { + vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE + | VFIO_IRQ_INFO_AUTOMASKED; ++ handler = vfio_automasked_irq_handler; ++ } + + vdev->irqs[i].count = 1; + vdev->irqs[i].hwirq = hwirq; + vdev->irqs[i].masked = false; ++ vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT, ++ "vfio-irq[%d](%s)", hwirq, ++ vdev->name); ++ if (!vdev->irqs[i].name) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN, ++ vdev->irqs[i].name, &vdev->irqs[i]); ++ if (ret) { ++ kfree(vdev->irqs[i].name); ++ vdev->irqs[i].name = ERR_PTR(ret); ++ } + } + + vdev->num_irqs = cnt; + + return 0; + err: ++ for (--i; i >= 0; i--) { ++ if (!IS_ERR(vdev->irqs[i].name)) { ++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); ++ kfree(vdev->irqs[i].name); ++ } ++ } + kfree(vdev->irqs); +- return -EINVAL; ++ return ret; + } + + void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) +@@ -324,7 +355,12 @@ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) + for (i = 0; i < vdev->num_irqs; i++) { + vfio_virqfd_disable(&vdev->irqs[i].mask); + vfio_virqfd_disable(&vdev->irqs[i].unmask); +- vfio_set_trigger(vdev, i, -1, NULL); ++ if (!IS_ERR(vdev->irqs[i].name)) { ++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); ++ if (vdev->irqs[i].trigger) ++ eventfd_ctx_put(vdev->irqs[i].trigger); ++ kfree(vdev->irqs[i].name); ++ } + } + + vdev->num_irqs = 0; +-- +2.43.0 + diff --git a/queue-6.8/vfio-platform-disable-virqfds-on-cleanup.patch 
b/queue-6.8/vfio-platform-disable-virqfds-on-cleanup.patch new file mode 100644 index 00000000000..ce3db2f4a77 --- /dev/null +++ b/queue-6.8/vfio-platform-disable-virqfds-on-cleanup.patch @@ -0,0 +1,44 @@ +From 3a7d1a7e317f398ec48b99326d014d214ba06577 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:05:26 -0700 +Subject: vfio/platform: Disable virqfds on cleanup + +From: Alex Williamson + +[ Upstream commit fcdc0d3d40bc26c105acf8467f7d9018970944ae ] + +irqfds for mask and unmask that are not specifically disabled by the +user are leaked. Remove any irqfds during cleanup + +Cc: Eric Auger +Cc: +Fixes: a7fa7c77cf15 ("vfio/platform: implement IRQ masking/unmasking via an eventfd") +Reviewed-by: Kevin Tian +Reviewed-by: Eric Auger +Link: https://lore.kernel.org/r/20240308230557.805580-6-alex.williamson@redhat.com +Signed-off-by: Alex Williamson +Signed-off-by: Sasha Levin +--- + drivers/vfio/platform/vfio_platform_irq.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c +index 61a1bfb68ac78..e5dcada9e86c4 100644 +--- a/drivers/vfio/platform/vfio_platform_irq.c ++++ b/drivers/vfio/platform/vfio_platform_irq.c +@@ -321,8 +321,11 @@ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) + { + int i; + +- for (i = 0; i < vdev->num_irqs; i++) ++ for (i = 0; i < vdev->num_irqs; i++) { ++ vfio_virqfd_disable(&vdev->irqs[i].mask); ++ vfio_virqfd_disable(&vdev->irqs[i].unmask); + vfio_set_trigger(vdev, i, -1, NULL); ++ } + + vdev->num_irqs = 0; + kfree(vdev->irqs); +-- +2.43.0 + diff --git a/queue-6.8/virtio-reenable-config-if-freezing-device-failed.patch b/queue-6.8/virtio-reenable-config-if-freezing-device-failed.patch new file mode 100644 index 00000000000..e8bfa4a7bc4 --- /dev/null +++ b/queue-6.8/virtio-reenable-config-if-freezing-device-failed.patch @@ -0,0 +1,50 @@ +From d3493450c8c8eda7530df482ed134fbee1bb2b60 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 Feb 2024 14:54:25 +0100 +Subject: virtio: reenable config if freezing device failed + +From: David Hildenbrand + +[ Upstream commit 310227f42882c52356b523e2f4e11690eebcd2ab ] + +Currently, we don't reenable the config if freezing the device failed. + +For example, virtio-mem currently doesn't support suspend+resume, and +trying to freeze the device will always fail. Afterwards, the device +will no longer respond to resize requests, because it won't get notified +about config changes. + +Let's fix this by re-enabling the config if freezing fails. + +Fixes: 22b7050a024d ("virtio: defer config changed notifications") +Cc: +Cc: "Michael S. Tsirkin" +Cc: Jason Wang +Cc: Xuan Zhuo +Signed-off-by: David Hildenbrand +Message-Id: <20240213135425.795001-1-david@redhat.com> +Signed-off-by: Michael S. 
Tsirkin +Signed-off-by: Sasha Levin +--- + drivers/virtio/virtio.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c +index f4080692b3513..f513ee21b1c18 100644 +--- a/drivers/virtio/virtio.c ++++ b/drivers/virtio/virtio.c +@@ -510,8 +510,10 @@ int virtio_device_freeze(struct virtio_device *dev) + + if (drv && drv->freeze) { + ret = drv->freeze(dev); +- if (ret) ++ if (ret) { ++ virtio_config_enable(dev); + return ret; ++ } + } + + if (dev->config->destroy_avq) +-- +2.43.0 + diff --git a/queue-6.8/wifi-brcmfmac-add-per-vendor-feature-detection-callb.patch b/queue-6.8/wifi-brcmfmac-add-per-vendor-feature-detection-callb.patch new file mode 100644 index 00000000000..cbe787b8565 --- /dev/null +++ b/queue-6.8/wifi-brcmfmac-add-per-vendor-feature-detection-callb.patch @@ -0,0 +1,110 @@ +From e97d0bef14ecf88c5f06bdbe805889cee2e3453f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 10:57:02 +0100 +Subject: wifi: brcmfmac: add per-vendor feature detection callback + +From: Arend van Spriel + +[ Upstream commit 14e1391b71027948cdbacdbea4bf8858c2068eb7 ] + +Adding a .feat_attach() callback allowing per-vendor overrides +of the driver feature flags. In this patch the callback is only +provided by BCA vendor to disable SAE feature as it has not been +confirmed yet. BCA chips generally do not have the in-driver +supplicant (idsup) feature so they rely on NL80211_CMD_EXTERNAL_AUTH +to trigger user-space authentication. + +Signed-off-by: Arend van Spriel +Signed-off-by: Kalle Valo +Link: https://msgid.link/20240103095704.135651-3-arend.vanspriel@broadcom.com +Stable-dep-of: 85da8f71aaa7 ("wifi: brcmfmac: Demote vendor-specific attach/detach messages to info") +Signed-off-by: Sasha Levin +--- + .../wireless/broadcom/brcm80211/brcmfmac/bca/core.c | 8 ++++++++ + .../wireless/broadcom/brcm80211/brcmfmac/feature.c | 3 +++ + .../net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h | 12 ++++++++++++ + 3 files changed, 23 insertions(+) + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c +index ac3a36fa3640c..a5d9ac5e67638 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "vops.h" + +@@ -21,7 +22,14 @@ static void brcmf_bca_detach(struct brcmf_pub *drvr) + pr_err("%s: executing\n", __func__); + } + ++static void brcmf_bca_feat_attach(struct brcmf_if *ifp) ++{ ++ /* SAE support not confirmed so disabling for now */ ++ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE); ++} ++ + const struct brcmf_fwvid_ops brcmf_bca_ops = { + .attach = brcmf_bca_attach, + .detach = brcmf_bca_detach, ++ .feat_attach = brcmf_bca_feat_attach, + }; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +index 6d10c9efbe93d..909a34a1ab503 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +@@ -13,6 +13,7 @@ + #include "debug.h" + #include "fwil.h" + #include "fwil_types.h" ++#include "fwvid.h" + #include "feature.h" + #include "common.h" + +@@ -339,6 +340,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver"); + ++ 
brcmf_fwvid_feat_attach(ifp); ++ + if (drvr->settings->feature_disable) { + brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", + ifp->drvr->feat_flags, +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +index 43df58bb70ad3..17fbdbb76f51b 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +@@ -8,10 +8,12 @@ + #include "firmware.h" + + struct brcmf_pub; ++struct brcmf_if; + + struct brcmf_fwvid_ops { + int (*attach)(struct brcmf_pub *drvr); + void (*detach)(struct brcmf_pub *drvr); ++ void (*feat_attach)(struct brcmf_if *ifp); + }; + + /* exported functions */ +@@ -44,4 +46,14 @@ static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr) + brcmf_fwvid_detach_ops(drvr); + } + ++static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp) ++{ ++ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops; ++ ++ if (!vops->feat_attach) ++ return; ++ ++ vops->feat_attach(ifp); ++} ++ + #endif /* FWVID_H_ */ +-- +2.43.0 + diff --git a/queue-6.8/wifi-brcmfmac-avoid-invalid-list-operation-when-vend.patch b/queue-6.8/wifi-brcmfmac-avoid-invalid-list-operation-when-vend.patch new file mode 100644 index 00000000000..d5710c57ef2 --- /dev/null +++ b/queue-6.8/wifi-brcmfmac-avoid-invalid-list-operation-when-vend.patch @@ -0,0 +1,44 @@ +From 06f37455be7cc24ebcfbcab7618ab8b1a034f8d6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 6 Jan 2024 11:38:34 +0100 +Subject: wifi: brcmfmac: avoid invalid list operation when vendor attach fails + +From: Arend van Spriel + +[ Upstream commit b822015a1f57268f5b2ff656736dc4004e7097da ] + +When the brcmf_fwvid_attach() fails the driver instance is not added +to the vendor list. Hence we should not try to delete it from that +list when the brcmf_fwvid_detach() function is called in cleanup path. 
+ +Cc: stable@vger.kernel.org # 6.2.x +Fixes: d6a5c562214f ("wifi: brcmfmac: add support for vendor-specific firmware api") +Signed-off-by: Arend van Spriel +Signed-off-by: Kalle Valo +Link: https://msgid.link/20240106103835.269149-3-arend.vanspriel@broadcom.com +Signed-off-by: Sasha Levin +--- + drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c +index 86eafdb405419..f610818c2b059 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c +@@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr) + + mutex_lock(&fwvid_list_lock); + +- drvr->vops = NULL; +- list_del(&drvr->bus_if->list); +- ++ if (drvr->vops) { ++ drvr->vops = NULL; ++ list_del(&drvr->bus_if->list); ++ } + mutex_unlock(&fwvid_list_lock); + } + +-- +2.43.0 + diff --git a/queue-6.8/wifi-brcmfmac-cfg80211-use-wsec-to-set-sae-password.patch b/queue-6.8/wifi-brcmfmac-cfg80211-use-wsec-to-set-sae-password.patch new file mode 100644 index 00000000000..2bdd1c6a0e8 --- /dev/null +++ b/queue-6.8/wifi-brcmfmac-cfg80211-use-wsec-to-set-sae-password.patch @@ -0,0 +1,306 @@ +From 3ff30b274355186ce18ba7bdc79362fd6898f111 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 Jan 2024 10:57:04 +0100 +Subject: wifi: brcmfmac: cfg80211: Use WSEC to set SAE password + +From: Hector Martin + +[ Upstream commit 9f7861c56b51b84d30114e7fea9d744a9d5ba9b7 ] + +Using the WSEC command instead of sae_password seems to be the supported +mechanism on newer firmware, and also how the brcmdhd driver does it. + +The existing firmware mechanism intended for (some) Cypress chips has +been separated from the new firmware mechanism using the multi-vendor +framework. Depending on the device it will select the appropriate +firmware mechanism. + +This makes WPA3 work with iwd, or with wpa_supplicant pending a support +patchset [2]. 
+ +[1] https://rachelbythebay.com/w/2023/11/06/wpa3/ +[2] http://lists.infradead.org/pipermail/hostap/2023-July/041653.html + +Signed-off-by: Hector Martin +Reviewed-by: Neal Gompa +[arend.vanspriel@broadcom.com: use multi-vendor framework] +Signed-off-by: Arend van Spriel +Signed-off-by: Kalle Valo +Link: https://msgid.link/20240103095704.135651-5-arend.vanspriel@broadcom.com +Stable-dep-of: 85da8f71aaa7 ("wifi: brcmfmac: Demote vendor-specific attach/detach messages to info") +Signed-off-by: Sasha Levin +--- + .../broadcom/brcm80211/brcmfmac/cfg80211.c | 56 ++++++++----------- + .../broadcom/brcm80211/brcmfmac/cfg80211.h | 2 + + .../broadcom/brcm80211/brcmfmac/cyw/core.c | 28 ++++++++++ + .../broadcom/brcm80211/brcmfmac/fwil.c | 1 + + .../broadcom/brcm80211/brcmfmac/fwil_types.h | 2 +- + .../broadcom/brcm80211/brcmfmac/fwvid.h | 13 +++++ + .../broadcom/brcm80211/brcmfmac/wcc/core.c | 9 +++ + 7 files changed, 76 insertions(+), 35 deletions(-) + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index 0c9581a3a67f2..625b7cb37f47b 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -32,6 +32,7 @@ + #include "vendor.h" + #include "bus.h" + #include "common.h" ++#include "fwvid.h" + + #define BRCMF_SCAN_IE_LEN_MAX 2048 + +@@ -1686,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e) + return reason; + } + +-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) ++int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags) + { + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_wsec_pmk_le pmk; + int err; + ++ if (key_len > sizeof(pmk.key)) { ++ bphy_err(drvr, "key must be less than %zu bytes\n", ++ sizeof(pmk.key)); ++ return -EINVAL; ++ } ++ + memset(&pmk, 0, sizeof(pmk)); + +- /* pass pmk directly */ +- pmk.key_len = cpu_to_le16(pmk_len); +- pmk.flags = cpu_to_le16(0); +- memcpy(pmk.key, pmk_data, pmk_len); ++ /* pass key material directly */ ++ pmk.key_len = cpu_to_le16(key_len); ++ pmk.flags = cpu_to_le16(flags); ++ memcpy(pmk.key, key, key_len); + +- /* store psk in firmware */ ++ /* store key material in firmware */ + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK, + &pmk, sizeof(pmk)); + if (err < 0) + bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n", +- pmk_len); ++ key_len); + + return err; + } ++BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec); + +-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data, +- u16 pwd_len) ++static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) + { +- struct brcmf_pub *drvr = ifp->drvr; +- struct brcmf_wsec_sae_pwd_le sae_pwd; +- int err; +- +- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) { +- bphy_err(drvr, "sae_password must be less than %d\n", +- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN); +- return -EINVAL; +- } +- +- sae_pwd.key_len = cpu_to_le16(pwd_len); +- memcpy(sae_pwd.key, pwd_data, pwd_len); +- +- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd, +- sizeof(sae_pwd)); +- if (err < 0) +- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n", +- pwd_len); +- +- return err; ++ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0); + } + + static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason, +@@ -2502,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, + bphy_err(drvr, 
"failed to clean up user-space RSNE\n"); + goto done; + } +- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd, +- sme->crypto.sae_pwd_len); ++ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto); + if (!err && sme->crypto.psk) + err = brcmf_set_pmk(ifp, sme->crypto.psk, + BRCMF_WSEC_MAX_PSK_LEN); +@@ -5256,8 +5243,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, + if (crypto->sae_pwd) { + brcmf_dbg(INFO, "using SAE offload\n"); + profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE); +- err = brcmf_set_sae_password(ifp, crypto->sae_pwd, +- crypto->sae_pwd_len); ++ err = brcmf_fwvid_set_sae_password(ifp, crypto); + if (err < 0) + goto exit; + } +@@ -5364,10 +5350,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev, + msleep(400); + + if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) { ++ struct cfg80211_crypto_settings crypto = {}; ++ + if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK)) + brcmf_set_pmk(ifp, NULL, 0); + if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE)) +- brcmf_set_sae_password(ifp, NULL, 0); ++ brcmf_fwvid_set_sae_password(ifp, &crypto); + profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE); + } + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +index 0e1fa3f0dea2c..dc3a6a537507d 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc); + void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg); + void brcmf_cfg80211_free_netdev(struct net_device *ndev); + ++int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags); ++ + #endif /* BRCMFMAC_CFG80211_H */ +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c +index b75652ba9359f..24670497f1a40 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "vops.h" + +@@ -21,7 +22,34 @@ static void brcmf_cyw_detach(struct brcmf_pub *drvr) + pr_err("%s: executing\n", __func__); + } + ++static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp, ++ struct cfg80211_crypto_settings *crypto) ++{ ++ struct brcmf_pub *drvr = ifp->drvr; ++ struct brcmf_wsec_sae_pwd_le sae_pwd; ++ u16 pwd_len = crypto->sae_pwd_len; ++ int err; ++ ++ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) { ++ bphy_err(drvr, "sae_password must be less than %d\n", ++ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN); ++ return -EINVAL; ++ } ++ ++ sae_pwd.key_len = cpu_to_le16(pwd_len); ++ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len); ++ ++ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd, ++ sizeof(sae_pwd)); ++ if (err < 0) ++ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n", ++ pwd_len); ++ ++ return err; ++} ++ + const struct brcmf_fwvid_ops brcmf_cyw_ops = { + .attach = brcmf_cyw_attach, + .detach = brcmf_cyw_detach, ++ .set_sae_password = brcmf_cyw_set_sae_pwd, + }; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +index 72fe8bce6eaf5..a9514d72f770b 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +@@ -239,6 
+239,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat + mutex_unlock(&drvr->proto_block); + return err; + } ++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set); + + s32 + brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data, +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +index 9d248ba1c0b2b..e74a23e11830c 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le { + struct brcmf_wsec_pmk_le { + __le16 key_len; + __le16 flags; +- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1]; ++ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN]; + }; + + /** +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +index 17fbdbb76f51b..d9fc76b46db96 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +@@ -6,6 +6,7 @@ + #define FWVID_H_ + + #include "firmware.h" ++#include "cfg80211.h" + + struct brcmf_pub; + struct brcmf_if; +@@ -14,6 +15,7 @@ struct brcmf_fwvid_ops { + int (*attach)(struct brcmf_pub *drvr); + void (*detach)(struct brcmf_pub *drvr); + void (*feat_attach)(struct brcmf_if *ifp); ++ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto); + }; + + /* exported functions */ +@@ -56,4 +58,15 @@ static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp) + vops->feat_attach(ifp); + } + ++static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp, ++ struct cfg80211_crypto_settings *crypto) ++{ ++ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops; ++ ++ if (!vops || !vops->set_sae_password) ++ return -EOPNOTSUPP; ++ ++ return vops->set_sae_password(ifp, crypto); ++} ++ + #endif /* FWVID_H_ */ +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c +index 5573a47766ad5..2d8f80bd73829 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "vops.h" + +@@ -21,7 +22,15 @@ static void brcmf_wcc_detach(struct brcmf_pub *drvr) + pr_debug("%s: executing\n", __func__); + } + ++static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp, ++ struct cfg80211_crypto_settings *crypto) ++{ ++ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len, ++ BRCMF_WSEC_PASSPHRASE); ++} ++ + const struct brcmf_fwvid_ops brcmf_wcc_ops = { + .attach = brcmf_wcc_attach, + .detach = brcmf_wcc_detach, ++ .set_sae_password = brcmf_wcc_set_sae_pwd, + }; +-- +2.43.0 + diff --git a/queue-6.8/wifi-brcmfmac-demote-vendor-specific-attach-detach-m.patch b/queue-6.8/wifi-brcmfmac-demote-vendor-specific-attach-detach-m.patch new file mode 100644 index 00000000000..b89d0fd3c36 --- /dev/null +++ b/queue-6.8/wifi-brcmfmac-demote-vendor-specific-attach-detach-m.patch @@ -0,0 +1,204 @@ +From 77ec69650fa58811e0fb8acb730b6beadda514e7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 6 Jan 2024 11:38:33 +0100 +Subject: wifi: brcmfmac: Demote vendor-specific attach/detach messages to info + +From: Hector Martin + +[ Upstream commit 85da8f71aaa7b83ea7ef0e89182e0cd47e16d465 ] + +People are getting spooked by brcmfmac errors on their boot 
console. +There's no reason for these messages to be errors. + +Cc: stable@vger.kernel.org # 6.2.x +Fixes: d6a5c562214f ("wifi: brcmfmac: add support for vendor-specific firmware api") +Signed-off-by: Hector Martin +[arend.vanspriel@broadcom.com: remove attach/detach vendor callbacks] +Signed-off-by: Arend van Spriel +Signed-off-by: Kalle Valo +Link: https://msgid.link/20240106103835.269149-2-arend.vanspriel@broadcom.com +Signed-off-by: Sasha Levin +--- + .../broadcom/brcm80211/brcmfmac/bca/core.c | 13 ---------- + .../broadcom/brcm80211/brcmfmac/cyw/core.c | 13 ---------- + .../broadcom/brcm80211/brcmfmac/fwvid.c | 7 +++-- + .../broadcom/brcm80211/brcmfmac/fwvid.h | 26 ++----------------- + .../broadcom/brcm80211/brcmfmac/wcc/core.c | 15 +---------- + 5 files changed, 6 insertions(+), 68 deletions(-) + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c +index a5d9ac5e67638..a963c242975ac 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c +@@ -11,17 +11,6 @@ + + #include "vops.h" + +-static int brcmf_bca_attach(struct brcmf_pub *drvr) +-{ +- pr_err("%s: executing\n", __func__); +- return 0; +-} +- +-static void brcmf_bca_detach(struct brcmf_pub *drvr) +-{ +- pr_err("%s: executing\n", __func__); +-} +- + static void brcmf_bca_feat_attach(struct brcmf_if *ifp) + { + /* SAE support not confirmed so disabling for now */ +@@ -29,7 +18,5 @@ static void brcmf_bca_feat_attach(struct brcmf_if *ifp) + } + + const struct brcmf_fwvid_ops brcmf_bca_ops = { +- .attach = brcmf_bca_attach, +- .detach = brcmf_bca_detach, + .feat_attach = brcmf_bca_feat_attach, + }; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c +index 24670497f1a40..bec5748310b9c 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c +@@ -11,17 +11,6 @@ + + #include "vops.h" + +-static int brcmf_cyw_attach(struct brcmf_pub *drvr) +-{ +- pr_err("%s: executing\n", __func__); +- return 0; +-} +- +-static void brcmf_cyw_detach(struct brcmf_pub *drvr) +-{ +- pr_err("%s: executing\n", __func__); +-} +- + static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp, + struct cfg80211_crypto_settings *crypto) + { +@@ -49,7 +38,5 @@ static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp, + } + + const struct brcmf_fwvid_ops brcmf_cyw_ops = { +- .attach = brcmf_cyw_attach, +- .detach = brcmf_cyw_detach, + .set_sae_password = brcmf_cyw_set_sae_pwd, + }; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c +index f610818c2b059..b427782554b59 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c +@@ -89,8 +89,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod, + if (fwvid >= BRCMF_FWVENDOR_NUM) + return -ERANGE; + +- if (WARN_ON(!vmod) || WARN_ON(!vops) || +- WARN_ON(!vops->attach) || WARN_ON(!vops->detach)) ++ if (WARN_ON(!vmod) || WARN_ON(!vops)) + return -EINVAL; + + if (WARN_ON(fwvid_list[fwvid].vmod)) +@@ -150,7 +149,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid) + } + #endif + +-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr) ++int brcmf_fwvid_attach(struct brcmf_pub *drvr) + { + enum 
brcmf_fwvendor fwvid = drvr->bus_if->fwvid; + int ret; +@@ -175,7 +174,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr) + return ret; + } + +-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr) ++void brcmf_fwvid_detach(struct brcmf_pub *drvr) + { + enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid; + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +index d9fc76b46db96..dac22534d0334 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +@@ -12,8 +12,6 @@ struct brcmf_pub; + struct brcmf_if; + + struct brcmf_fwvid_ops { +- int (*attach)(struct brcmf_pub *drvr); +- void (*detach)(struct brcmf_pub *drvr); + void (*feat_attach)(struct brcmf_if *ifp); + int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto); + }; +@@ -24,30 +22,10 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod, + int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod); + + /* core driver functions */ +-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr); +-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr); ++int brcmf_fwvid_attach(struct brcmf_pub *drvr); ++void brcmf_fwvid_detach(struct brcmf_pub *drvr); + const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr); + +-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr) +-{ +- int ret; +- +- ret = brcmf_fwvid_attach_ops(drvr); +- if (ret) +- return ret; +- +- return drvr->vops->attach(drvr); +-} +- +-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr) +-{ +- if (!drvr->vops) +- return; +- +- drvr->vops->detach(drvr); +- brcmf_fwvid_detach_ops(drvr); +-} +- + static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp) + { + const struct brcmf_fwvid_ops *vops = ifp->drvr->vops; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c +index 2d8f80bd73829..fd593b93ad404 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c +@@ -7,21 +7,10 @@ + #include + #include + #include +-#include ++#include + + #include "vops.h" + +-static int brcmf_wcc_attach(struct brcmf_pub *drvr) +-{ +- pr_debug("%s: executing\n", __func__); +- return 0; +-} +- +-static void brcmf_wcc_detach(struct brcmf_pub *drvr) +-{ +- pr_debug("%s: executing\n", __func__); +-} +- + static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp, + struct cfg80211_crypto_settings *crypto) + { +@@ -30,7 +19,5 @@ static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp, + } + + const struct brcmf_fwvid_ops brcmf_wcc_ops = { +- .attach = brcmf_wcc_attach, +- .detach = brcmf_wcc_detach, + .set_sae_password = brcmf_wcc_set_sae_pwd, + }; +-- +2.43.0 + diff --git a/queue-6.8/wifi-brcmfmac-fix-use-after-free-bug-in-brcmf_cfg802.patch b/queue-6.8/wifi-brcmfmac-fix-use-after-free-bug-in-brcmf_cfg802.patch new file mode 100644 index 00000000000..b4732af87ba --- /dev/null +++ b/queue-6.8/wifi-brcmfmac-fix-use-after-free-bug-in-brcmf_cfg802.patch @@ -0,0 +1,77 @@ +From 888fd35b12af4d4ef1bf111e7318fd3c1313a8e6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 7 Jan 2024 08:25:04 +0100 +Subject: wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach + +From: Zheng Wang + +[ Upstream commit 0f7352557a35ab7888bc7831411ec8a3cbe20d78 ] + +This is the candidate patch of CVE-2023-47233 : 
+https://nvd.nist.gov/vuln/detail/CVE-2023-47233 + +In brcm80211 driver,it starts with the following invoking chain +to start init a timeout worker: + +->brcmf_usb_probe + ->brcmf_usb_probe_cb + ->brcmf_attach + ->brcmf_bus_started + ->brcmf_cfg80211_attach + ->wl_init_priv + ->brcmf_init_escan + ->INIT_WORK(&cfg->escan_timeout_work, + brcmf_cfg80211_escan_timeout_worker); + +If we disconnect the USB by hotplug, it will call +brcmf_usb_disconnect to make cleanup. The invoking chain is : + +brcmf_usb_disconnect + ->brcmf_usb_disconnect_cb + ->brcmf_detach + ->brcmf_cfg80211_detach + ->kfree(cfg); + +While the timeout woker may still be running. This will cause +a use-after-free bug on cfg in brcmf_cfg80211_escan_timeout_worker. + +Fix it by deleting the timer and canceling the worker in +brcmf_cfg80211_detach. + +Fixes: e756af5b30b0 ("brcmfmac: add e-scan support.") +Signed-off-by: Zheng Wang +Cc: stable@vger.kernel.org +[arend.vanspriel@broadcom.com: keep timer delete as is and cancel work just before free] +Signed-off-by: Arend van Spriel +Signed-off-by: Kalle Valo +Link: https://msgid.link/20240107072504.392713-1-arend.vanspriel@broadcom.com +Signed-off-by: Sasha Levin +--- + drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index 1a5d7494f5e80..0c9581a3a67f2 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -1179,8 +1179,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, + scan_request = cfg->scan_request; + cfg->scan_request = NULL; + +- if (timer_pending(&cfg->escan_timeout)) +- del_timer_sync(&cfg->escan_timeout); ++ timer_delete_sync(&cfg->escan_timeout); + + if (fw_abort) { + /* Do a scan abort to stop the driver's scan engine */ +@@ -8440,6 +8439,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) + brcmf_btcoex_detach(cfg); + wiphy_unregister(cfg->wiphy); + wl_deinit_priv(cfg); ++ cancel_work_sync(&cfg->escan_timeout_work); + brcmf_free_wiphy(cfg->wiphy); + kfree(cfg); + } +-- +2.43.0 + diff --git a/queue-6.8/wifi-mac80211-track-capability-opmode-nss-separately.patch b/queue-6.8/wifi-mac80211-track-capability-opmode-nss-separately.patch new file mode 100644 index 00000000000..b14b18ecbeb --- /dev/null +++ b/queue-6.8/wifi-mac80211-track-capability-opmode-nss-separately.patch @@ -0,0 +1,202 @@ +From a87003d66947882f6c25f40fa0c7be9c446fecb1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Feb 2024 12:01:57 +0100 +Subject: wifi: mac80211: track capability/opmode NSS separately + +From: Johannes Berg + +[ Upstream commit a8bca3e9371dc5e276af4168be099b2a05554c2a ] + +We're currently tracking rx_nss for each station, and that +is meant to be initialized to the capability NSS and later +reduced by the operating mode notification NSS. + +However, we're mixing up capabilities and operating mode +NSS in the same variable. This forces us to recalculate +the NSS capability on operating mode notification RX, +which is a bit strange; due to the previous fix I had to +never keep rx_nss as zero, it also means that the capa is +never taken into account properly. 
+ +Fix all this by storing the capability value, that can be +recalculated unconditionally whenever needed, and storing +the operating mode notification NSS separately, taking it +into account when assigning the final rx_nss value. + +Cc: stable@vger.kernel.org +Fixes: dd6c064cfc3f ("wifi: mac80211: set station RX-NSS on reconfig") +Reviewed-by: Miriam Rachel Korenblit +Link: https://msgid.link/20240228120157.0e1c41924d1d.I0acaa234e0267227b7e3ef81a59117c8792116bc@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/cfg.c | 2 +- + net/mac80211/ieee80211_i.h | 2 +- + net/mac80211/rate.c | 2 +- + net/mac80211/sta_info.h | 6 ++++- + net/mac80211/vht.c | 46 ++++++++++++++++++-------------------- + 5 files changed, 30 insertions(+), 28 deletions(-) + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 327682995c926..1f55f88b69dae 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1869,7 +1869,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, + sband->band); + } + +- ieee80211_sta_set_rx_nss(link_sta); ++ ieee80211_sta_init_nss(link_sta); + + return ret; + } +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 0b2b53550bd99..a18361afea249 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -2112,7 +2112,7 @@ enum ieee80211_sta_rx_bandwidth + ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta); + enum ieee80211_sta_rx_bandwidth + ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta); +-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta); ++void ieee80211_sta_init_nss(struct link_sta_info *link_sta); + enum ieee80211_sta_rx_bandwidth + ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width); + enum nl80211_chan_width +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index 9d33fd2377c88..0efdaa8f2a92e 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta) + struct ieee80211_supported_band *sband; + struct ieee80211_chanctx_conf *chanctx_conf; + +- ieee80211_sta_set_rx_nss(&sta->deflink); ++ ieee80211_sta_init_nss(&sta->deflink); + + if (!ref) + return; +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h +index 5ef1554f991f6..ac4c7a6f962ea 100644 +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -3,7 +3,7 @@ + * Copyright 2002-2005, Devicescape Software, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH +- * Copyright(c) 2020-2023 Intel Corporation ++ * Copyright(c) 2020-2024 Intel Corporation + */ + + #ifndef STA_INFO_H +@@ -482,6 +482,8 @@ struct ieee80211_fragment_cache { + * same for non-MLD STA. This is used as key for searching link STA + * @link_id: Link ID uniquely identifying the link STA. 
This is 0 for non-MLD + * and set to the corresponding vif LinkId for MLD STA ++ * @op_mode_nss: NSS limit as set by operating mode notification, or 0 ++ * @capa_nss: NSS limit as determined by local and peer capabilities + * @link_hash_node: hash node for rhashtable + * @sta: Points to the STA info + * @gtk: group keys negotiated with this station, if any +@@ -518,6 +520,8 @@ struct link_sta_info { + u8 addr[ETH_ALEN]; + u8 link_id; + ++ u8 op_mode_nss, capa_nss; ++ + struct rhlist_head link_hash_node; + + struct sta_info *sta; +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c +index b3a5c3e96a720..bc13b1419981a 100644 +--- a/net/mac80211/vht.c ++++ b/net/mac80211/vht.c +@@ -4,7 +4,7 @@ + * + * Portions of this file + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH +- * Copyright (C) 2018 - 2023 Intel Corporation ++ * Copyright (C) 2018 - 2024 Intel Corporation + */ + + #include +@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta) + return bw; + } + +-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta) ++void ieee80211_sta_init_nss(struct link_sta_info *link_sta) + { + u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss; + bool support_160; + +- /* if we received a notification already don't overwrite it */ +- if (link_sta->pub->rx_nss) +- return; +- + if (link_sta->pub->eht_cap.has_eht) { + int i; + const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp; +@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta) + rx_nss = max(vht_rx_nss, ht_rx_nss); + rx_nss = max(he_rx_nss, rx_nss); + rx_nss = max(eht_rx_nss, rx_nss); +- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss); ++ rx_nss = max_t(u8, 1, rx_nss); ++ link_sta->capa_nss = rx_nss; ++ ++ /* that shouldn't be set yet, but we can handle it anyway */ ++ if (link_sta->op_mode_nss) ++ link_sta->pub->rx_nss = ++ min_t(u8, rx_nss, link_sta->op_mode_nss); ++ else ++ link_sta->pub->rx_nss = rx_nss; + } + + u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, +@@ -637,7 +641,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + enum ieee80211_sta_rx_bandwidth new_bw; + struct sta_opmode_info sta_opmode = {}; + u32 changed = 0; +- u8 nss, cur_nss; ++ u8 nss; + + /* ignore - no support for BF yet */ + if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF) +@@ -647,23 +651,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, + nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; + nss += 1; + +- if (link_sta->pub->rx_nss != nss) { +- cur_nss = link_sta->pub->rx_nss; +- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which +- * will set the same to max nss value calculated based on capability. +- */ +- link_sta->pub->rx_nss = 0; +- ieee80211_sta_set_rx_nss(link_sta); +- /* Do not allow an nss change to rx_nss greater than max_nss +- * negotiated and capped to APs capability during association. 
+- */ +- if (nss <= link_sta->pub->rx_nss) { +- link_sta->pub->rx_nss = nss; +- sta_opmode.rx_nss = nss; +- changed |= IEEE80211_RC_NSS_CHANGED; +- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED; ++ if (link_sta->op_mode_nss != nss) { ++ if (nss <= link_sta->capa_nss) { ++ link_sta->op_mode_nss = nss; ++ ++ if (nss != link_sta->pub->rx_nss) { ++ link_sta->pub->rx_nss = nss; ++ changed |= IEEE80211_RC_NSS_CHANGED; ++ sta_opmode.rx_nss = link_sta->pub->rx_nss; ++ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED; ++ } + } else { +- link_sta->pub->rx_nss = cur_nss; + pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d", + link_sta->pub->addr, nss); + } +-- +2.43.0 + diff --git a/queue-6.8/wifi-rtw88-add-missing-vid-pids-for-8811cu-and-8821c.patch b/queue-6.8/wifi-rtw88-add-missing-vid-pids-for-8811cu-and-8821c.patch new file mode 100644 index 00000000000..a4ea9076c6f --- /dev/null +++ b/queue-6.8/wifi-rtw88-add-missing-vid-pids-for-8811cu-and-8821c.patch @@ -0,0 +1,88 @@ +From 52352402cc43bbcce260e9e1e6ceb4399e5f0a7b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 27 Feb 2024 02:34:40 +0000 +Subject: wifi: rtw88: Add missing VID/PIDs for 8811CU and 8821CU + +From: Nick Morrow + +[ Upstream commit b8a62478f3b143592d1241de1a7f5f8629ad0f49 ] + +Add VID/PIDs that are known to be missing for this driver. + +Removed /* 8811CU */ and /* 8821CU */ as they are redundant +since the file is specific to those chips. + +Removed /* TOTOLINK A650UA v3 */ as the manufacturer. It has a REALTEK +VID so it may not be specific to this adapter. + +Verified and tested. + +Cc: stable@vger.kernel.org +Signed-off-by: Nick Morrow +Signed-off-by: Larry Finger +Acked-by: Ping-Ke Shih +Signed-off-by: Kalle Valo +Link: https://msgid.link/4ume7mjw63u7.XlMUvUuacW2ErhOCdqlLkw2@1EHFQ.trk.elasticemail.com +Signed-off-by: Sasha Levin +--- + .../net/wireless/realtek/rtw88/rtw8821cu.c | 40 ++++++++++++------- + 1 file changed, 26 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +index 7a5cbdc31ef79..e2c7d9f876836 100644 +--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c ++++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +@@ -9,24 +9,36 @@ + #include "usb.h" + + static const struct usb_device_id rtw_8821cu_id_table[] = { +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 
0xc80c, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */ +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */ +- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff), +- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */ ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */ + {}, + }; + MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table); +-- +2.43.0 + diff --git a/queue-6.8/wireguard-netlink-access-device-through-ctx-instead-.patch b/queue-6.8/wireguard-netlink-access-device-through-ctx-instead-.patch new file mode 100644 index 00000000000..93d06bf1c2d --- /dev/null +++ b/queue-6.8/wireguard-netlink-access-device-through-ctx-instead-.patch @@ -0,0 +1,44 @@ +From 1231a072c3717c0c763b0a8458d39d4c82ba8678 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 16:49:10 -0600 +Subject: wireguard: netlink: access device through ctx instead of peer + +From: Jason A. Donenfeld + +[ Upstream commit 71cbd32e3db82ea4a74e3ef9aeeaa6971969c86f ] + +The previous commit fixed a bug that led to a NULL peer->device being +dereferenced. It's actually easier and faster performance-wise to +instead get the device from ctx->wg. This semantically makes more sense +too, since ctx->wg->peer_allowedips.seq is compared with +ctx->allowedips_seq, basing them both in ctx. This also acts as a +defence in depth provision against freed peers. + +Cc: stable@vger.kernel.org +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. 
Donenfeld +Reviewed-by: Jiri Pirko +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/wireguard/netlink.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c +index c17aee454fa3b..f7055180ba4aa 100644 +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) + if (!allowedips_node) + goto no_allowedips; + if (!ctx->allowedips_seq) +- ctx->allowedips_seq = peer->device->peer_allowedips.seq; +- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) ++ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq; ++ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq) + goto no_allowedips; + + allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); +-- +2.43.0 + diff --git a/queue-6.8/wireguard-netlink-check-for-dangling-peer-via-is_dea.patch b/queue-6.8/wireguard-netlink-check-for-dangling-peer-via-is_dea.patch new file mode 100644 index 00000000000..18ed32c9367 --- /dev/null +++ b/queue-6.8/wireguard-netlink-check-for-dangling-peer-via-is_dea.patch @@ -0,0 +1,90 @@ +From c9d8dd31d6af020dfa3a67fda0b73e47d5dae291 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 16:49:09 -0600 +Subject: wireguard: netlink: check for dangling peer via is_dead instead of + empty list + +From: Jason A. Donenfeld + +[ Upstream commit 55b6c738673871c9b0edae05d0c97995c1ff08c4 ] + +If all peers are removed via wg_peer_remove_all(), rather than setting +peer_list to empty, the peer is added to a temporary list with a head on +the stack of wg_peer_remove_all(). If a netlink dump is resumed and the +cursored peer is one that has been removed via wg_peer_remove_all(), it +will iterate from that peer and then attempt to dump freed peers. + +Fix this by instead checking peer->is_dead, which was explictly created +for this purpose. Also move up the device_update_lock lockdep assertion, +since reading is_dead relies on that. + +It can be reproduced by a small script like: + + echo "Setting config..." + ip link add dev wg0 type wireguard + wg setconf wg0 /big-config + ( + while true; do + echo "Showing config..." + wg showconf wg0 > /dev/null + done + ) & + sleep 4 + wg setconf wg0 <(printf "[Peer]\nPublicKey=$(wg genkey)\n") + +Resulting in: + + BUG: KASAN: slab-use-after-free in __lock_acquire+0x182a/0x1b20 + Read of size 8 at addr ffff88811956ec70 by task wg/59 + CPU: 2 PID: 59 Comm: wg Not tainted 6.8.0-rc2-debug+ #5 + Call Trace: + + dump_stack_lvl+0x47/0x70 + print_address_description.constprop.0+0x2c/0x380 + print_report+0xab/0x250 + kasan_report+0xba/0xf0 + __lock_acquire+0x182a/0x1b20 + lock_acquire+0x191/0x4b0 + down_read+0x80/0x440 + get_peer+0x140/0xcb0 + wg_get_device_dump+0x471/0x1130 + +Cc: stable@vger.kernel.org +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Reported-by: Lillian Berry +Signed-off-by: Jason A. 
Donenfeld +Reviewed-by: Jiri Pirko +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/wireguard/netlink.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c +index e220d761b1f27..c17aee454fa3b 100644 +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) + if (!peers_nest) + goto out; + ret = 0; +- /* If the last cursor was removed via list_del_init in peer_remove, then ++ lockdep_assert_held(&wg->device_update_lock); ++ /* If the last cursor was removed in peer_remove or peer_remove_all, then + * we just treat this the same as there being no more peers left. The + * reason is that seq_nr should indicate to userspace that this isn't a + * coherent dump anyway, so they'll try again. + */ + if (list_empty(&wg->peer_list) || +- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { ++ (ctx->next_peer && ctx->next_peer->is_dead)) { + nla_nest_cancel(skb, peers_nest); + goto out; + } +- lockdep_assert_held(&wg->device_update_lock); + peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); + list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { + if (get_peer(peer, skb, ctx)) { +-- +2.43.0 + diff --git a/queue-6.8/wireguard-selftests-set-riscv_isa_fallback-on-riscv-.patch b/queue-6.8/wireguard-selftests-set-riscv_isa_fallback-on-riscv-.patch new file mode 100644 index 00000000000..a8164812f0a --- /dev/null +++ b/queue-6.8/wireguard-selftests-set-riscv_isa_fallback-on-riscv-.patch @@ -0,0 +1,51 @@ +From 712d2e327147fe57f41770d3e29943454e0d214c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 16:49:11 -0600 +Subject: wireguard: selftests: set RISCV_ISA_FALLBACK on riscv{32,64} + +From: Jason A. Donenfeld + +[ Upstream commit e995f5dd9a9cef818af32ec60fc38d68614afd12 ] + +This option is needed to continue booting with QEMU. Recent changes that +made this optional meant that it gets unset in the test harness, and so +WireGuard CI has been broken. Fix this by simply setting this option. + +Cc: stable@vger.kernel.org +Fixes: 496ea826d1e1 ("RISC-V: provide Kconfig & commandline options to control parsing "riscv,isa"") +Signed-off-by: Jason A. 
Donenfeld +Reviewed-by: Jiri Pirko +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/wireguard/qemu/arch/riscv32.config | 1 + + tools/testing/selftests/wireguard/qemu/arch/riscv64.config | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config +index 2fc36efb166dc..a7f8e8a956259 100644 +--- a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config ++++ b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config +@@ -3,6 +3,7 @@ CONFIG_ARCH_RV32I=y + CONFIG_MMU=y + CONFIG_FPU=y + CONFIG_SOC_VIRT=y ++CONFIG_RISCV_ISA_FALLBACK=y + CONFIG_SERIAL_8250=y + CONFIG_SERIAL_8250_CONSOLE=y + CONFIG_SERIAL_OF_PLATFORM=y +diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config +index dc266f3b19155..daeb3e5e09658 100644 +--- a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config ++++ b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config +@@ -2,6 +2,7 @@ CONFIG_ARCH_RV64I=y + CONFIG_MMU=y + CONFIG_FPU=y + CONFIG_SOC_VIRT=y ++CONFIG_RISCV_ISA_FALLBACK=y + CONFIG_SERIAL_8250=y + CONFIG_SERIAL_8250_CONSOLE=y + CONFIG_SERIAL_OF_PLATFORM=y +-- +2.43.0 + diff --git a/queue-6.8/x86-cpu-amd-update-the-zenbleed-microcode-revisions.patch b/queue-6.8/x86-cpu-amd-update-the-zenbleed-microcode-revisions.patch new file mode 100644 index 00000000000..7dcd0ecc546 --- /dev/null +++ b/queue-6.8/x86-cpu-amd-update-the-zenbleed-microcode-revisions.patch @@ -0,0 +1,44 @@ +From 1d4b09bf5ca0ece605bc18cd16814abd67694321 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 15 Mar 2024 22:42:27 +0100 +Subject: x86/CPU/AMD: Update the Zenbleed microcode revisions + +From: Borislav Petkov (AMD) + +[ Upstream commit 5c84b051bd4e777cf37aaff983277e58c99618d5 ] + +Update them to the correct revision numbers. + +Fixes: 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix") +Signed-off-by: Borislav Petkov (AMD) +Cc: +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + arch/x86/kernel/cpu/amd.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index f3abca334199d..dfa8d0cf5e185 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -989,11 +989,11 @@ static bool cpu_has_zenbleed_microcode(void) + u32 good_rev = 0; + + switch (boot_cpu_data.x86_model) { +- case 0x30 ... 0x3f: good_rev = 0x0830107a; break; +- case 0x60 ... 0x67: good_rev = 0x0860010b; break; +- case 0x68 ... 0x6f: good_rev = 0x08608105; break; +- case 0x70 ... 0x7f: good_rev = 0x08701032; break; +- case 0xa0 ... 0xaf: good_rev = 0x08a00008; break; ++ case 0x30 ... 0x3f: good_rev = 0x0830107b; break; ++ case 0x60 ... 0x67: good_rev = 0x0860010c; break; ++ case 0x68 ... 0x6f: good_rev = 0x08608107; break; ++ case 0x70 ... 0x7f: good_rev = 0x08701033; break; ++ case 0xa0 ... 
0xaf: good_rev = 0x08a00009; break; + + default: + return false; +-- +2.43.0 + diff --git a/queue-6.8/x86-nmi-fix-the-inverse-in-nmi-handler-check.patch b/queue-6.8/x86-nmi-fix-the-inverse-in-nmi-handler-check.patch new file mode 100644 index 00000000000..2b2796d86c0 --- /dev/null +++ b/queue-6.8/x86-nmi-fix-the-inverse-in-nmi-handler-check.patch @@ -0,0 +1,58 @@ +From 4694f8a2bc1aa9ee9da515202b7b332be9d8a63c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Feb 2024 08:52:35 -0800 +Subject: x86/nmi: Fix the inverse "in NMI handler" check + +From: Breno Leitao + +[ Upstream commit d54e56f31a34fa38fcb5e91df609f9633419a79a ] + +Commit 344da544f177 ("x86/nmi: Print reasons why backtrace NMIs are +ignored") creates a super nice framework to diagnose NMIs. + +Every time nmi_exc() is called, it increments a per_cpu counter +(nsp->idt_nmi_seq). At its exit, it also increments the same counter. By +reading this counter it can be seen how many times that function was called +(dividing by 2), and, if the function is still being executed, by checking +the idt_nmi_seq's least significant bit. + +On the check side (nmi_backtrace_stall_check()), that variable is queried +to check if the NMI is still being executed, but, there is a mistake in the +bitwise operation. That code wants to check if the least significant bit of +the idt_nmi_seq is set or not, but does the opposite, and checks for all +the other bits, which will always be true after the first exc_nmi() +executed successfully. + +This appends the misleading string to the dump "(CPU currently in NMI +handler function)" + +Fix it by checking the least significant bit, and if it is set, append the +string. + +Fixes: 344da544f177 ("x86/nmi: Print reasons why backtrace NMIs are ignored") +Signed-off-by: Breno Leitao +Signed-off-by: Thomas Gleixner +Reviewed-by: Paul E. 
McKenney +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20240207165237.1048837-1-leitao@debian.org +Signed-off-by: Sasha Levin +--- + arch/x86/kernel/nmi.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 3082cf24b69e3..6da2cfa23c293 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -636,7 +636,7 @@ void nmi_backtrace_stall_check(const struct cpumask *btp) + msgp = nmi_check_stall_msg[idx]; + if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1)) + modp = ", but OK because ignore_nmis was set"; +- if (nmi_seq & ~0x1) ++ if (nmi_seq & 0x1) + msghp = " (CPU currently in NMI handler function)"; + else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq) + msghp = " (CPU exited one NMI handler function)"; +-- +2.43.0 + diff --git a/queue-6.8/x86-pm-work-around-false-positive-kmemleak-report-in.patch b/queue-6.8/x86-pm-work-around-false-positive-kmemleak-report-in.patch new file mode 100644 index 00000000000..4f30c99f02e --- /dev/null +++ b/queue-6.8/x86-pm-work-around-false-positive-kmemleak-report-in.patch @@ -0,0 +1,102 @@ +From 98911da8e75df11f27daa089add775b8c530f16b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 14 Mar 2024 14:26:56 +0000 +Subject: x86/pm: Work around false positive kmemleak report in + msr_build_context() + +From: Anton Altaparmakov + +[ Upstream commit e3f269ed0accbb22aa8f25d2daffa23c3fccd407 ] + +Since: + + 7ee18d677989 ("x86/power: Make restore_processor_context() sane") + +kmemleak reports this issue: + + unreferenced object 0xf68241e0 (size 32): + comm "swapper/0", pid 1, jiffies 4294668610 (age 68.432s) + hex dump (first 32 bytes): + 00 cc cc cc 29 10 01 c0 00 00 00 00 00 00 00 00 ....)........... + 00 42 82 f6 cc cc cc cc cc cc cc cc cc cc cc cc .B.............. + backtrace: + [<461c1d50>] __kmem_cache_alloc_node+0x106/0x260 + [] __kmalloc+0x54/0x160 + [] msr_build_context.constprop.0+0x35/0x100 + [<46635aff>] pm_check_save_msr+0x63/0x80 + [<6b6bb938>] do_one_initcall+0x41/0x1f0 + [<3f3add60>] kernel_init_freeable+0x199/0x1e8 + [<3b538fde>] kernel_init+0x1a/0x110 + [<938ae2b2>] ret_from_fork+0x1c/0x28 + +Which is a false positive. + +Reproducer: + + - Run rsync of whole kernel tree (multiple times if needed). + - start a kmemleak scan + - Note this is just an example: a lot of our internal tests hit these. + +The root cause is similar to the fix in: + + b0b592cf0836 x86/pm: Fix false positive kmemleak report in msr_build_context() + +ie. the alignment within the packed struct saved_context +which has everything unaligned as there is only "u16 gs;" at start of +struct where in the past there were four u16 there thus aligning +everything afterwards. The issue is with the fact that Kmemleak only +searches for pointers that are aligned (see how pointers are scanned in +kmemleak.c) so when the struct members are not aligned it doesn't see +them. + +Testing: + +We run a lot of tests with our CI, and after applying this fix we do not +see any kmemleak issues any more whilst without it we see hundreds of +the above report. From a single, simple test run consisting of 416 individual test +cases on kernel 5.10 x86 with kmemleak enabled we got 20 failures due to this, +which is quite a lot. With this fix applied we get zero kmemleak related failures. + +Fixes: 7ee18d677989 ("x86/power: Make restore_processor_context() sane") +Signed-off-by: Anton Altaparmakov +Signed-off-by: Ingo Molnar +Acked-by: "Rafael J. 
Wysocki" +Cc: stable@vger.kernel.org +Cc: Linus Torvalds +Link: https://lore.kernel.org/r/20240314142656.17699-1-anton@tuxera.com +Signed-off-by: Sasha Levin +--- + arch/x86/include/asm/suspend_32.h | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h +index a800abb1a9925..d8416b3bf832e 100644 +--- a/arch/x86/include/asm/suspend_32.h ++++ b/arch/x86/include/asm/suspend_32.h +@@ -12,11 +12,6 @@ + + /* image of the saved processor state */ + struct saved_context { +- /* +- * On x86_32, all segment registers except gs are saved at kernel +- * entry in pt_regs. +- */ +- u16 gs; + unsigned long cr0, cr2, cr3, cr4; + u64 misc_enable; + struct saved_msrs saved_msrs; +@@ -27,6 +22,11 @@ struct saved_context { + unsigned long tr; + unsigned long safety; + unsigned long return_address; ++ /* ++ * On x86_32, all segment registers except gs are saved at kernel ++ * entry in pt_regs. ++ */ ++ u16 gs; + bool misc_enable_saved; + } __attribute__((packed)); + +-- +2.43.0 +