]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.17-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 Oct 2025 12:42:51 +0000 (14:42 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 Oct 2025 12:42:51 +0000 (14:42 +0200)
added patches:
cdx-fix-device-node-reference-leak-in-cdx_msi_domain_init.patch
clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
clk-samsung-exynos990-fix-cmu_top-mux-div-bit-widths.patch
clk-samsung-exynos990-replace-bogus-divs-with-fixed-factor-clocks.patch
clk-samsung-exynos990-use-pll_con0-for-pll-parent-muxes.patch
copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
crypto-aspeed-fix-dma_unmap_sg-direction.patch
crypto-atmel-fix-dma_unmap_sg-direction.patch
crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
eventpoll-replace-rwlock-with-spinlock.patch
fbdev-fix-logic-error-in-offb-name-match.patch
fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
fsnotify-pass-correct-offset-to-fsnotify_mmap_perm.patch
fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
gpio-mpfs-fix-setting-gpio-direction-to-output.patch
i3c-fix-default-i2c-adapter-timeout-value.patch
iio-adc-pac1934-fix-channel-disable-configuration.patch
iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
iio-frequency-adf4350-fix-prescaler-usage.patch
iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch
iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch
iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
init-handle-bootloader-identifier-in-kernel-parameters.patch
iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
ipmi-msghandler-change-seq_lock-to-a-mutex.patch
ipmi-rework-user-message-limit-handling.patch
kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch

35 files changed:
queue-6.17/cdx-fix-device-node-reference-leak-in-cdx_msi_domain_init.patch [new file with mode: 0644]
queue-6.17/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch [new file with mode: 0644]
queue-6.17/clk-samsung-exynos990-fix-cmu_top-mux-div-bit-widths.patch [new file with mode: 0644]
queue-6.17/clk-samsung-exynos990-replace-bogus-divs-with-fixed-factor-clocks.patch [new file with mode: 0644]
queue-6.17/clk-samsung-exynos990-use-pll_con0-for-pll-parent-muxes.patch [new file with mode: 0644]
queue-6.17/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch [new file with mode: 0644]
queue-6.17/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch [new file with mode: 0644]
queue-6.17/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch [new file with mode: 0644]
queue-6.17/crypto-aspeed-fix-dma_unmap_sg-direction.patch [new file with mode: 0644]
queue-6.17/crypto-atmel-fix-dma_unmap_sg-direction.patch [new file with mode: 0644]
queue-6.17/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch [new file with mode: 0644]
queue-6.17/eventpoll-replace-rwlock-with-spinlock.patch [new file with mode: 0644]
queue-6.17/fbdev-fix-logic-error-in-offb-name-match.patch [new file with mode: 0644]
queue-6.17/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch [new file with mode: 0644]
queue-6.17/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch [new file with mode: 0644]
queue-6.17/fsnotify-pass-correct-offset-to-fsnotify_mmap_perm.patch [new file with mode: 0644]
queue-6.17/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch [new file with mode: 0644]
queue-6.17/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch [new file with mode: 0644]
queue-6.17/gpio-mpfs-fix-setting-gpio-direction-to-output.patch [new file with mode: 0644]
queue-6.17/i3c-fix-default-i2c-adapter-timeout-value.patch [new file with mode: 0644]
queue-6.17/iio-adc-pac1934-fix-channel-disable-configuration.patch [new file with mode: 0644]
queue-6.17/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch [new file with mode: 0644]
queue-6.17/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch [new file with mode: 0644]
queue-6.17/iio-frequency-adf4350-fix-prescaler-usage.patch [new file with mode: 0644]
queue-6.17/iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch [new file with mode: 0644]
queue-6.17/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch [new file with mode: 0644]
queue-6.17/iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch [new file with mode: 0644]
queue-6.17/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch [new file with mode: 0644]
queue-6.17/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch [new file with mode: 0644]
queue-6.17/init-handle-bootloader-identifier-in-kernel-parameters.patch [new file with mode: 0644]
queue-6.17/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch [new file with mode: 0644]
queue-6.17/ipmi-msghandler-change-seq_lock-to-a-mutex.patch [new file with mode: 0644]
queue-6.17/ipmi-rework-user-message-limit-handling.patch [new file with mode: 0644]
queue-6.17/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch [new file with mode: 0644]
queue-6.17/series

diff --git a/queue-6.17/cdx-fix-device-node-reference-leak-in-cdx_msi_domain_init.patch b/queue-6.17/cdx-fix-device-node-reference-leak-in-cdx_msi_domain_init.patch
new file mode 100644 (file)
index 0000000..ccf1f87
--- /dev/null
@@ -0,0 +1,33 @@
+From 76254bc489d39dae9a3427f0984fe64213d20548 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin <linmq006@gmail.com>
+Date: Tue, 2 Sep 2025 16:49:33 +0800
+Subject: cdx: Fix device node reference leak in cdx_msi_domain_init
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+commit 76254bc489d39dae9a3427f0984fe64213d20548 upstream.
+
+Add missing of_node_put() call to release
+the device node reference obtained via of_parse_phandle().
+
+Fixes: 0e439ba38e61 ("cdx: add MSI support for CDX bus")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Acked-by: Nipun Gupta <nipun.gupta@amd.com>
+Link: https://lore.kernel.org/r/20250902084933.2418264-1-linmq006@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cdx/cdx_msi.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/cdx/cdx_msi.c
++++ b/drivers/cdx/cdx_msi.c
+@@ -174,6 +174,7 @@ struct irq_domain *cdx_msi_domain_init(s
+       }
+       parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
++      of_node_put(parent_node);
+       if (!parent || !msi_get_domain_info(parent)) {
+               dev_err(dev, "unable to locate ITS domain\n");
+               return NULL;
diff --git a/queue-6.17/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch b/queue-6.17/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
new file mode 100644 (file)
index 0000000..8d1527d
--- /dev/null
@@ -0,0 +1,38 @@
+From 57c8e9da3dfe606b918d8f193837ebf2213a9545 Mon Sep 17 00:00:00 2001
+From: Abel Vesa <abel.vesa@linaro.org>
+Date: Wed, 30 Jul 2025 19:11:12 +0300
+Subject: clk: qcom: tcsrcc-x1e80100: Set the bi_tcxo as parent to eDP refclk
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+commit 57c8e9da3dfe606b918d8f193837ebf2213a9545 upstream.
+
+All the other ref clocks provided by this driver have the bi_tcxo
+as parent. The eDP refclk is the only one without a parent, leading
+to reporting its rate as 0. So set its parent to bi_tcxo, just like
+the rest of the refclks.
+
+Cc: stable@vger.kernel.org # v6.9
+Fixes: 06aff116199c ("clk: qcom: Add TCSR clock driver for x1e80100")
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250730-clk-qcom-tcsrcc-x1e80100-parent-edp-refclk-v1-1-7a36ef06e045@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/tcsrcc-x1e80100.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
++++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
+@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref
+               .enable_mask = BIT(0),
+               .hw.init = &(const struct clk_init_data) {
+                       .name = "tcsr_edp_clkref_en",
++                      .parent_data = &(const struct clk_parent_data){
++                              .index = DT_BI_TCXO_PAD,
++                      },
++                      .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
diff --git a/queue-6.17/clk-samsung-exynos990-fix-cmu_top-mux-div-bit-widths.patch b/queue-6.17/clk-samsung-exynos990-fix-cmu_top-mux-div-bit-widths.patch
new file mode 100644 (file)
index 0000000..a866f9f
--- /dev/null
@@ -0,0 +1,93 @@
+From ce2eb09b430ddf9d7c9d685bdd81de011bccd4ad Mon Sep 17 00:00:00 2001
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+Date: Sat, 30 Aug 2025 16:28:39 +0000
+Subject: clk: samsung: exynos990: Fix CMU_TOP mux/div bit widths
+
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+
+commit ce2eb09b430ddf9d7c9d685bdd81de011bccd4ad upstream.
+
+Correct several mux/div widths (DSP_BUS, G2D_MSCL, HSI0 USBDP_DEBUG,
+HSI1 UFS_EMBD, APM_BUS, CPUCL0_DBG_BUS, DPU) to match hardware.
+
+Fixes: bdd03ebf721f ("clk: samsung: Introduce Exynos990 clock controller driver")
+Signed-off-by: Denzeel Oliva <wachiturroxd150@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20250830-fix-cmu-top-v5-2-7c62f608309e@gmail.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/samsung/clk-exynos990.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
+index 12e98bf5005a..385f1d972667 100644
+--- a/drivers/clk/samsung/clk-exynos990.c
++++ b/drivers/clk/samsung/clk-exynos990.c
+@@ -766,11 +766,11 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+       MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
+           mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
+       MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
+-          mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
++          mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 3),
+       MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
+           mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+       MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
+-          mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
++          mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
+       MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
+           mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+       MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
+@@ -782,7 +782,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+           0, 2),
+       MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
+           mout_cmu_hsi0_usbdp_debug_p,
+-          CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
++          CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 1),
+       MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
+           mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+       MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
+@@ -795,7 +795,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+           0, 2),
+       MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
+           mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+-          0, 1),
++          0, 2),
+       MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
+           mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
+       MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
+@@ -869,7 +869,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+       DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+-          CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
++          CLK_CON_DIV_CLKCMU_APM_BUS, 0, 2),
+       DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
+           CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+       DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+@@ -894,9 +894,9 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
+       DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+           CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+-      DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
++      DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_dbg_bus",
+           "gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+-          0, 3),
++          0, 4),
+       DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+           "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+       DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+@@ -986,8 +986,8 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+       DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
+           CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
+-      DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
+-          CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
++      DIV(CLK_DOUT_CMU_DPU, "dout_cmu_dpu", "gout_cmu_dpu",
++          CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ };
+ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+-- 
+2.51.0
+
diff --git a/queue-6.17/clk-samsung-exynos990-replace-bogus-divs-with-fixed-factor-clocks.patch b/queue-6.17/clk-samsung-exynos990-replace-bogus-divs-with-fixed-factor-clocks.patch
new file mode 100644 (file)
index 0000000..5c27fa8
--- /dev/null
@@ -0,0 +1,82 @@
+From a66dabcd2cb8389fd73cab8896fd727fa2ea8d8b Mon Sep 17 00:00:00 2001
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+Date: Sat, 30 Aug 2025 16:28:40 +0000
+Subject: clk: samsung: exynos990: Replace bogus divs with fixed-factor clocks
+
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+
+commit a66dabcd2cb8389fd73cab8896fd727fa2ea8d8b upstream.
+
+HSI1/2 PCIe and HSI0 USBDP debug outputs are fixed divide-by-8.
+OTP also uses 1/8 from oscclk. Replace incorrect div clocks with
+fixed-factor clocks to reflect hardware.
+
+Fixes: bdd03ebf721f ("clk: samsung: Introduce Exynos990 clock controller driver")
+Signed-off-by: Denzeel Oliva <wachiturroxd150@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20250830-fix-cmu-top-v5-3-7c62f608309e@gmail.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/samsung/clk-exynos990.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
+index 385f1d972667..8571c225d090 100644
+--- a/drivers/clk/samsung/clk-exynos990.c
++++ b/drivers/clk/samsung/clk-exynos990.c
+@@ -931,16 +931,11 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+       DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+           "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
+-      DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+-          "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+-          0, 4),
+       DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+           CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
+       DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
+           "gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+           0, 9),
+-      DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
+-          CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
+       DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
+           "gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+           0, 3),
+@@ -949,8 +944,6 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           0, 3),
+       DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+           CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+-      DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
+-          CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
+       DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+           CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+       DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+@@ -990,6 +983,16 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
+           CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ };
++static const struct samsung_fixed_factor_clock cmu_top_ffactor[] __initconst = {
++      FFACTOR(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
++              "gout_cmu_hsi1_pcie", 1, 8, 0),
++      FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
++      FFACTOR(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
++              "gout_cmu_hsi0_usbdp_debug", 1, 8, 0),
++      FFACTOR(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie",
++              "gout_cmu_hsi2_pcie", 1, 8, 0),
++};
++
+ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+       GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+            CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+@@ -1133,6 +1136,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
+       .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+       .div_clks = top_div_clks,
+       .nr_div_clks = ARRAY_SIZE(top_div_clks),
++      .fixed_factor_clks = cmu_top_ffactor,
++      .nr_fixed_factor_clks = ARRAY_SIZE(cmu_top_ffactor),
+       .gate_clks = top_gate_clks,
+       .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+       .nr_clk_ids = CLKS_NR_TOP,
+-- 
+2.51.0
+
diff --git a/queue-6.17/clk-samsung-exynos990-use-pll_con0-for-pll-parent-muxes.patch b/queue-6.17/clk-samsung-exynos990-use-pll_con0-for-pll-parent-muxes.patch
new file mode 100644 (file)
index 0000000..14d1c74
--- /dev/null
@@ -0,0 +1,67 @@
+From 19b50ab02eddbbd87ec2f0ad4a5bc93ac1c9b82d Mon Sep 17 00:00:00 2001
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+Date: Sat, 30 Aug 2025 16:28:38 +0000
+Subject: clk: samsung: exynos990: Use PLL_CON0 for PLL parent muxes
+
+From: Denzeel Oliva <wachiturroxd150@gmail.com>
+
+commit 19b50ab02eddbbd87ec2f0ad4a5bc93ac1c9b82d upstream.
+
+Parent select bits for shared PLLs are in PLL_CON0, not PLL_CON3.
+Using the wrong register leads to incorrect parent selection and rates.
+
+Fixes: bdd03ebf721f ("clk: samsung: Introduce Exynos990 clock controller driver")
+Signed-off-by: Denzeel Oliva <wachiturroxd150@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20250830-fix-cmu-top-v5-1-7c62f608309e@gmail.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/samsung/clk-exynos990.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
+index 8d3f193d2b4d..12e98bf5005a 100644
+--- a/drivers/clk/samsung/clk-exynos990.c
++++ b/drivers/clk/samsung/clk-exynos990.c
+@@ -239,12 +239,19 @@ static const unsigned long top_clk_regs[] __initconst = {
+       PLL_LOCKTIME_PLL_SHARED2,
+       PLL_LOCKTIME_PLL_SHARED3,
+       PLL_LOCKTIME_PLL_SHARED4,
++      PLL_CON0_PLL_G3D,
+       PLL_CON3_PLL_G3D,
++      PLL_CON0_PLL_MMC,
+       PLL_CON3_PLL_MMC,
++      PLL_CON0_PLL_SHARED0,
+       PLL_CON3_PLL_SHARED0,
++      PLL_CON0_PLL_SHARED1,
+       PLL_CON3_PLL_SHARED1,
++      PLL_CON0_PLL_SHARED2,
+       PLL_CON3_PLL_SHARED2,
++      PLL_CON0_PLL_SHARED3,
+       PLL_CON3_PLL_SHARED3,
++      PLL_CON0_PLL_SHARED4,
+       PLL_CON3_PLL_SHARED4,
+       CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+       CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+@@ -689,13 +696,13 @@ PNAME(mout_cmu_vra_bus_p)                = { "dout_cmu_shared0_div3",
+ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+       MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+-          PLL_CON3_PLL_SHARED0, 4, 1),
++          PLL_CON0_PLL_SHARED0, 4, 1),
+       MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+-          PLL_CON3_PLL_SHARED1, 4, 1),
++          PLL_CON0_PLL_SHARED1, 4, 1),
+       MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+-          PLL_CON3_PLL_SHARED2, 4, 1),
++          PLL_CON0_PLL_SHARED2, 4, 1),
+       MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+-          PLL_CON3_PLL_SHARED3, 4, 1),
++          PLL_CON0_PLL_SHARED3, 4, 1),
+       MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+           PLL_CON0_PLL_SHARED4, 4, 1),
+       MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
+-- 
+2.51.0
+
diff --git a/queue-6.17/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch b/queue-6.17/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
new file mode 100644 (file)
index 0000000..a8ddbaf
--- /dev/null
@@ -0,0 +1,58 @@
+From 04ff48239f46e8b493571e260bd0e6c3a6400371 Mon Sep 17 00:00:00 2001
+From: Simon Schuster <schuster.simon@siemens-energy.com>
+Date: Mon, 1 Sep 2025 15:09:50 +0200
+Subject: copy_sighand: Handle architectures where sizeof(unsigned long) < sizeof(u64)
+
+From: Simon Schuster <schuster.simon@siemens-energy.com>
+
+commit 04ff48239f46e8b493571e260bd0e6c3a6400371 upstream.
+
+With the introduction of clone3 in commit 7f192e3cd316 ("fork: add
+clone3") the effective bit width of clone_flags on all architectures was
+increased from 32-bit to 64-bit. However, the signature of the copy_*
+helper functions (e.g., copy_sighand) used by copy_process was not
+adapted.
+
+As such, they truncate the flags on any 32-bit architectures that
+supports clone3 (arc, arm, csky, m68k, microblaze, mips32, openrisc,
+parisc32, powerpc32, riscv32, x86-32 and xtensa).
+
+For copy_sighand with CLONE_CLEAR_SIGHAND being an actual u64
+constant, this triggers an observable bug in kernel selftest
+clone3_clear_sighand:
+
+        if (clone_flags & CLONE_CLEAR_SIGHAND)
+
+in function copy_sighand within fork.c will always fail given:
+
+        unsigned long /* == uint32_t */ clone_flags
+        #define CLONE_CLEAR_SIGHAND 0x100000000ULL
+
+This commit fixes the bug by always passing clone_flags to copy_sighand
+via their declared u64 type, invariant of architecture-dependent integer
+sizes.
+
+Fixes: b612e5df4587 ("clone3: add CLONE_CLEAR_SIGHAND")
+Cc: stable@vger.kernel.org # linux-5.5+
+Signed-off-by: Simon Schuster <schuster.simon@siemens-energy.com>
+Link: https://lore.kernel.org/20250901-nios2-implement-clone3-v2-1-53fcf5577d57@siemens-energy.com
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/fork.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1596,7 +1596,7 @@ static int copy_files(unsigned long clon
+       return 0;
+ }
+-static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
++static int copy_sighand(u64 clone_flags, struct task_struct *tsk)
+ {
+       struct sighand_struct *sig;
diff --git a/queue-6.17/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch b/queue-6.17/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
new file mode 100644 (file)
index 0000000..07629be
--- /dev/null
@@ -0,0 +1,65 @@
+From f965d111e68f4a993cc44d487d416e3d954eea11 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 26 Sep 2025 12:19:41 +0200
+Subject: cpufreq: CPPC: Avoid using CPUFREQ_ETERNAL as transition delay
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit f965d111e68f4a993cc44d487d416e3d954eea11 upstream.
+
+If cppc_get_transition_latency() returns CPUFREQ_ETERNAL to indicate a
+failure to retrieve the transition latency value from the platform
+firmware, the CPPC cpufreq driver will use that value (converted to
+microseconds) as the policy transition delay, but it is way too large
+for any practical use.
+
+Address this by making the driver use the cpufreq's default
+transition latency value (in microseconds) as the transition delay
+if CPUFREQ_ETERNAL is returned by cppc_get_transition_latency().
+
+Fixes: d4f3388afd48 ("cpufreq / CPPC: Set platform specific transition_delay_us")
+Cc: 5.19+ <stable@vger.kernel.org> # 5.19
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Jie Zhan <zhanjie9@hisilicon.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Qais Yousef <qyousef@layalina.io>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cppc_cpufreq.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -310,6 +310,16 @@ static int cppc_verify_policy(struct cpu
+       return 0;
+ }
++static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
++{
++      unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
++
++      if (transition_latency_ns == CPUFREQ_ETERNAL)
++              return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
++
++      return transition_latency_ns / NSEC_PER_USEC;
++}
++
+ /*
+  * The PCC subspace describes the rate at which platform can accept commands
+  * on the shared PCC channel (including READs which do not count towards freq
+@@ -332,12 +342,12 @@ static unsigned int cppc_cpufreq_get_tra
+                       return 10000;
+               }
+       }
+-      return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++      return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #else
+ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+ {
+-      return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++      return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #endif
diff --git a/queue-6.17/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch b/queue-6.17/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
new file mode 100644 (file)
index 0000000..aed6f92
--- /dev/null
@@ -0,0 +1,58 @@
+From 69e5d50fcf4093fb3f9f41c4f931f12c2ca8c467 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 5 Sep 2025 15:52:03 +0200
+Subject: cpufreq: intel_pstate: Fix object lifecycle issue in update_qos_request()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 69e5d50fcf4093fb3f9f41c4f931f12c2ca8c467 upstream.
+
+The cpufreq_cpu_put() call in update_qos_request() takes place too early
+because the latter subsequently calls freq_qos_update_request() that
+indirectly accesses the policy object in question through the QoS request
+object passed to it.
+
+Fortunately, update_qos_request() is called under intel_pstate_driver_lock,
+so this issue does not matter for changing the intel_pstate operation
+mode, but it theoretically can cause a crash to occur on CPU device hot
+removal (which currently can only happen in virt, but it is formally
+supported nevertheless).
+
+Address this issue by modifying update_qos_request() to drop the
+reference to the policy later.
+
+Fixes: da5c504c7aae ("cpufreq: intel_pstate: Implement QoS supported freq constraints")
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Zihuan Zhang <zhangzihuan@kylinos.cn>
+Link: https://patch.msgid.link/2255671.irdbgypaU6@rafael.j.wysocki
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1710,10 +1710,10 @@ static void update_qos_request(enum freq
+                       continue;
+               req = policy->driver_data;
+-              cpufreq_cpu_put(policy);
+-
+-              if (!req)
++              if (!req) {
++                      cpufreq_cpu_put(policy);
+                       continue;
++              }
+               if (hwp_active)
+                       intel_pstate_get_hwp_cap(cpu);
+@@ -1729,6 +1729,8 @@ static void update_qos_request(enum freq
+               if (freq_qos_update_request(req, freq) < 0)
+                       pr_warn("Failed to update freq constraint: CPU%d\n", i);
++
++              cpufreq_cpu_put(policy);
+       }
+ }
diff --git a/queue-6.17/crypto-aspeed-fix-dma_unmap_sg-direction.patch b/queue-6.17/crypto-aspeed-fix-dma_unmap_sg-direction.patch
new file mode 100644 (file)
index 0000000..6a0df52
--- /dev/null
@@ -0,0 +1,33 @@
+From 838d2d51513e6d2504a678e906823cfd2ecaaa22 Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 10 Sep 2025 10:22:31 +0200
+Subject: crypto: aspeed - Fix dma_unmap_sg() direction
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit 838d2d51513e6d2504a678e906823cfd2ecaaa22 upstream.
+
+It seems like everywhere in this file, when the request is not
+bidirectionala, req->src is mapped with DMA_TO_DEVICE and req->dst is
+mapped with DMA_FROM_DEVICE.
+
+Fixes: 62f58b1637b7 ("crypto: aspeed - add HACE crypto driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/aspeed/aspeed-hace-crypto.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+@@ -346,7 +346,7 @@ free_req:
+       } else {
+               dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+-                           DMA_TO_DEVICE);
++                           DMA_FROM_DEVICE);
+               dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+                            DMA_TO_DEVICE);
+       }
diff --git a/queue-6.17/crypto-atmel-fix-dma_unmap_sg-direction.patch b/queue-6.17/crypto-atmel-fix-dma_unmap_sg-direction.patch
new file mode 100644 (file)
index 0000000..08324f4
--- /dev/null
@@ -0,0 +1,32 @@
+From f5d643156ef62216955c119216d2f3815bd51cb1 Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 3 Sep 2025 10:34:46 +0200
+Subject: crypto: atmel - Fix dma_unmap_sg() direction
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit f5d643156ef62216955c119216d2f3815bd51cb1 upstream.
+
+It seems like everywhere in this file, dd->in_sg is mapped with
+DMA_TO_DEVICE and dd->out_sg is mapped with DMA_FROM_DEVICE.
+
+Fixes: 13802005d8f2 ("crypto: atmel - add Atmel DES/TDES driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/atmel-tdes.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct
+       if (err && (dd->flags & TDES_FLAGS_FAST)) {
+               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+-              dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
++              dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+       }
+       return err;
diff --git a/queue-6.17/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch b/queue-6.17/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
new file mode 100644 (file)
index 0000000..e461c38
--- /dev/null
@@ -0,0 +1,32 @@
+From 21140e5caf019e4a24e1ceabcaaa16bd693b393f Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 3 Sep 2025 10:06:46 +0200
+Subject: crypto: rockchip - Fix dma_unmap_sg() nents value
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit 21140e5caf019e4a24e1ceabcaaa16bd693b393f upstream.
+
+The dma_unmap_sg() functions should be called with the same nents as the
+dma_map_sg(), not the value the map function returned.
+
+Fixes: 57d67c6e8219 ("crypto: rockchip - rework by using crypto_engine")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/rockchip/rk3288_crypto_ahash.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -254,7 +254,7 @@ static void rk_hash_unprepare(struct cry
+       struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+       struct rk_crypto_info *rkc = rctx->dev;
+-      dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++      dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ }
+ static int rk_hash_run(struct crypto_engine *engine, void *breq)
diff --git a/queue-6.17/eventpoll-replace-rwlock-with-spinlock.patch b/queue-6.17/eventpoll-replace-rwlock-with-spinlock.patch
new file mode 100644 (file)
index 0000000..cb10e00
--- /dev/null
@@ -0,0 +1,389 @@
+From 0c43094f8cc9d3d99d835c0ac9c4fe1ccc62babd Mon Sep 17 00:00:00 2001
+From: Nam Cao <namcao@linutronix.de>
+Date: Tue, 15 Jul 2025 14:46:34 +0200
+Subject: eventpoll: Replace rwlock with spinlock
+
+From: Nam Cao <namcao@linutronix.de>
+
+commit 0c43094f8cc9d3d99d835c0ac9c4fe1ccc62babd upstream.
+
+The ready event list of an epoll object is protected by read-write
+semaphore:
+
+  - The consumer (waiter) acquires the write lock and takes items.
+  - the producer (waker) takes the read lock and adds items.
+
+The point of this design is enabling epoll to scale well with large number
+of producers, as multiple producers can hold the read lock at the same
+time.
+
+Unfortunately, this implementation may cause scheduling priority inversion
+problem. Suppose the consumer has higher scheduling priority than the
+producer. The consumer needs to acquire the write lock, but may be blocked
+by the producer holding the read lock. Since read-write semaphore does not
+support priority-boosting for the readers (even with CONFIG_PREEMPT_RT=y),
+we have a case of priority inversion: a higher priority consumer is blocked
+by a lower priority producer. This problem was reported in [1].
+
+Furthermore, this could also cause stall problem, as described in [2].
+
+Fix this problem by replacing rwlock with spinlock.
+
+This reduces the event bandwidth, as the producers now have to contend with
+each other for the spinlock. According to the benchmark from
+https://github.com/rouming/test-tools/blob/master/stress-epoll.c:
+
+    On 12 x86 CPUs:
+                  Before     After        Diff
+        threads  events/ms  events/ms
+              8       7162       4956     -31%
+             16       8733       5383     -38%
+             32       7968       5572     -30%
+             64      10652       5739     -46%
+            128      11236       5931     -47%
+
+    On 4 riscv CPUs:
+                  Before     After        Diff
+        threads  events/ms  events/ms
+              8       2958       2833      -4%
+             16       3323       3097      -7%
+             32       3451       3240      -6%
+             64       3554       3178     -11%
+            128       3601       3235     -10%
+
+Although the numbers look bad, it should be noted that this benchmark
+creates multiple threads who do nothing except constantly generating new
+epoll events, thus contention on the spinlock is high. For real workload,
+the event rate is likely much lower, and the performance drop is not as
+bad.
+
+Using another benchmark (perf bench epoll wait) where spinlock contention
+is lower, improvement is even observed on x86:
+
+    On 12 x86 CPUs:
+        Before: Averaged 110279 operations/sec (+- 1.09%), total secs = 8
+        After:  Averaged 114577 operations/sec (+- 2.25%), total secs = 8
+
+    On 4 riscv CPUs:
+        Before: Averaged 175767 operations/sec (+- 0.62%), total secs = 8
+        After:  Averaged 167396 operations/sec (+- 0.23%), total secs = 8
+
+In conclusion, no one is likely to be upset over this change. After all,
+spinlock was used originally for years, and the commit which converted to
+rwlock didn't mention a real workload, just that the benchmark numbers are
+nice.
+
+This patch is not exactly the revert of commit a218cc491420 ("epoll: use
+rwlock in order to reduce ep_poll_callback() contention"), because git
+revert conflicts in some places which are not obvious on the resolution.
+This patch is intended to be backported, therefore go with the obvious
+approach:
+
+  - Replace rwlock_t with spinlock_t one to one
+
+  - Delete list_add_tail_lockless() and chain_epi_lockless(). These were
+    introduced to allow producers to concurrently add items to the list.
+    But now that spinlock no longer allows producers to touch the event
+    list concurrently, these two functions are not necessary anymore.
+
+Fixes: a218cc491420 ("epoll: use rwlock in order to reduce ep_poll_callback() contention")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/ec92458ea357ec503c737ead0f10b2c6e4c37d47.1752581388.git.namcao@linutronix.de
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Cc: stable@vger.kernel.org
+Reported-by: Frederic Weisbecker <frederic@kernel.org>
+Closes: https://lore.kernel.org/linux-rt-users/20210825132754.GA895675@lothringen/ [1]
+Reported-by: Valentin Schneider <vschneid@redhat.com>
+Closes: https://lore.kernel.org/linux-rt-users/xhsmhttqvnall.mognet@vschneid.remote.csb/ [2]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/eventpoll.c |  139 ++++++++++-----------------------------------------------
+ 1 file changed, 26 insertions(+), 113 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -46,10 +46,10 @@
+  *
+  * 1) epnested_mutex (mutex)
+  * 2) ep->mtx (mutex)
+- * 3) ep->lock (rwlock)
++ * 3) ep->lock (spinlock)
+  *
+  * The acquire order is the one listed above, from 1 to 3.
+- * We need a rwlock (ep->lock) because we manipulate objects
++ * We need a spinlock (ep->lock) because we manipulate objects
+  * from inside the poll callback, that might be triggered from
+  * a wake_up() that in turn might be called from IRQ context.
+  * So we can't sleep inside the poll callback and hence we need
+@@ -195,7 +195,7 @@ struct eventpoll {
+       struct list_head rdllist;
+       /* Lock which protects rdllist and ovflist */
+-      rwlock_t lock;
++      spinlock_t lock;
+       /* RB tree root used to store monitored fd structs */
+       struct rb_root_cached rbr;
+@@ -741,10 +741,10 @@ static void ep_start_scan(struct eventpo
+        * in a lockless way.
+        */
+       lockdep_assert_irqs_enabled();
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       list_splice_init(&ep->rdllist, txlist);
+       WRITE_ONCE(ep->ovflist, NULL);
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+ }
+ static void ep_done_scan(struct eventpoll *ep,
+@@ -752,7 +752,7 @@ static void ep_done_scan(struct eventpol
+ {
+       struct epitem *epi, *nepi;
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       /*
+        * During the time we spent inside the "sproc" callback, some
+        * other events might have been queued by the poll callback.
+@@ -793,7 +793,7 @@ static void ep_done_scan(struct eventpol
+                       wake_up(&ep->wq);
+       }
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+ }
+ static void ep_get(struct eventpoll *ep)
+@@ -868,10 +868,10 @@ static bool __ep_remove(struct eventpoll
+       rb_erase_cached(&epi->rbn, &ep->rbr);
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       if (ep_is_linked(epi))
+               list_del_init(&epi->rdllink);
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+       wakeup_source_unregister(ep_wakeup_source(epi));
+       /*
+@@ -1152,7 +1152,7 @@ static int ep_alloc(struct eventpoll **p
+               return -ENOMEM;
+       mutex_init(&ep->mtx);
+-      rwlock_init(&ep->lock);
++      spin_lock_init(&ep->lock);
+       init_waitqueue_head(&ep->wq);
+       init_waitqueue_head(&ep->poll_wait);
+       INIT_LIST_HEAD(&ep->rdllist);
+@@ -1240,99 +1240,9 @@ struct file *get_epoll_tfile_raw_ptr(str
+ #endif /* CONFIG_KCMP */
+ /*
+- * Adds a new entry to the tail of the list in a lockless way, i.e.
+- * multiple CPUs are allowed to call this function concurrently.
+- *
+- * Beware: it is necessary to prevent any other modifications of the
+- *         existing list until all changes are completed, in other words
+- *         concurrent list_add_tail_lockless() calls should be protected
+- *         with a read lock, where write lock acts as a barrier which
+- *         makes sure all list_add_tail_lockless() calls are fully
+- *         completed.
+- *
+- *        Also an element can be locklessly added to the list only in one
+- *        direction i.e. either to the tail or to the head, otherwise
+- *        concurrent access will corrupt the list.
+- *
+- * Return: %false if element has been already added to the list, %true
+- * otherwise.
+- */
+-static inline bool list_add_tail_lockless(struct list_head *new,
+-                                        struct list_head *head)
+-{
+-      struct list_head *prev;
+-
+-      /*
+-       * This is simple 'new->next = head' operation, but cmpxchg()
+-       * is used in order to detect that same element has been just
+-       * added to the list from another CPU: the winner observes
+-       * new->next == new.
+-       */
+-      if (!try_cmpxchg(&new->next, &new, head))
+-              return false;
+-
+-      /*
+-       * Initially ->next of a new element must be updated with the head
+-       * (we are inserting to the tail) and only then pointers are atomically
+-       * exchanged.  XCHG guarantees memory ordering, thus ->next should be
+-       * updated before pointers are actually swapped and pointers are
+-       * swapped before prev->next is updated.
+-       */
+-
+-      prev = xchg(&head->prev, new);
+-
+-      /*
+-       * It is safe to modify prev->next and new->prev, because a new element
+-       * is added only to the tail and new->next is updated before XCHG.
+-       */
+-
+-      prev->next = new;
+-      new->prev = prev;
+-
+-      return true;
+-}
+-
+-/*
+- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
+- * i.e. multiple CPUs are allowed to call this function concurrently.
+- *
+- * Return: %false if epi element has been already chained, %true otherwise.
+- */
+-static inline bool chain_epi_lockless(struct epitem *epi)
+-{
+-      struct eventpoll *ep = epi->ep;
+-
+-      /* Fast preliminary check */
+-      if (epi->next != EP_UNACTIVE_PTR)
+-              return false;
+-
+-      /* Check that the same epi has not been just chained from another CPU */
+-      if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
+-              return false;
+-
+-      /* Atomically exchange tail */
+-      epi->next = xchg(&ep->ovflist, epi);
+-
+-      return true;
+-}
+-
+-/*
+  * This is the callback that is passed to the wait queue wakeup
+  * mechanism. It is called by the stored file descriptors when they
+  * have events to report.
+- *
+- * This callback takes a read lock in order not to contend with concurrent
+- * events from another file descriptor, thus all modifications to ->rdllist
+- * or ->ovflist are lockless.  Read lock is paired with the write lock from
+- * ep_start/done_scan(), which stops all list modifications and guarantees
+- * that lists state is seen correctly.
+- *
+- * Another thing worth to mention is that ep_poll_callback() can be called
+- * concurrently for the same @epi from different CPUs if poll table was inited
+- * with several wait queues entries.  Plural wakeup from different CPUs of a
+- * single wait queue is serialized by wq.lock, but the case when multiple wait
+- * queues are used should be detected accordingly.  This is detected using
+- * cmpxchg() operation.
+  */
+ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+ {
+@@ -1343,7 +1253,7 @@ static int ep_poll_callback(wait_queue_e
+       unsigned long flags;
+       int ewake = 0;
+-      read_lock_irqsave(&ep->lock, flags);
++      spin_lock_irqsave(&ep->lock, flags);
+       ep_set_busy_poll_napi_id(epi);
+@@ -1372,12 +1282,15 @@ static int ep_poll_callback(wait_queue_e
+        * chained in ep->ovflist and requeued later on.
+        */
+       if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
+-              if (chain_epi_lockless(epi))
++              if (epi->next == EP_UNACTIVE_PTR) {
++                      epi->next = READ_ONCE(ep->ovflist);
++                      WRITE_ONCE(ep->ovflist, epi);
+                       ep_pm_stay_awake_rcu(epi);
++              }
+       } else if (!ep_is_linked(epi)) {
+               /* In the usual case, add event to ready list. */
+-              if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
+-                      ep_pm_stay_awake_rcu(epi);
++              list_add_tail(&epi->rdllink, &ep->rdllist);
++              ep_pm_stay_awake_rcu(epi);
+       }
+       /*
+@@ -1410,7 +1323,7 @@ static int ep_poll_callback(wait_queue_e
+               pwake++;
+ out_unlock:
+-      read_unlock_irqrestore(&ep->lock, flags);
++      spin_unlock_irqrestore(&ep->lock, flags);
+       /* We have to call this outside the lock */
+       if (pwake)
+@@ -1745,7 +1658,7 @@ static int ep_insert(struct eventpoll *e
+       }
+       /* We have to drop the new item inside our item list to keep track of it */
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       /* record NAPI ID of new item if present */
+       ep_set_busy_poll_napi_id(epi);
+@@ -1762,7 +1675,7 @@ static int ep_insert(struct eventpoll *e
+                       pwake++;
+       }
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+       /* We have to call this outside the lock */
+       if (pwake)
+@@ -1826,7 +1739,7 @@ static int ep_modify(struct eventpoll *e
+        * list, push it inside.
+        */
+       if (ep_item_poll(epi, &pt, 1)) {
+-              write_lock_irq(&ep->lock);
++              spin_lock_irq(&ep->lock);
+               if (!ep_is_linked(epi)) {
+                       list_add_tail(&epi->rdllink, &ep->rdllist);
+                       ep_pm_stay_awake(epi);
+@@ -1837,7 +1750,7 @@ static int ep_modify(struct eventpoll *e
+                       if (waitqueue_active(&ep->poll_wait))
+                               pwake++;
+               }
+-              write_unlock_irq(&ep->lock);
++              spin_unlock_irq(&ep->lock);
+       }
+       /* We have to call this outside the lock */
+@@ -2089,7 +2002,7 @@ static int ep_poll(struct eventpoll *ep,
+               init_wait(&wait);
+               wait.func = ep_autoremove_wake_function;
+-              write_lock_irq(&ep->lock);
++              spin_lock_irq(&ep->lock);
+               /*
+                * Barrierless variant, waitqueue_active() is called under
+                * the same lock on wakeup ep_poll_callback() side, so it
+@@ -2108,7 +2021,7 @@ static int ep_poll(struct eventpoll *ep,
+               if (!eavail)
+                       __add_wait_queue_exclusive(&ep->wq, &wait);
+-              write_unlock_irq(&ep->lock);
++              spin_unlock_irq(&ep->lock);
+               if (!eavail)
+                       timed_out = !ep_schedule_timeout(to) ||
+@@ -2124,7 +2037,7 @@ static int ep_poll(struct eventpoll *ep,
+               eavail = 1;
+               if (!list_empty_careful(&wait.entry)) {
+-                      write_lock_irq(&ep->lock);
++                      spin_lock_irq(&ep->lock);
+                       /*
+                        * If the thread timed out and is not on the wait queue,
+                        * it means that the thread was woken up after its
+@@ -2135,7 +2048,7 @@ static int ep_poll(struct eventpoll *ep,
+                       if (timed_out)
+                               eavail = list_empty(&wait.entry);
+                       __remove_wait_queue(&ep->wq, &wait);
+-                      write_unlock_irq(&ep->lock);
++                      spin_unlock_irq(&ep->lock);
+               }
+       }
+ }
diff --git a/queue-6.17/fbdev-fix-logic-error-in-offb-name-match.patch b/queue-6.17/fbdev-fix-logic-error-in-offb-name-match.patch
new file mode 100644 (file)
index 0000000..b3a1c41
--- /dev/null
@@ -0,0 +1,37 @@
+From 15df28699b28d6b49dc305040c4e26a9553df07a Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@linux-m68k.org>
+Date: Thu, 9 Oct 2025 09:56:25 +1100
+Subject: fbdev: Fix logic error in "offb" name match
+
+From: Finn Thain <fthain@linux-m68k.org>
+
+commit 15df28699b28d6b49dc305040c4e26a9553df07a upstream.
+
+A regression was reported to me recently whereby /dev/fb0 had disappeared
+from a PowerBook G3 Series "Wallstreet". The problem shows up when the
+"video=ofonly" parameter is passed to the kernel, which is what the
+bootloader does when "no video driver" is selected. The cause of the
+problem is the "offb" string comparison, which got mangled when it got
+refactored. Fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: 93604a5ade3a ("fbdev: Handle video= parameter in video/cmdline.c")
+Reported-and-tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@linux-m68k.org>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_cmdline.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/fbdev/core/fb_cmdline.c
++++ b/drivers/video/fbdev/core/fb_cmdline.c
+@@ -40,7 +40,7 @@ int fb_get_options(const char *name, cha
+       bool enabled;
+       if (name)
+-              is_of = strncmp(name, "offb", 4);
++              is_of = !strncmp(name, "offb", 4);
+       enabled = __video_get_options(name, &options, is_of);
diff --git a/queue-6.17/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch b/queue-6.17/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
new file mode 100644 (file)
index 0000000..1c005c3
--- /dev/null
@@ -0,0 +1,31 @@
+From d68318471aa2e16222ebf492883e05a2d72b9b17 Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <haoxiang_li2024@163.com>
+Date: Tue, 15 Jul 2025 17:51:20 +0800
+Subject: fs/ntfs3: Fix a resource leak bug in wnd_extend()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+commit d68318471aa2e16222ebf492883e05a2d72b9b17 upstream.
+
+Add put_bh() to decrease the refcount of 'bh' after the job
+is finished, preventing a resource leak.
+
+Fixes: 3f3b442b5ad2 ("fs/ntfs3: Add bitmap")
+Cc: stable@vger.kernel.org
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/bitmap.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1371,6 +1371,7 @@ int wnd_extend(struct wnd_bitmap *wnd, s
+               mark_buffer_dirty(bh);
+               unlock_buffer(bh);
+               /* err = sync_dirty_buffer(bh); */
++              put_bh(bh);
+               b0 = 0;
+               bits -= op;
diff --git a/queue-6.17/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch b/queue-6.17/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
new file mode 100644 (file)
index 0000000..1f72d0a
--- /dev/null
@@ -0,0 +1,89 @@
+From 72b7ceca857f38a8ca7c5629feffc63769638974 Mon Sep 17 00:00:00 2001
+From: Shashank A P <shashank.ap@samsung.com>
+Date: Mon, 1 Sep 2025 14:59:00 +0530
+Subject: fs: quota: create dedicated workqueue for quota_release_work
+
+From: Shashank A P <shashank.ap@samsung.com>
+
+commit 72b7ceca857f38a8ca7c5629feffc63769638974 upstream.
+
+There is a kernel panic due to WARN_ONCE when panic_on_warn is set.
+
+This issue occurs when writeback is triggered due to sync call for an
+opened file(ie, writeback reason is WB_REASON_SYNC). When f2fs balance
+is needed at sync path, flush for quota_release_work is triggered.
+By default quota_release_work is queued to "events_unbound" queue which
+does not have WQ_MEM_RECLAIM flag. During f2fs balance "writeback"
+workqueue tries to flush quota_release_work causing kernel panic due to
+MEM_RECLAIM flag mismatch errors.
+
+This patch creates dedicated workqueue with WQ_MEM_RECLAIM flag
+for work quota_release_work.
+
+------------[ cut here ]------------
+WARNING: CPU: 4 PID: 14867 at kernel/workqueue.c:3721 check_flush_dependency+0x13c/0x148
+Call trace:
+ check_flush_dependency+0x13c/0x148
+ __flush_work+0xd0/0x398
+ flush_delayed_work+0x44/0x5c
+ dquot_writeback_dquots+0x54/0x318
+ f2fs_do_quota_sync+0xb8/0x1a8
+ f2fs_write_checkpoint+0x3cc/0x99c
+ f2fs_gc+0x190/0x750
+ f2fs_balance_fs+0x110/0x168
+ f2fs_write_single_data_page+0x474/0x7dc
+ f2fs_write_data_pages+0x7d0/0xd0c
+ do_writepages+0xe0/0x2f4
+ __writeback_single_inode+0x44/0x4ac
+ writeback_sb_inodes+0x30c/0x538
+ wb_writeback+0xf4/0x440
+ wb_workfn+0x128/0x5d4
+ process_scheduled_works+0x1c4/0x45c
+ worker_thread+0x32c/0x3e8
+ kthread+0x11c/0x1b0
+ ret_from_fork+0x10/0x20
+Kernel panic - not syncing: kernel: panic_on_warn set ...
+
+Fixes: ac6f420291b3 ("quota: flush quota_release_work upon quota writeback")
+CC: stable@vger.kernel.org
+Signed-off-by: Shashank A P <shashank.ap@samsung.com>
+Link: https://patch.msgid.link/20250901092905.2115-1-shashank.ap@samsung.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/quota/dquot.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -162,6 +162,9 @@ static struct quota_module_name module_n
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
++/* workqueue for work quota_release_work*/
++static struct workqueue_struct *quota_unbound_wq;
++
+ void register_quota_format(struct quota_format_type *fmt)
+ {
+       spin_lock(&dq_list_lock);
+@@ -881,7 +884,7 @@ void dqput(struct dquot *dquot)
+       put_releasing_dquots(dquot);
+       atomic_dec(&dquot->dq_count);
+       spin_unlock(&dq_list_lock);
+-      queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
++      queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+@@ -3041,6 +3044,11 @@ static int __init dquot_init(void)
+       shrinker_register(dqcache_shrinker);
++      quota_unbound_wq = alloc_workqueue("quota_events_unbound",
++                                         WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
++      if (!quota_unbound_wq)
++              panic("Cannot create quota_unbound_wq\n");
++
+       return 0;
+ }
+ fs_initcall(dquot_init);
diff --git a/queue-6.17/fsnotify-pass-correct-offset-to-fsnotify_mmap_perm.patch b/queue-6.17/fsnotify-pass-correct-offset-to-fsnotify_mmap_perm.patch
new file mode 100644 (file)
index 0000000..e3aa3b2
--- /dev/null
@@ -0,0 +1,59 @@
+From 28bba2c2935e219d6cb6946e16b9a0b7c47913be Mon Sep 17 00:00:00 2001
+From: Ryan Roberts <ryan.roberts@arm.com>
+Date: Fri, 3 Oct 2025 16:52:36 +0100
+Subject: fsnotify: pass correct offset to fsnotify_mmap_perm()
+
+From: Ryan Roberts <ryan.roberts@arm.com>
+
+commit 28bba2c2935e219d6cb6946e16b9a0b7c47913be upstream.
+
+fsnotify_mmap_perm() requires a byte offset for the file about to be
+mmap'ed.  But it is called from vm_mmap_pgoff(), which has a page offset.
+Previously the conversion was done incorrectly so let's fix it, being
+careful not to overflow on 32-bit platforms.
+
+Discovered during code review.
+
+Link: https://lkml.kernel.org/r/20251003155238.2147410-1-ryan.roberts@arm.com
+Fixes: 066e053fe208 ("fsnotify: add pre-content hooks on mmap()")
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Kiryl Shutsemau <kas@kernel.org>
+Cc: Amir Goldstein <amir73il@gmail.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/util.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/mm/util.c b/mm/util.c
+index 6c1d64ed0221..8989d5767528 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -566,6 +566,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot,
+       unsigned long flag, unsigned long pgoff)
+ {
++      loff_t off = (loff_t)pgoff << PAGE_SHIFT;
+       unsigned long ret;
+       struct mm_struct *mm = current->mm;
+       unsigned long populate;
+@@ -573,7 +574,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
+       ret = security_mmap_file(file, prot, flag);
+       if (!ret)
+-              ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
++              ret = fsnotify_mmap_perm(file, prot, off, len);
+       if (!ret) {
+               if (mmap_write_lock_killable(mm))
+                       return -EINTR;
+-- 
+2.51.0
+
diff --git a/queue-6.17/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch b/queue-6.17/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
new file mode 100644 (file)
index 0000000..fd1d273
--- /dev/null
@@ -0,0 +1,90 @@
+From 26e5c67deb2e1f42a951f022fdf5b9f7eb747b01 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Mon, 15 Sep 2025 17:24:17 -0700
+Subject: fuse: fix livelock in synchronous file put from fuseblk workers
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 26e5c67deb2e1f42a951f022fdf5b9f7eb747b01 upstream.
+
+I observed a hang when running generic/323 against a fuseblk server.
+This test opens a file, initiates a lot of AIO writes to that file
+descriptor, and closes the file descriptor before the writes complete.
+Unsurprisingly, the AIO exerciser threads are mostly stuck waiting for
+responses from the fuseblk server:
+
+# cat /proc/372265/task/372313/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_do_getattr+0xfc/0x1f0 [fuse]
+[<0>] fuse_file_read_iter+0xbe/0x1c0 [fuse]
+[<0>] aio_read+0x130/0x1e0
+[<0>] io_submit_one+0x542/0x860
+[<0>] __x64_sys_io_submit+0x98/0x1a0
+[<0>] do_syscall_64+0x37/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+But the /weird/ part is that the fuseblk server threads are waiting for
+responses from itself:
+
+# cat /proc/372210/task/372232/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_file_put+0x9a/0xd0 [fuse]
+[<0>] fuse_release+0x36/0x50 [fuse]
+[<0>] __fput+0xec/0x2b0
+[<0>] task_work_run+0x55/0x90
+[<0>] syscall_exit_to_user_mode+0xe9/0x100
+[<0>] do_syscall_64+0x43/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+The fuseblk server is fuse2fs so there's nothing all that exciting in
+the server itself.  So why is the fuse server calling fuse_file_put?
+The commit message for the fstest sheds some light on that:
+
+"By closing the file descriptor before calling io_destroy, you pretty
+much guarantee that the last put on the ioctx will be done in interrupt
+context (during I/O completion).
+
+Aha.  AIO fgets a new struct file from the fd when it queues the ioctx.
+The completion of the FUSE_WRITE command from userspace causes the fuse
+server to call the AIO completion function.  The completion puts the
+struct file, queuing a delayed fput to the fuse server task.  When the
+fuse server task returns to userspace, it has to run the delayed fput,
+which in the case of a fuseblk server, it does synchronously.
+
+Sending the FUSE_RELEASE command sychronously from fuse server threads
+is a bad idea because a client program can initiate enough simultaneous
+AIOs such that all the fuse server threads end up in delayed_fput, and
+now there aren't any threads left to handle the queued fuse commands.
+
+Fix this by only using asynchronous fputs when closing files, and leave
+a comment explaining why.
+
+Cc: stable@vger.kernel.org # v2.6.38
+Fixes: 5a18ec176c934c ("fuse: fix hang of single threaded fuseblk filesystem")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -356,8 +356,14 @@ void fuse_file_release(struct inode *ino
+        * Make the release synchronous if this is a fuseblk mount,
+        * synchronous RELEASE is allowed (and desirable) in this case
+        * because the server can be trusted not to screw up.
++       *
++       * Always use the asynchronous file put because the current thread
++       * might be the fuse server.  This can happen if a process starts some
++       * aio and closes the fd before the aio completes.  Since aio takes its
++       * own ref to the file, the IO completion has to drop the ref, which is
++       * how the fuse server can end up closing its clients' files.
+        */
+-      fuse_file_put(ff, ff->fm->fc->destroy);
++      fuse_file_put(ff, false);
+ }
+ void fuse_release_common(struct file *file, bool isdir)
diff --git a/queue-6.17/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch b/queue-6.17/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
new file mode 100644 (file)
index 0000000..5efb6a1
--- /dev/null
@@ -0,0 +1,36 @@
+From 0b563aad1c0a05dc7d123f68a9f82f79de206dad Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Mon, 1 Sep 2025 17:16:26 +0200
+Subject: fuse: fix possibly missing fuse_copy_finish() call in fuse_notify()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 0b563aad1c0a05dc7d123f68a9f82f79de206dad upstream.
+
+In case of FUSE_NOTIFY_RESEND and FUSE_NOTIFY_INC_EPOCH fuse_copy_finish()
+isn't called.
+
+Fix by always calling fuse_copy_finish() after fuse_notify().  It's a no-op
+if called a second time.
+
+Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests")
+Fixes: 2396356a945b ("fuse: add more control over cache invalidation behaviour")
+Cc: <stable@vger.kernel.org> # v6.9
+Reviewed-by: Joanne Koong <joannelkoong@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/dev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2156,7 +2156,7 @@ static ssize_t fuse_dev_do_write(struct
+        */
+       if (!oh.unique) {
+               err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
+-              goto out;
++              goto copy_finish;
+       }
+       err = -EINVAL;
diff --git a/queue-6.17/gpio-mpfs-fix-setting-gpio-direction-to-output.patch b/queue-6.17/gpio-mpfs-fix-setting-gpio-direction-to-output.patch
new file mode 100644 (file)
index 0000000..33b705f
--- /dev/null
@@ -0,0 +1,41 @@
+From bc061143637532c08d9fc657eec93fdc2588068e Mon Sep 17 00:00:00 2001
+From: Conor Dooley <conor.dooley@microchip.com>
+Date: Thu, 25 Sep 2025 16:39:18 +0100
+Subject: gpio: mpfs: fix setting gpio direction to output
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+commit bc061143637532c08d9fc657eec93fdc2588068e upstream.
+
+mpfs_gpio_direction_output() actually sets the line to input mode.
+Use the correct register settings for output mode so that this function
+actually works as intended.
+
+This was a copy-paste mistake made when converting to regmap during the
+driver submission process. It went unnoticed because my test for output
+mode is toggling LEDs on an Icicle kit which functions with the
+incorrect code. The internal reporter has yet to test the patch, but on
+their system the incorrect setting may be the reason for failures to
+drive the GPIO lines on the BeagleV-fire board.
+
+CC: stable@vger.kernel.org
+Fixes: a987b78f3615e ("gpio: mpfs: add polarfire soc gpio support")
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Link: https://lore.kernel.org/r/20250925-boogieman-carrot-82989ff75d10@spud
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-mpfs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-mpfs.c
++++ b/drivers/gpio/gpio-mpfs.c
+@@ -69,7 +69,7 @@ static int mpfs_gpio_direction_output(st
+       struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+       regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
+-                         MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
++                         MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
+       regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
+                          value << gpio_index);
diff --git a/queue-6.17/i3c-fix-default-i2c-adapter-timeout-value.patch b/queue-6.17/i3c-fix-default-i2c-adapter-timeout-value.patch
new file mode 100644 (file)
index 0000000..66b593f
--- /dev/null
@@ -0,0 +1,46 @@
+From 9395b3c412933401a34845d5326afe4011bbd40f Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Date: Fri, 5 Sep 2025 13:03:20 +0300
+Subject: i3c: Fix default I2C adapter timeout value
+
+From: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+
+commit 9395b3c412933401a34845d5326afe4011bbd40f upstream.
+
+Commit 3a379bbcea0a ("i3c: Add core I3C infrastructure") set the default
+adapter timeout for I2C transfers as 1000 (ms). However that parameter
+is defined in jiffies not in milliseconds.
+
+With mipi-i3c-hci driver this wasn't visible until commit c0a90eb55a69
+("i3c: mipi-i3c-hci: use adapter timeout value for I2C transfers").
+
+Fix this by setting the default timeout as HZ (CONFIG_HZ) not 1000.
+
+Fixes: 1b84691e7870 ("i3c: dw: use adapter timeout value for I2C transfers")
+Fixes: be27ed672878 ("i3c: master: cdns: use adapter timeout value for I2C transfers")
+Fixes: c0a90eb55a69 ("i3c: mipi-i3c-hci: use adapter timeout value for I2C transfers")
+Fixes: a747e01adad2 ("i3c: master: svc: use adapter timeout value for I2C transfers")
+Fixes: d028219a9f14 ("i3c: master: Add basic driver for the Renesas I3C controller")
+Fixes: 3a379bbcea0a ("i3c: Add core I3C infrastructure")
+Cc: stable@vger.kernel.org # 6.17
+Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Link: https://lore.kernel.org/r/20250905100320.954536-1-jarkko.nikula@linux.intel.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i3c/master.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2492,7 +2492,7 @@ static int i3c_master_i2c_adapter_init(s
+       strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+       /* FIXME: Should we allow i3c masters to override these values? */
+-      adap->timeout = 1000;
++      adap->timeout = HZ;
+       adap->retries = 3;
+       id = of_alias_get_id(master->dev.of_node, "i2c");
diff --git a/queue-6.17/iio-adc-pac1934-fix-channel-disable-configuration.patch b/queue-6.17/iio-adc-pac1934-fix-channel-disable-configuration.patch
new file mode 100644 (file)
index 0000000..8d85b63
--- /dev/null
@@ -0,0 +1,71 @@
+From 3c63ba1c430af1c0dcd68dd36f2246980621dcba Mon Sep 17 00:00:00 2001
+From: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+Date: Mon, 11 Aug 2025 13:09:04 +0000
+Subject: iio/adc/pac1934: fix channel disable configuration
+
+From: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+
+commit 3c63ba1c430af1c0dcd68dd36f2246980621dcba upstream.
+
+There are two problems with the chip configuration in this driver:
+- First, is that writing 12 bytes (ARRAY_SIZE(regs)) would anyhow
+  lead to a config overflow due to HW auto increment implementation
+  in the chip.
+- Second, the i2c_smbus_write_block_data write ends up in writing
+  unexpected value to the channel_dis register, this is because
+  the smbus size that is 0x03 in this case gets written to the
+  register. The PAC1931/2/3/4 data sheet does not really specify
+  that block write is indeed supported.
+
+This problem is probably not visible on PAC1934 version where all
+channels are used as the chip is properly configured by luck,
+but in our case when using PAC1931 this leads to a nonfunctional device.
+
+Fixes: 0fb528c8255b (iio: adc: adding support for PAC193x)
+Suggested-by: Rene Straub <mailto:rene.straub@belden.com>
+Signed-off-by: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+Reviewed-by: Marius Cristea <marius.cristea@microchip.com>
+Link: https://patch.msgid.link/20250811130904.2481790-1-aleksandar.gerasimovski@belden.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/pac1934.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/adc/pac1934.c
++++ b/drivers/iio/adc/pac1934.c
+@@ -88,6 +88,7 @@
+ #define PAC1934_VPOWER_3_ADDR                 0x19
+ #define PAC1934_VPOWER_4_ADDR                 0x1A
+ #define PAC1934_REFRESH_V_REG_ADDR            0x1F
++#define PAC1934_SLOW_REG_ADDR                 0x20
+ #define PAC1934_CTRL_STAT_REGS_ADDR           0x1C
+ #define PAC1934_PID_REG_ADDR                  0xFD
+ #define PAC1934_MID_REG_ADDR                  0xFE
+@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct
+       /* no SLOW triggered REFRESH, clear POR */
+       regs[PAC1934_SLOW_REG_OFF] = 0;
+-      ret =  i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+-                                        ARRAY_SIZE(regs), (u8 *)regs);
++      /*
++       * Write the three bytes sequentially, as the device does not support
++       * block write.
++       */
++      ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
++                                      regs[PAC1934_CHANNEL_DIS_REG_OFF]);
++      if (ret)
++              return ret;
++
++      ret = i2c_smbus_write_byte_data(client,
++                                      PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
++                                      regs[PAC1934_NEG_PWR_REG_OFF]);
++      if (ret)
++              return ret;
++
++      ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
++                                      regs[PAC1934_SLOW_REG_OFF]);
+       if (ret)
+               return ret;
diff --git a/queue-6.17/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch b/queue-6.17/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
new file mode 100644 (file)
index 0000000..a3c00f3
--- /dev/null
@@ -0,0 +1,35 @@
+From f9381ece76de999a2065d5b4fdd87fa17883978c Mon Sep 17 00:00:00 2001
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+Date: Mon, 1 Sep 2025 21:57:25 +0800
+Subject: iio: dac: ad5360: use int type to store negative error codes
+
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+
+commit f9381ece76de999a2065d5b4fdd87fa17883978c upstream.
+
+Change the 'ret' variable in ad5360_update_ctrl() from unsigned int to
+int, as it needs to store either negative error codes or zero returned
+by ad5360_write_unlocked().
+
+Fixes: a3e2940c24d3 ("staging:iio:dac: Add AD5360 driver")
+Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Link: https://patch.msgid.link/20250901135726.17601-2-rongqianfeng@vivo.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/dac/ad5360.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/dac/ad5360.c
++++ b/drivers/iio/dac/ad5360.c
+@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio
+       unsigned int clr)
+ {
+       struct ad5360_state *st = iio_priv(indio_dev);
+-      unsigned int ret;
++      int ret;
+       mutex_lock(&st->lock);
diff --git a/queue-6.17/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch b/queue-6.17/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
new file mode 100644 (file)
index 0000000..70a3b1b
--- /dev/null
@@ -0,0 +1,35 @@
+From 3379c900320954d768ed9903691fb2520926bbe3 Mon Sep 17 00:00:00 2001
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+Date: Mon, 1 Sep 2025 21:57:26 +0800
+Subject: iio: dac: ad5421: use int type to store negative error codes
+
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+
+commit 3379c900320954d768ed9903691fb2520926bbe3 upstream.
+
+Change the 'ret' variable in ad5421_update_ctrl() from unsigned int to
+int, as it needs to store either negative error codes or zero returned
+by ad5421_write_unlocked().
+
+Fixes: 5691b23489db ("staging:iio:dac: Add AD5421 driver")
+Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Link: https://patch.msgid.link/20250901135726.17601-3-rongqianfeng@vivo.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/dac/ad5421.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/dac/ad5421.c
++++ b/drivers/iio/dac/ad5421.c
+@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio
+       unsigned int clr)
+ {
+       struct ad5421_state *st = iio_priv(indio_dev);
+-      unsigned int ret;
++      int ret;
+       mutex_lock(&st->lock);
diff --git a/queue-6.17/iio-frequency-adf4350-fix-prescaler-usage.patch b/queue-6.17/iio-frequency-adf4350-fix-prescaler-usage.patch
new file mode 100644 (file)
index 0000000..dcc3b4b
--- /dev/null
@@ -0,0 +1,72 @@
+From 33d7ecbf69aa7dd4145e3b77962bcb8759eede3d Mon Sep 17 00:00:00 2001
+From: Michael Hennerich <michael.hennerich@analog.com>
+Date: Fri, 29 Aug 2025 12:25:42 +0100
+Subject: iio: frequency: adf4350: Fix prescaler usage.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Hennerich <michael.hennerich@analog.com>
+
+commit 33d7ecbf69aa7dd4145e3b77962bcb8759eede3d upstream.
+
+The ADF4350/1 features a programmable dual-modulus prescaler of 4/5 or 8/9.
+When set to 4/5, the maximum RF frequency allowed is 3 GHz.
+Therefore, when operating the ADF4351 above 3 GHz, this must be set to 8/9.
+In this context not the RF output frequency is meant
+- it's the VCO frequency.
+
+Therefore move the prescaler selection after we derived the VCO frequency
+from the desired RF output frequency.
+
+This BUG may have caused PLL lock instabilities when operating the VCO at
+the very high range close to 4.4 GHz.
+
+Fixes: e31166f0fd48 ("iio: frequency: New driver for Analog Devices ADF4350/ADF4351 Wideband Synthesizers")
+Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Link: https://patch.msgid.link/20250829-adf4350-fix-v2-1-0bf543ba797d@analog.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/frequency/adf4350.c |   20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf43
+       if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
+               return -EINVAL;
++      st->r4_rf_div_sel = 0;
++
++      /*
++       * !\TODO: The below computation is making sure we get a power of 2
++       * shift (st->r4_rf_div_sel) so that freq becomes higher or equal to
++       * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
++       * and friends.
++       */
++      while (freq < ADF4350_MIN_VCO_FREQ) {
++              freq <<= 1;
++              st->r4_rf_div_sel++;
++      }
++
+       if (freq > ADF4350_MAX_FREQ_45_PRESC) {
+               prescaler = ADF4350_REG1_PRESCALER;
+               mdiv = 75;
+@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf43
+               mdiv = 23;
+       }
+-      st->r4_rf_div_sel = 0;
+-
+-      while (freq < ADF4350_MIN_VCO_FREQ) {
+-              freq <<= 1;
+-              st->r4_rf_div_sel++;
+-      }
+-
+       /*
+        * Allow a predefined reference division factor
+        * if not set, compute our own
diff --git a/queue-6.17/iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch b/queue-6.17/iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch
new file mode 100644 (file)
index 0000000..6451379
--- /dev/null
@@ -0,0 +1,61 @@
+From 466f7a2fef2a4e426f809f79845a1ec1aeb558f4 Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 1 Sep 2025 09:49:15 +0200
+Subject: iio: imu: inv_icm42600: Avoid configuring if already pm_runtime suspended
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit 466f7a2fef2a4e426f809f79845a1ec1aeb558f4 upstream.
+
+Do as in suspend, skip resume configuration steps if the device is already
+pm_runtime suspended. This avoids reconfiguring a device that is already
+in the correct low-power state and ensures that pm_runtime handles the
+power state transitions properly.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-3-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -837,17 +837,15 @@ static int inv_icm42600_suspend(struct d
+       struct device *accel_dev;
+       bool wakeup;
+       int accel_conf;
+-      int ret;
++      int ret = 0;
+       mutex_lock(&st->lock);
+       st->suspended.gyro = st->conf.gyro.mode;
+       st->suspended.accel = st->conf.accel.mode;
+       st->suspended.temp = st->conf.temp_en;
+-      if (pm_runtime_suspended(dev)) {
+-              ret = 0;
++      if (pm_runtime_suspended(dev))
+               goto out_unlock;
+-      }
+       /* disable FIFO data streaming */
+       if (st->fifo.on) {
+@@ -900,10 +898,13 @@ static int inv_icm42600_resume(struct de
+       struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
+       struct device *accel_dev;
+       bool wakeup;
+-      int ret;
++      int ret = 0;
+       mutex_lock(&st->lock);
++      if (pm_runtime_suspended(dev))
++              goto out_unlock;
++
+       /* check wakeup capability */
+       accel_dev = &st->indio_accel->dev;
+       wakeup = st->apex.on && device_may_wakeup(accel_dev);
diff --git a/queue-6.17/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch b/queue-6.17/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
new file mode 100644 (file)
index 0000000..72184d4
--- /dev/null
@@ -0,0 +1,37 @@
+From a95a0b4e471a6d8860f40c6ac8f1cad9dde3189a Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 1 Sep 2025 09:49:14 +0200
+Subject: iio: imu: inv_icm42600: Drop redundant pm_runtime reinitialization in resume
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit a95a0b4e471a6d8860f40c6ac8f1cad9dde3189a upstream.
+
+Remove unnecessary calls to pm_runtime_disable(), pm_runtime_set_active(),
+and pm_runtime_enable() from the resume path. These operations are not
+required here and can interfere with proper pm_runtime state handling,
+especially when resuming from a pm_runtime suspended state.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-2-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -917,10 +917,6 @@ static int inv_icm42600_resume(struct de
+                       goto out_unlock;
+       }
+-      pm_runtime_disable(dev);
+-      pm_runtime_set_active(dev);
+-      pm_runtime_enable(dev);
+-
+       /* restore sensors state */
+       ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
+                                        st->suspended.accel,
diff --git a/queue-6.17/iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch b/queue-6.17/iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch
new file mode 100644 (file)
index 0000000..498d534
--- /dev/null
@@ -0,0 +1,82 @@
+From 0792c1984a45ccd7a296d6b8cb78088bc99a212e Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 1 Sep 2025 09:49:13 +0200
+Subject: iio: imu: inv_icm42600: Simplify pm_runtime setup
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit 0792c1984a45ccd7a296d6b8cb78088bc99a212e upstream.
+
+Rework the power management in inv_icm42600_core_probe() to use
+devm_pm_runtime_set_active_enabled(), which simplifies the runtime PM
+setup by handling activation and enabling in one step.
+Remove the separate inv_icm42600_disable_pm callback, as it's no longer
+needed with the devm-managed approach.
+Using devm_pm_runtime_enable() also fixes the missing disable of
+autosuspend.
+Update inv_icm42600_disable_vddio_reg() to only disable the regulator if
+the device is not suspended i.e. powered-down, preventing unbalanced
+disables.
+Also remove redundant error msg on regulator_disable(), the regulator
+framework already emits an error message when regulator_disable() fails.
+
+This simplifies the PM setup and avoids manipulating the usage counter
+unnecessarily.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-1-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |   24 ++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -711,20 +711,12 @@ static void inv_icm42600_disable_vdd_reg
+ static void inv_icm42600_disable_vddio_reg(void *_data)
+ {
+       struct inv_icm42600_state *st = _data;
+-      const struct device *dev = regmap_get_device(st->map);
+-      int ret;
+-
+-      ret = regulator_disable(st->vddio_supply);
+-      if (ret)
+-              dev_err(dev, "failed to disable vddio error %d\n", ret);
+-}
++      struct device *dev = regmap_get_device(st->map);
+-static void inv_icm42600_disable_pm(void *_data)
+-{
+-      struct device *dev = _data;
++      if (pm_runtime_status_suspended(dev))
++              return;
+-      pm_runtime_put_sync(dev);
+-      pm_runtime_disable(dev);
++      regulator_disable(st->vddio_supply);
+ }
+ int inv_icm42600_core_probe(struct regmap *regmap, int chip,
+@@ -824,16 +816,14 @@ int inv_icm42600_core_probe(struct regma
+               return ret;
+       /* setup runtime power management */
+-      ret = pm_runtime_set_active(dev);
++      ret = devm_pm_runtime_set_active_enabled(dev);
+       if (ret)
+               return ret;
+-      pm_runtime_get_noresume(dev);
+-      pm_runtime_enable(dev);
++
+       pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
+       pm_runtime_use_autosuspend(dev);
+-      pm_runtime_put(dev);
+-      return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
++      return ret;
+ }
+ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, "IIO_ICM42600");
diff --git a/queue-6.17/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch b/queue-6.17/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
new file mode 100644 (file)
index 0000000..377a711
--- /dev/null
@@ -0,0 +1,38 @@
+From 1315cc2dbd5034f566e20ddce4d675cb9e6d4ddd Mon Sep 17 00:00:00 2001
+From: Sean Anderson <sean.anderson@linux.dev>
+Date: Mon, 14 Jul 2025 20:30:58 -0400
+Subject: iio: xilinx-ams: Fix AMS_ALARM_THR_DIRECT_MASK
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+commit 1315cc2dbd5034f566e20ddce4d675cb9e6d4ddd upstream.
+
+AMS_ALARM_THR_DIRECT_MASK should be bit 0, not bit 1. This would cause
+hysteresis to be enabled with a lower threshold of -28C. The temperature
+alarm would never deassert even if the temperature dropped below the
+upper threshold.
+
+Fixes: d5c70627a794 ("iio: adc: Add Xilinx AMS driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: O'Griofa, Conall <conall.ogriofa@amd.com>
+Tested-by: Erim, Salih <Salih.Erim@amd.com>
+Acked-by: Erim, Salih <Salih.Erim@amd.com>
+Link: https://patch.msgid.link/20250715003058.2035656-1-sean.anderson@linux.dev
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/xilinx-ams.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -118,7 +118,7 @@
+ #define AMS_ALARM_THRESHOLD_OFF_10    0x10
+ #define AMS_ALARM_THRESHOLD_OFF_20    0x20
+-#define AMS_ALARM_THR_DIRECT_MASK     BIT(1)
++#define AMS_ALARM_THR_DIRECT_MASK     BIT(0)
+ #define AMS_ALARM_THR_MIN             0x0000
+ #define AMS_ALARM_THR_MAX             (BIT(16) - 1)
diff --git a/queue-6.17/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch b/queue-6.17/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
new file mode 100644 (file)
index 0000000..4d0fe87
--- /dev/null
@@ -0,0 +1,107 @@
+From feb500c7ae7a198db4d2757901bce562feeefa5e Mon Sep 17 00:00:00 2001
+From: Sean Anderson <sean.anderson@linux.dev>
+Date: Mon, 14 Jul 2025 20:28:47 -0400
+Subject: iio: xilinx-ams: Unmask interrupts after updating alarms
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+commit feb500c7ae7a198db4d2757901bce562feeefa5e upstream.
+
+To convert level-triggered alarms into edge-triggered IIO events, alarms
+are masked when they are triggered. To ensure we catch subsequent
+alarms, we then periodically poll to see if the alarm is still active.
+If it isn't, we unmask it. Active but masked alarms are stored in
+current_masked_alarm.
+
+If an active alarm is disabled, it will remain set in
+current_masked_alarm until ams_unmask_worker clears it. If the alarm is
+re-enabled before ams_unmask_worker runs, then it will never be cleared
+from current_masked_alarm. This will prevent the alarm event from being
+pushed even if the alarm is still active.
+
+Fix this by recalculating current_masked_alarm immediately when enabling
+or disabling alarms.
+
+Fixes: d5c70627a794 ("iio: adc: Add Xilinx AMS driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: O'Griofa, Conall <conall.ogriofa@amd.com>
+Tested-by: Erim, Salih <Salih.Erim@amd.com>
+Acked-by: Erim, Salih <Salih.Erim@amd.com>
+Link: https://patch.msgid.link/20250715002847.2035228-1-sean.anderson@linux.dev
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/xilinx-ams.c |   45 +++++++++++++++++++++++--------------------
+ 1 file changed, 25 insertions(+), 20 deletions(-)
+
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct a
+       ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+ }
++static void ams_unmask(struct ams *ams)
++{
++      unsigned int status, unmask;
++
++      status = readl(ams->base + AMS_ISR_0);
++
++      /* Clear those bits which are not active anymore */
++      unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
++
++      /* Clear status of disabled alarm */
++      unmask |= ams->intr_mask;
++
++      ams->current_masked_alarm &= status;
++
++      /* Also clear those which are masked out anyway */
++      ams->current_masked_alarm &= ~ams->intr_mask;
++
++      /* Clear the interrupts before we unmask them */
++      writel(unmask, ams->base + AMS_ISR_0);
++
++      ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
++}
++
+ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ {
+       unsigned long flags;
+@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams
+       spin_lock_irqsave(&ams->intr_lock, flags);
+       ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
++      ams_unmask(ams);
+       spin_unlock_irqrestore(&ams->intr_lock, flags);
+ }
+@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio
+ static void ams_unmask_worker(struct work_struct *work)
+ {
+       struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+-      unsigned int status, unmask;
+       spin_lock_irq(&ams->intr_lock);
+-
+-      status = readl(ams->base + AMS_ISR_0);
+-
+-      /* Clear those bits which are not active anymore */
+-      unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+-
+-      /* Clear status of disabled alarm */
+-      unmask |= ams->intr_mask;
+-
+-      ams->current_masked_alarm &= status;
+-
+-      /* Also clear those which are masked out anyway */
+-      ams->current_masked_alarm &= ~ams->intr_mask;
+-
+-      /* Clear the interrupts before we unmask them */
+-      writel(unmask, ams->base + AMS_ISR_0);
+-
+-      ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+-
++      ams_unmask(ams);
+       spin_unlock_irq(&ams->intr_lock);
+       /* If still pending some alarm re-trigger the timer */
diff --git a/queue-6.17/init-handle-bootloader-identifier-in-kernel-parameters.patch b/queue-6.17/init-handle-bootloader-identifier-in-kernel-parameters.patch
new file mode 100644 (file)
index 0000000..e80e28f
--- /dev/null
@@ -0,0 +1,74 @@
+From e416f0ed3c500c05c55fb62ee62662717b1c7f71 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Mon, 21 Jul 2025 18:13:43 +0800
+Subject: init: handle bootloader identifier in kernel parameters
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit e416f0ed3c500c05c55fb62ee62662717b1c7f71 upstream.
+
+BootLoaders (Grub, LILO, etc) may pass an identifier such as "BOOT_IMAGE=
+/boot/vmlinuz-x.y.z" to kernel parameters.  But these identifiers are not
+recognized by the kernel itself so will be passed to userspace.  However
+user space init program also don't recognize it.
+
+KEXEC/KDUMP (kexec-tools) may also pass an identifier such as "kexec" on
+some architectures.
+
+We cannot change BootLoader's behavior, because this behavior exists for
+many years, and there are already user space programs that search BOOT_IMAGE=
+in /proc/cmdline to obtain the kernel image locations:
+
+https://github.com/linuxdeepin/deepin-ab-recovery/blob/master/util.go
+(search getBootOptions)
+https://github.com/linuxdeepin/deepin-ab-recovery/blob/master/main.go
+(search getKernelReleaseWithBootOption) So the best way is to handle
+(ignore) it by the kernel itself, which can avoid such boot warnings (if
+we use something like init=/bin/bash, bootloader identifier can even cause
+a crash):
+
+Kernel command line: BOOT_IMAGE=(hd0,1)/vmlinuz-6.x root=/dev/sda3 ro console=tty
+Unknown kernel command line parameters "BOOT_IMAGE=(hd0,1)/vmlinuz-6.x", will be passed to user space.
+
+[chenhuacai@loongson.cn: use strstarts()]
+  Link: https://lkml.kernel.org/r/20250815090120.1569947-1-chenhuacai@loongson.cn
+Link: https://lkml.kernel.org/r/20250721101343.3283480-1-chenhuacai@loongson.cn
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -544,6 +544,12 @@ static int __init unknown_bootoption(cha
+                                    const char *unused, void *arg)
+ {
+       size_t len = strlen(param);
++      /*
++       * Well-known bootloader identifiers:
++       * 1. LILO/Grub pass "BOOT_IMAGE=...";
++       * 2. kexec/kdump (kexec-tools) pass "kexec".
++       */
++      const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
+       /* Handle params aliased to sysctls */
+       if (sysctl_is_alias(param))
+@@ -551,6 +557,12 @@ static int __init unknown_bootoption(cha
+       repair_env_string(param, val);
++      /* Handle bootloader identifier */
++      for (int i = 0; bootloader[i]; i++) {
++              if (strstarts(param, bootloader[i]))
++                      return 0;
++      }
++
+       /* Handle obsolete-style parameters */
+       if (obsolete_checksetup(param))
+               return 0;
diff --git a/queue-6.17/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch b/queue-6.17/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
new file mode 100644 (file)
index 0000000..d4111a9
--- /dev/null
@@ -0,0 +1,50 @@
+From 5ef7e24c742038a5d8c626fdc0e3a21834358341 Mon Sep 17 00:00:00 2001
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 18 Sep 2025 13:02:02 +0800
+Subject: iommu/vt-d: PRS isn't usable if PDS isn't supported
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+commit 5ef7e24c742038a5d8c626fdc0e3a21834358341 upstream.
+
+The specification, Section 7.10, "Software Steps to Drain Page Requests &
+Responses," requires software to submit an Invalidation Wait Descriptor
+(inv_wait_dsc) with the Page-request Drain (PD=1) flag set, along with
+the Invalidation Wait Completion Status Write flag (SW=1). It then waits
+for the Invalidation Wait Descriptor's completion.
+
+However, the PD field in the Invalidation Wait Descriptor is optional, as
+stated in Section 6.5.2.9, "Invalidation Wait Descriptor":
+
+"Page-request Drain (PD): Remapping hardware implementations reporting
+ Page-request draining as not supported (PDS = 0 in ECAP_REG) treat this
+ field as reserved."
+
+This implies that if the IOMMU doesn't support the PDS capability, software
+can't drain page requests and group responses as expected.
+
+Do not enable PCI/PRI if the IOMMU doesn't support PDS.
+
+Reported-by: Joel Granados <joel.granados@kernel.org>
+Closes: https://lore.kernel.org/r/20250909-jag-pds-v1-1-ad8cba0e494e@kernel.org
+Fixes: 66ac4db36f4c ("iommu/vt-d: Add page request draining support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250915062946.120196-1-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3817,7 +3817,7 @@ static struct iommu_device *intel_iommu_
+                       }
+                       if (info->ats_supported && ecap_prs(iommu->ecap) &&
+-                          pci_pri_supported(pdev))
++                          ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
+                               info->pri_supported = 1;
+               }
+       }
diff --git a/queue-6.17/ipmi-msghandler-change-seq_lock-to-a-mutex.patch b/queue-6.17/ipmi-msghandler-change-seq_lock-to-a-mutex.patch
new file mode 100644 (file)
index 0000000..dd4cc8f
--- /dev/null
@@ -0,0 +1,278 @@
+From 8fd8ea2869cfafb3b1d6f95ff49561b13a73438d Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Tue, 19 Aug 2025 13:11:39 -0500
+Subject: ipmi:msghandler:Change seq_lock to a mutex
+
+From: Corey Minyard <corey@minyard.net>
+
+commit 8fd8ea2869cfafb3b1d6f95ff49561b13a73438d upstream.
+
+Dan Carpenter got a Smatch warning:
+
+       drivers/char/ipmi/ipmi_msghandler.c:5265 ipmi_free_recv_msg()
+       warn: sleeping in atomic context
+
+due to the recent rework of the IPMI driver's locking.  I didn't realize
+vfree could block.  But there is an easy solution to this, now that
+almost everything in the message handler runs in thread context.
+
+I wanted to spend the time earlier to see if seq_lock could be converted
+from a spinlock to a mutex, but I wanted the previous changes to go in
+and soak before I did that.  So I went ahead and did the analysis and
+converting should work.  And solve this problem.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/r/202503240244.LR7pOwyr-lkp@intel.com/
+Fixes: 3be997d5a64a ("ipmi:msghandler: Remove srcu from the ipmi user structure")
+Cc: <stable@vger.kernel.org> # 6.16
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |   63 ++++++++++++++----------------------
+ 1 file changed, 26 insertions(+), 37 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -466,7 +466,7 @@ struct ipmi_smi {
+        * interface to match them up with their responses.  A routine
+        * is called periodically to time the items in this list.
+        */
+-      spinlock_t       seq_lock;
++      struct mutex seq_lock;
+       struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+       int curr_seq;
+@@ -1117,12 +1117,11 @@ static int intf_find_seq(struct ipmi_smi
+                        struct ipmi_recv_msg **recv_msg)
+ {
+       int           rv = -ENODEV;
+-      unsigned long flags;
+       if (seq >= IPMI_IPMB_NUM_SEQ)
+               return -EINVAL;
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       if (intf->seq_table[seq].inuse) {
+               struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+@@ -1135,7 +1134,7 @@ static int intf_find_seq(struct ipmi_smi
+                       rv = 0;
+               }
+       }
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++      mutex_unlock(&intf->seq_lock);
+       return rv;
+ }
+@@ -1146,14 +1145,13 @@ static int intf_start_seq_timer(struct i
+                               long       msgid)
+ {
+       int           rv = -ENODEV;
+-      unsigned long flags;
+       unsigned char seq;
+       unsigned long seqid;
+       GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       /*
+        * We do this verification because the user can be deleted
+        * while a message is outstanding.
+@@ -1164,7 +1162,7 @@ static int intf_start_seq_timer(struct i
+               ent->timeout = ent->orig_timeout;
+               rv = 0;
+       }
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++      mutex_unlock(&intf->seq_lock);
+       return rv;
+ }
+@@ -1175,7 +1173,6 @@ static int intf_err_seq(struct ipmi_smi
+                       unsigned int err)
+ {
+       int                  rv = -ENODEV;
+-      unsigned long        flags;
+       unsigned char        seq;
+       unsigned long        seqid;
+       struct ipmi_recv_msg *msg = NULL;
+@@ -1183,7 +1180,7 @@ static int intf_err_seq(struct ipmi_smi
+       GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       /*
+        * We do this verification because the user can be deleted
+        * while a message is outstanding.
+@@ -1197,7 +1194,7 @@ static int intf_err_seq(struct ipmi_smi
+               msg = ent->recv_msg;
+               rv = 0;
+       }
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++      mutex_unlock(&intf->seq_lock);
+       if (msg)
+               deliver_err_response(intf, msg, err);
+@@ -1210,7 +1207,6 @@ int ipmi_create_user(unsigned int
+                    void                  *handler_data,
+                    struct ipmi_user      **user)
+ {
+-      unsigned long flags;
+       struct ipmi_user *new_user = NULL;
+       int           rv = 0;
+       struct ipmi_smi *intf;
+@@ -1278,9 +1274,9 @@ int ipmi_create_user(unsigned int
+       new_user->gets_events = false;
+       mutex_lock(&intf->users_mutex);
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       list_add(&new_user->link, &intf->users);
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++      mutex_unlock(&intf->seq_lock);
+       mutex_unlock(&intf->users_mutex);
+       if (handler->ipmi_watchdog_pretimeout)
+@@ -1326,7 +1322,6 @@ static void _ipmi_destroy_user(struct ip
+ {
+       struct ipmi_smi  *intf = user->intf;
+       int              i;
+-      unsigned long    flags;
+       struct cmd_rcvr  *rcvr;
+       struct cmd_rcvr  *rcvrs = NULL;
+       struct ipmi_recv_msg *msg, *msg2;
+@@ -1347,7 +1342,7 @@ static void _ipmi_destroy_user(struct ip
+       list_del(&user->link);
+       atomic_dec(&intf->nr_users);
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+               if (intf->seq_table[i].inuse
+                   && (intf->seq_table[i].recv_msg->user == user)) {
+@@ -1356,7 +1351,7 @@ static void _ipmi_destroy_user(struct ip
+                       ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+               }
+       }
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++      mutex_unlock(&intf->seq_lock);
+       /*
+        * Remove the user from the command receiver's table.  First
+@@ -2026,10 +2021,7 @@ static int i_ipmi_req_ipmb(struct ipmi_s
+                */
+               smi_msg->user_data = recv_msg;
+       } else {
+-              /* It's a command, so get a sequence for it. */
+-              unsigned long flags;
+-
+-              spin_lock_irqsave(&intf->seq_lock, flags);
++              mutex_lock(&intf->seq_lock);
+               if (is_maintenance_mode_cmd(msg))
+                       intf->ipmb_maintenance_mode_timeout =
+@@ -2087,7 +2079,7 @@ static int i_ipmi_req_ipmb(struct ipmi_s
+                * to be correct.
+                */
+ out_err:
+-              spin_unlock_irqrestore(&intf->seq_lock, flags);
++              mutex_unlock(&intf->seq_lock);
+       }
+       return rv;
+@@ -2205,10 +2197,7 @@ static int i_ipmi_req_lan(struct ipmi_sm
+                */
+               smi_msg->user_data = recv_msg;
+       } else {
+-              /* It's a command, so get a sequence for it. */
+-              unsigned long flags;
+-
+-              spin_lock_irqsave(&intf->seq_lock, flags);
++              mutex_lock(&intf->seq_lock);
+               /*
+                * Create a sequence number with a 1 second
+@@ -2257,7 +2246,7 @@ static int i_ipmi_req_lan(struct ipmi_sm
+                * to be correct.
+                */
+ out_err:
+-              spin_unlock_irqrestore(&intf->seq_lock, flags);
++              mutex_unlock(&intf->seq_lock);
+       }
+       return rv;
+@@ -3562,7 +3551,7 @@ int ipmi_add_smi(struct module         *
+       atomic_set(&intf->nr_users, 0);
+       intf->handlers = handlers;
+       intf->send_info = send_info;
+-      spin_lock_init(&intf->seq_lock);
++      mutex_init(&intf->seq_lock);
+       for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+               intf->seq_table[j].inuse = 0;
+               intf->seq_table[j].seqid = 0;
+@@ -4487,9 +4476,10 @@ static int handle_one_recv_msg(struct ip
+       if (msg->rsp_size < 2) {
+               /* Message is too small to be correct. */
+-              dev_warn(intf->si_dev,
+-                       "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+-                       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
++              dev_warn_ratelimited(intf->si_dev,
++                                   "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
++                                   (msg->data[0] >> 2) | 1,
++                                   msg->data[1], msg->rsp_size);
+ return_unspecified:
+               /* Generate an error response for the message. */
+@@ -4907,8 +4897,7 @@ smi_from_recv_msg(struct ipmi_smi *intf,
+ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+                             struct list_head *timeouts,
+                             unsigned long timeout_period,
+-                            int slot, unsigned long *flags,
+-                            bool *need_timer)
++                            int slot, bool *need_timer)
+ {
+       struct ipmi_recv_msg *msg;
+@@ -4960,7 +4949,7 @@ static void check_msg_timeout(struct ipm
+                       return;
+               }
+-              spin_unlock_irqrestore(&intf->seq_lock, *flags);
++              mutex_unlock(&intf->seq_lock);
+               /*
+                * Send the new message.  We send with a zero
+@@ -4981,7 +4970,7 @@ static void check_msg_timeout(struct ipm
+               } else
+                       ipmi_free_smi_msg(smi_msg);
+-              spin_lock_irqsave(&intf->seq_lock, *flags);
++              mutex_lock(&intf->seq_lock);
+       }
+ }
+@@ -5008,7 +4997,7 @@ static bool ipmi_timeout_handler(struct
+        * list.
+        */
+       INIT_LIST_HEAD(&timeouts);
+-      spin_lock_irqsave(&intf->seq_lock, flags);
++      mutex_lock(&intf->seq_lock);
+       if (intf->ipmb_maintenance_mode_timeout) {
+               if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
+                       intf->ipmb_maintenance_mode_timeout = 0;
+@@ -5018,8 +5007,8 @@ static bool ipmi_timeout_handler(struct
+       for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+               check_msg_timeout(intf, &intf->seq_table[i],
+                                 &timeouts, timeout_period, i,
+-                                &flags, &need_timer);
+-      spin_unlock_irqrestore(&intf->seq_lock, flags);
++                                &need_timer);
++      mutex_unlock(&intf->seq_lock);
+       list_for_each_entry_safe(msg, msg2, &timeouts, link)
+               deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
diff --git a/queue-6.17/ipmi-rework-user-message-limit-handling.patch b/queue-6.17/ipmi-rework-user-message-limit-handling.patch
new file mode 100644 (file)
index 0000000..bf2a464
--- /dev/null
@@ -0,0 +1,648 @@
+From b52da4054ee0bf9ecb44996f2c83236ff50b3812 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 5 Sep 2025 11:33:39 -0500
+Subject: ipmi: Rework user message limit handling
+
+From: Corey Minyard <corey@minyard.net>
+
+commit b52da4054ee0bf9ecb44996f2c83236ff50b3812 upstream.
+
+The limit on the number of user messages had a number of issues,
+improper counting in some cases and a use after free.
+
+Restructure how this is all done to handle more in the receive message
+allocation routine, so all refcouting and user message limit counts
+are done in that routine.  It's a lot cleaner and safer.
+
+Reported-by: Gilles BULOZ <gilles.buloz@kontron.com>
+Closes: https://lore.kernel.org/lkml/aLsw6G0GyqfpKs2S@mail.minyard.net/
+Fixes: 8e76741c3d8b ("ipmi: Add a limit on the number of users that may use IPMI")
+Cc: <stable@vger.kernel.org> # 4.19
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Tested-by: Gilles BULOZ <gilles.buloz@kontron.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |  420 +++++++++++++++++-------------------
+ 1 file changed, 200 insertions(+), 220 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -38,7 +38,9 @@
+ #define IPMI_DRIVER_VERSION "39.2"
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++                                 struct ipmi_user *user);
+ static int ipmi_init_msghandler(void);
+ static void smi_work(struct work_struct *t);
+ static void handle_new_recv_msgs(struct ipmi_smi *intf);
+@@ -955,7 +957,6 @@ static int deliver_response(struct ipmi_
+                * risk.  At this moment, simply skip it in that case.
+                */
+               ipmi_free_recv_msg(msg);
+-              atomic_dec(&msg->user->nr_msgs);
+       } else {
+               /*
+                * Deliver it in smi_work.  The message will hold a
+@@ -1616,8 +1617,7 @@ int ipmi_set_gets_events(struct ipmi_use
+               }
+               list_for_each_entry_safe(msg, msg2, &msgs, link) {
+-                      msg->user = user;
+-                      kref_get(&user->refcount);
++                      ipmi_set_recv_msg_user(msg, user);
+                       deliver_local_response(intf, msg);
+               }
+       }
+@@ -2288,22 +2288,15 @@ static int i_ipmi_request(struct ipmi_us
+       int run_to_completion = READ_ONCE(intf->run_to_completion);
+       int rv = 0;
+-      if (user) {
+-              if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+-                      /* Decrement will happen at the end of the routine. */
+-                      rv = -EBUSY;
+-                      goto out;
+-              }
+-      }
+-
+-      if (supplied_recv)
++      if (supplied_recv) {
+               recv_msg = supplied_recv;
+-      else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (recv_msg == NULL) {
+-                      rv = -ENOMEM;
+-                      goto out;
+-              }
++              recv_msg->user = user;
++              if (user)
++                      atomic_inc(&user->nr_msgs);
++      } else {
++              recv_msg = ipmi_alloc_recv_msg(user);
++              if (IS_ERR(recv_msg))
++                      return PTR_ERR(recv_msg);
+       }
+       recv_msg->user_msg_data = user_msg_data;
+@@ -2314,8 +2307,7 @@ static int i_ipmi_request(struct ipmi_us
+               if (smi_msg == NULL) {
+                       if (!supplied_recv)
+                               ipmi_free_recv_msg(recv_msg);
+-                      rv = -ENOMEM;
+-                      goto out;
++                      return -ENOMEM;
+               }
+       }
+@@ -2326,10 +2318,6 @@ static int i_ipmi_request(struct ipmi_us
+               goto out_err;
+       }
+-      recv_msg->user = user;
+-      if (user)
+-              /* The put happens when the message is freed. */
+-              kref_get(&user->refcount);
+       recv_msg->msgid = msgid;
+       /*
+        * Store the message to send in the receive message so timeout
+@@ -2358,8 +2346,10 @@ static int i_ipmi_request(struct ipmi_us
+       if (rv) {
+ out_err:
+-              ipmi_free_smi_msg(smi_msg);
+-              ipmi_free_recv_msg(recv_msg);
++              if (!supplied_smi)
++                      ipmi_free_smi_msg(smi_msg);
++              if (!supplied_recv)
++                      ipmi_free_recv_msg(recv_msg);
+       } else {
+               dev_dbg(intf->si_dev, "Send: %*ph\n",
+                       smi_msg->data_size, smi_msg->data);
+@@ -2369,9 +2359,6 @@ out_err:
+       if (!run_to_completion)
+               mutex_unlock(&intf->users_mutex);
+-out:
+-      if (rv && user)
+-              atomic_dec(&user->nr_msgs);
+       return rv;
+ }
+@@ -3862,7 +3849,7 @@ static int handle_ipmb_get_msg_cmd(struc
+       unsigned char            chan;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_ipmb_addr    *ipmb_addr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       if (msg->rsp_size < 10) {
+               /* Message not big enough, just ignore it. */
+@@ -3883,9 +3870,8 @@ static int handle_ipmb_get_msg_cmd(struc
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -3915,47 +3901,41 @@ static int handle_ipmb_get_msg_cmd(struc
+                * causes it to not be freed or queued.
+                */
+               rv = -1;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_ipmi_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+-                      ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+-                      ipmb_addr->slave_addr = msg->rsp[6];
+-                      ipmb_addr->lun = msg->rsp[7] & 3;
+-                      ipmb_addr->channel = msg->rsp[3] & 0xf;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
++              ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
++              ipmb_addr->slave_addr = msg->rsp[6];
++              ipmb_addr->lun = msg->rsp[7] & 3;
++              ipmb_addr->channel = msg->rsp[3] & 0xf;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = msg->rsp[7] >> 2;
+-                      recv_msg->msg.netfn = msg->rsp[4] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[8];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = msg->rsp[7] >> 2;
++              recv_msg->msg.netfn = msg->rsp[4] >> 2;
++              recv_msg->msg.cmd = msg->rsp[8];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * We chop off 10, not 9 bytes because the checksum
+-                       * at the end also needs to be removed.
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 10;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[9],
+-                             msg->rsp_size - 10);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * We chop off 10, not 9 bytes because the checksum
++               * at the end also needs to be removed.
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 10;
++              memcpy(recv_msg->msg_data, &msg->rsp[9],
++                     msg->rsp_size - 10);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -3968,7 +3948,7 @@ static int handle_ipmb_direct_rcv_cmd(st
+       int                      rv = 0;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_ipmb_direct_addr *daddr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       unsigned char netfn = msg->rsp[0] >> 2;
+       unsigned char cmd = msg->rsp[3];
+@@ -3977,9 +3957,8 @@ static int handle_ipmb_direct_rcv_cmd(st
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4001,44 +3980,38 @@ static int handle_ipmb_direct_rcv_cmd(st
+                * causes it to not be freed or queued.
+                */
+               rv = -1;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_ipmi_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+-                      daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+-                      daddr->channel = 0;
+-                      daddr->slave_addr = msg->rsp[1];
+-                      daddr->rs_lun = msg->rsp[0] & 3;
+-                      daddr->rq_lun = msg->rsp[2] & 3;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
++              daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
++              daddr->channel = 0;
++              daddr->slave_addr = msg->rsp[1];
++              daddr->rs_lun = msg->rsp[0] & 3;
++              daddr->rq_lun = msg->rsp[2] & 3;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = (msg->rsp[2] >> 2);
+-                      recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[3];
+-                      recv_msg->msg.data = recv_msg->msg_data;
+-
+-                      recv_msg->msg.data_len = msg->rsp_size - 4;
+-                      memcpy(recv_msg->msg_data, msg->rsp + 4,
+-                             msg->rsp_size - 4);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = (msg->rsp[2] >> 2);
++              recv_msg->msg.netfn = msg->rsp[0] >> 2;
++              recv_msg->msg.cmd = msg->rsp[3];
++              recv_msg->msg.data = recv_msg->msg_data;
++
++              recv_msg->msg.data_len = msg->rsp_size - 4;
++              memcpy(recv_msg->msg_data, msg->rsp + 4,
++                     msg->rsp_size - 4);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4152,7 +4125,7 @@ static int handle_lan_get_msg_cmd(struct
+       unsigned char            chan;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_lan_addr     *lan_addr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       if (msg->rsp_size < 12) {
+               /* Message not big enough, just ignore it. */
+@@ -4173,9 +4146,8 @@ static int handle_lan_get_msg_cmd(struct
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4206,49 +4178,44 @@ static int handle_lan_get_msg_cmd(struct
+                * causes it to not be freed or queued.
+                */
+               rv = -1;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_ipmi_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+-                      lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+-                      lan_addr->session_handle = msg->rsp[4];
+-                      lan_addr->remote_SWID = msg->rsp[8];
+-                      lan_addr->local_SWID = msg->rsp[5];
+-                      lan_addr->lun = msg->rsp[9] & 3;
+-                      lan_addr->channel = msg->rsp[3] & 0xf;
+-                      lan_addr->privilege = msg->rsp[3] >> 4;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
++              lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
++              lan_addr->session_handle = msg->rsp[4];
++              lan_addr->remote_SWID = msg->rsp[8];
++              lan_addr->local_SWID = msg->rsp[5];
++              lan_addr->lun = msg->rsp[9] & 3;
++              lan_addr->channel = msg->rsp[3] & 0xf;
++              lan_addr->privilege = msg->rsp[3] >> 4;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = msg->rsp[9] >> 2;
+-                      recv_msg->msg.netfn = msg->rsp[6] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[10];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = msg->rsp[9] >> 2;
++              recv_msg->msg.netfn = msg->rsp[6] >> 2;
++              recv_msg->msg.cmd = msg->rsp[10];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * We chop off 12, not 11 bytes because the checksum
+-                       * at the end also needs to be removed.
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 12;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[11],
+-                             msg->rsp_size - 12);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * We chop off 12, not 11 bytes because the checksum
++               * at the end also needs to be removed.
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 12;
++              memcpy(recv_msg->msg_data, &msg->rsp[11],
++                     msg->rsp_size - 12);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4270,7 +4237,7 @@ static int handle_oem_get_msg_cmd(struct
+       unsigned char         chan;
+       struct ipmi_user *user = NULL;
+       struct ipmi_system_interface_addr *smi_addr;
+-      struct ipmi_recv_msg  *recv_msg;
++      struct ipmi_recv_msg  *recv_msg = NULL;
+       /*
+        * We expect the OEM SW to perform error checking
+@@ -4299,9 +4266,8 @@ static int handle_oem_get_msg_cmd(struct
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4314,48 +4280,42 @@ static int handle_oem_get_msg_cmd(struct
+                */
+               rv = 0;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_ipmi_user);
+-              } else {
+-                      /*
+-                       * OEM Messages are expected to be delivered via
+-                       * the system interface to SMS software.  We might
+-                       * need to visit this again depending on OEM
+-                       * requirements
+-                       */
+-                      smi_addr = ((struct ipmi_system_interface_addr *)
+-                                  &recv_msg->addr);
+-                      smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+-                      smi_addr->channel = IPMI_BMC_CHANNEL;
+-                      smi_addr->lun = msg->rsp[0] & 3;
+-
+-                      recv_msg->user = user;
+-                      recv_msg->user_msg_data = NULL;
+-                      recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+-                      recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[1];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++      } else if (!IS_ERR(recv_msg)) {
++              /*
++               * OEM Messages are expected to be delivered via
++               * the system interface to SMS software.  We might
++               * need to visit this again depending on OEM
++               * requirements
++               */
++              smi_addr = ((struct ipmi_system_interface_addr *)
++                          &recv_msg->addr);
++              smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
++              smi_addr->channel = IPMI_BMC_CHANNEL;
++              smi_addr->lun = msg->rsp[0] & 3;
++
++              recv_msg->user_msg_data = NULL;
++              recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
++              recv_msg->msg.netfn = msg->rsp[0] >> 2;
++              recv_msg->msg.cmd = msg->rsp[1];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * The message starts at byte 4 which follows the
+-                       * Channel Byte in the "GET MESSAGE" command
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 4;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[4],
+-                             msg->rsp_size - 4);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * The message starts at byte 4 which follows the
++               * Channel Byte in the "GET MESSAGE" command
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 4;
++              memcpy(recv_msg->msg_data, &msg->rsp[4],
++                     msg->rsp_size - 4);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4413,8 +4373,8 @@ static int handle_read_event_rsp(struct
+               if (!user->gets_events)
+                       continue;
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
++              recv_msg = ipmi_alloc_recv_msg(user);
++              if (IS_ERR(recv_msg)) {
+                       mutex_unlock(&intf->users_mutex);
+                       list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+                                                link) {
+@@ -4435,8 +4395,6 @@ static int handle_read_event_rsp(struct
+               deliver_count++;
+               copy_event_into_recv_msg(recv_msg, msg);
+-              recv_msg->user = user;
+-              kref_get(&user->refcount);
+               list_add_tail(&recv_msg->link, &msgs);
+       }
+       mutex_unlock(&intf->users_mutex);
+@@ -4452,8 +4410,8 @@ static int handle_read_event_rsp(struct
+                * No one to receive the message, put it in queue if there's
+                * not already too many things in the queue.
+                */
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
++              recv_msg = ipmi_alloc_recv_msg(NULL);
++              if (IS_ERR(recv_msg)) {
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling
+@@ -4868,12 +4826,10 @@ static void smi_work(struct work_struct
+               list_del(&msg->link);
+-              if (refcount_read(&user->destroyed) == 0) {
++              if (refcount_read(&user->destroyed) == 0)
+                       ipmi_free_recv_msg(msg);
+-              } else {
+-                      atomic_dec(&user->nr_msgs);
++              else
+                       user->handler->ipmi_recv_hndl(msg, user->handler_data);
+-              }
+       }
+       mutex_unlock(&intf->user_msgs_mutex);
+@@ -5190,27 +5146,51 @@ static void free_recv_msg(struct ipmi_re
+               kfree(msg);
+ }
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
+ {
+       struct ipmi_recv_msg *rv;
++      if (user) {
++              if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
++                      atomic_dec(&user->nr_msgs);
++                      return ERR_PTR(-EBUSY);
++              }
++      }
++
+       rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+-      if (rv) {
+-              rv->user = NULL;
+-              rv->done = free_recv_msg;
+-              atomic_inc(&recv_msg_inuse_count);
++      if (!rv) {
++              if (user)
++                      atomic_dec(&user->nr_msgs);
++              return ERR_PTR(-ENOMEM);
+       }
++
++      rv->user = user;
++      rv->done = free_recv_msg;
++      if (user)
++              kref_get(&user->refcount);
++      atomic_inc(&recv_msg_inuse_count);
+       return rv;
+ }
+ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+ {
+-      if (msg->user && !oops_in_progress)
++      if (msg->user && !oops_in_progress) {
++              atomic_dec(&msg->user->nr_msgs);
+               kref_put(&msg->user->refcount, free_ipmi_user);
++      }
+       msg->done(msg);
+ }
+ EXPORT_SYMBOL(ipmi_free_recv_msg);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++                                 struct ipmi_user *user)
++{
++      WARN_ON_ONCE(msg->user); /* User should not be set. */
++      msg->user = user;
++      atomic_inc(&user->nr_msgs);
++      kref_get(&user->refcount);
++}
++
+ static atomic_t panic_done_count = ATOMIC_INIT(0);
+ static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
diff --git a/queue-6.17/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch b/queue-6.17/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch
new file mode 100644 (file)
index 0000000..214f349
--- /dev/null
@@ -0,0 +1,74 @@
+From a15f37a40145c986cdf289a4b88390f35efdecc4 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 15 Sep 2025 14:09:17 +0200
+Subject: kernel/sys.c: fix the racy usage of task_lock(tsk->group_leader) in sys_prlimit64() paths
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit a15f37a40145c986cdf289a4b88390f35efdecc4 upstream.
+
+The usage of task_lock(tsk->group_leader) in sys_prlimit64()->do_prlimit()
+path is very broken.
+
+sys_prlimit64() does get_task_struct(tsk) but this only protects task_struct
+itself. If tsk != current and tsk is not a leader, this process can exit/exec
+and task_lock(tsk->group_leader) may use the already freed task_struct.
+
+Another problem is that sys_prlimit64() can race with mt-exec which changes
+->group_leader. In this case do_prlimit() may take the wrong lock, or (worse)
+->group_leader may change between task_lock() and task_unlock().
+
+Change sys_prlimit64() to take tasklist_lock when necessary. This is not
+nice, but I don't see a better fix for -stable.
+
+Link: https://lkml.kernel.org/r/20250915120917.GA27702@redhat.com
+Fixes: 18c91bb2d872 ("prlimit: do not grab the tasklist_lock")
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Cc: Mateusz Guzik <mjguzik@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sys.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1734,6 +1734,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, u
+       struct rlimit old, new;
+       struct task_struct *tsk;
+       unsigned int checkflags = 0;
++      bool need_tasklist;
+       int ret;
+       if (old_rlim)
+@@ -1760,8 +1761,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, u
+       get_task_struct(tsk);
+       rcu_read_unlock();
+-      ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
+-                      old_rlim ? &old : NULL);
++      need_tasklist = !same_thread_group(tsk, current);
++      if (need_tasklist) {
++              /*
++               * Ensure we can't race with group exit or de_thread(),
++               * so tsk->group_leader can't be freed or changed until
++               * read_unlock(tasklist_lock) below.
++               */
++              read_lock(&tasklist_lock);
++              if (!pid_alive(tsk))
++                      ret = -ESRCH;
++      }
++
++      if (!ret) {
++              ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
++                              old_rlim ? &old : NULL);
++      }
++
++      if (need_tasklist)
++              read_unlock(&tasklist_lock);
+       if (!ret && old_rlim) {
+               rlim_to_rlim64(&old, &old64);
index 68900406b63c60429385c4b95d890b74893b39b4..0c4759f192b32cbe5fa01b833e4f451eda3ab139 100644 (file)
@@ -196,3 +196,37 @@ blk-crypto-fix-missing-blktrace-bio-split-events.patch
 btrfs-avoid-potential-out-of-bounds-in-btrfs_encode_fh.patch
 bus-mhi-ep-fix-chained-transfer-handling-in-read-path.patch
 bus-mhi-host-do-not-use-uninitialized-dev-pointer-in-mhi_init_irq_setup.patch
+cdx-fix-device-node-reference-leak-in-cdx_msi_domain_init.patch
+clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
+clk-samsung-exynos990-use-pll_con0-for-pll-parent-muxes.patch
+clk-samsung-exynos990-fix-cmu_top-mux-div-bit-widths.patch
+clk-samsung-exynos990-replace-bogus-divs-with-fixed-factor-clocks.patch
+copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
+cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
+cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
+crypto-aspeed-fix-dma_unmap_sg-direction.patch
+crypto-atmel-fix-dma_unmap_sg-direction.patch
+crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
+eventpoll-replace-rwlock-with-spinlock.patch
+fbdev-fix-logic-error-in-offb-name-match.patch
+fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
+fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
+fsnotify-pass-correct-offset-to-fsnotify_mmap_perm.patch
+fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
+fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
+gpio-mpfs-fix-setting-gpio-direction-to-output.patch
+i3c-fix-default-i2c-adapter-timeout-value.patch
+iio-adc-pac1934-fix-channel-disable-configuration.patch
+iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
+iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
+iio-frequency-adf4350-fix-prescaler-usage.patch
+iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
+iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
+init-handle-bootloader-identifier-in-kernel-parameters.patch
+iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch
+iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
+iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch
+iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
+ipmi-rework-user-message-limit-handling.patch
+ipmi-msghandler-change-seq_lock-to-a-mutex.patch
+kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch