git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 16 Oct 2025 12:48:08 +0000 (14:48 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 16 Oct 2025 12:48:08 +0000 (14:48 +0200)
added patches:
clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
crypto-aspeed-fix-dma_unmap_sg-direction.patch
crypto-atmel-fix-dma_unmap_sg-direction.patch
crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
eventpoll-replace-rwlock-with-spinlock.patch
fbdev-fix-logic-error-in-offb-name-match.patch
fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
iio-adc-pac1934-fix-channel-disable-configuration.patch
iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
iio-frequency-adf4350-fix-prescaler-usage.patch
iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
init-handle-bootloader-identifier-in-kernel-parameters.patch
iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch

24 files changed:
queue-6.12/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch [new file with mode: 0644]
queue-6.12/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch [new file with mode: 0644]
queue-6.12/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch [new file with mode: 0644]
queue-6.12/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch [new file with mode: 0644]
queue-6.12/crypto-aspeed-fix-dma_unmap_sg-direction.patch [new file with mode: 0644]
queue-6.12/crypto-atmel-fix-dma_unmap_sg-direction.patch [new file with mode: 0644]
queue-6.12/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch [new file with mode: 0644]
queue-6.12/eventpoll-replace-rwlock-with-spinlock.patch [new file with mode: 0644]
queue-6.12/fbdev-fix-logic-error-in-offb-name-match.patch [new file with mode: 0644]
queue-6.12/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch [new file with mode: 0644]
queue-6.12/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch [new file with mode: 0644]
queue-6.12/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch [new file with mode: 0644]
queue-6.12/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch [new file with mode: 0644]
queue-6.12/iio-adc-pac1934-fix-channel-disable-configuration.patch [new file with mode: 0644]
queue-6.12/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch [new file with mode: 0644]
queue-6.12/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch [new file with mode: 0644]
queue-6.12/iio-frequency-adf4350-fix-prescaler-usage.patch [new file with mode: 0644]
queue-6.12/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch [new file with mode: 0644]
queue-6.12/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch [new file with mode: 0644]
queue-6.12/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch [new file with mode: 0644]
queue-6.12/init-handle-bootloader-identifier-in-kernel-parameters.patch [new file with mode: 0644]
queue-6.12/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch [new file with mode: 0644]
queue-6.12/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch b/queue-6.12/clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
new file mode 100644 (file)
index 0000000..8d1527d
--- /dev/null
@@ -0,0 +1,38 @@
+From 57c8e9da3dfe606b918d8f193837ebf2213a9545 Mon Sep 17 00:00:00 2001
+From: Abel Vesa <abel.vesa@linaro.org>
+Date: Wed, 30 Jul 2025 19:11:12 +0300
+Subject: clk: qcom: tcsrcc-x1e80100: Set the bi_tcxo as parent to eDP refclk
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+commit 57c8e9da3dfe606b918d8f193837ebf2213a9545 upstream.
+
+All the other ref clocks provided by this driver have the bi_tcxo
+as parent. The eDP refclk is the only one without a parent, which leads
+to its rate being reported as 0. So set its parent to bi_tcxo, just like
+the rest of the refclks.
+
+Cc: stable@vger.kernel.org # v6.9
+Fixes: 06aff116199c ("clk: qcom: Add TCSR clock driver for x1e80100")
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250730-clk-qcom-tcsrcc-x1e80100-parent-edp-refclk-v1-1-7a36ef06e045@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/tcsrcc-x1e80100.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
++++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
+@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref
+               .enable_mask = BIT(0),
+               .hw.init = &(const struct clk_init_data) {
+                       .name = "tcsr_edp_clkref_en",
++                      .parent_data = &(const struct clk_parent_data){
++                              .index = DT_BI_TCXO_PAD,
++                      },
++                      .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
diff --git a/queue-6.12/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch b/queue-6.12/copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
new file mode 100644 (file)
index 0000000..ffdb87a
--- /dev/null
@@ -0,0 +1,58 @@
+From 04ff48239f46e8b493571e260bd0e6c3a6400371 Mon Sep 17 00:00:00 2001
+From: Simon Schuster <schuster.simon@siemens-energy.com>
+Date: Mon, 1 Sep 2025 15:09:50 +0200
+Subject: copy_sighand: Handle architectures where sizeof(unsigned long) < sizeof(u64)
+
+From: Simon Schuster <schuster.simon@siemens-energy.com>
+
+commit 04ff48239f46e8b493571e260bd0e6c3a6400371 upstream.
+
+With the introduction of clone3 in commit 7f192e3cd316 ("fork: add
+clone3") the effective bit width of clone_flags on all architectures was
+increased from 32-bit to 64-bit. However, the signature of the copy_*
+helper functions (e.g., copy_sighand) used by copy_process was not
+adapted.
+
+As such, they truncate the flags on any 32-bit architectures that
+support clone3 (arc, arm, csky, m68k, microblaze, mips32, openrisc,
+parisc32, powerpc32, riscv32, x86-32 and xtensa).
+
+For copy_sighand with CLONE_CLEAR_SIGHAND being an actual u64
+constant, this triggers an observable bug in kernel selftest
+clone3_clear_sighand:
+
+        if (clone_flags & CLONE_CLEAR_SIGHAND)
+
+in function copy_sighand within fork.c will always fail given:
+
+        unsigned long /* == uint32_t */ clone_flags
+        #define CLONE_CLEAR_SIGHAND 0x100000000ULL
+
+This commit fixes the bug by always passing clone_flags to copy_sighand
+via their declared u64 type, invariant of architecture-dependent integer
+sizes.
+
+Fixes: b612e5df4587 ("clone3: add CLONE_CLEAR_SIGHAND")
+Cc: stable@vger.kernel.org # linux-5.5+
+Signed-off-by: Simon Schuster <schuster.simon@siemens-energy.com>
+Link: https://lore.kernel.org/20250901-nios2-implement-clone3-v2-1-53fcf5577d57@siemens-energy.com
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/fork.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1807,7 +1807,7 @@ static int copy_files(unsigned long clon
+       return 0;
+ }
+-static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
++static int copy_sighand(u64 clone_flags, struct task_struct *tsk)
+ {
+       struct sighand_struct *sig;
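The truncation described above is easy to reproduce outside the kernel. The
following is a minimal, hypothetical userspace sketch (not part of the patch)
that models the old 32-bit "unsigned long" parameter with uint32_t and the
fixed signature with uint64_t:

    #include <stdint.h>
    #include <stdio.h>

    #define CLONE_CLEAR_SIGHAND 0x100000000ULL   /* bit 32, as in the UAPI */

    /* Models the old signature on 32-bit (sizeof(unsigned long) == 4):
     * the 64-bit flags value is silently truncated at the call. */
    static int sees_clear_sighand_32(uint32_t clone_flags)
    {
            return (clone_flags & CLONE_CLEAR_SIGHAND) != 0;
    }

    /* Models the fixed signature: the full 64-bit value is preserved. */
    static int sees_clear_sighand_u64(uint64_t clone_flags)
    {
            return (clone_flags & CLONE_CLEAR_SIGHAND) != 0;
    }

    int main(void)
    {
            uint64_t flags = CLONE_CLEAR_SIGHAND;

            /* Prints "truncated=0 u64=1": the flag is lost in the implicit
             * narrowing conversion, as in copy_sighand() before the fix. */
            printf("truncated=%d u64=%d\n",
                   sees_clear_sighand_32(flags),
                   sees_clear_sighand_u64(flags));
            return 0;
    }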
diff --git a/queue-6.12/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch b/queue-6.12/cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
new file mode 100644 (file)
index 0000000..ab94da3
--- /dev/null
@@ -0,0 +1,65 @@
+From f965d111e68f4a993cc44d487d416e3d954eea11 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 26 Sep 2025 12:19:41 +0200
+Subject: cpufreq: CPPC: Avoid using CPUFREQ_ETERNAL as transition delay
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit f965d111e68f4a993cc44d487d416e3d954eea11 upstream.
+
+If cppc_get_transition_latency() returns CPUFREQ_ETERNAL to indicate a
+failure to retrieve the transition latency value from the platform
+firmware, the CPPC cpufreq driver will use that value (converted to
+microseconds) as the policy transition delay, but it is way too large
+for any practical use.
+
+Address this by making the driver use cpufreq's default
+transition latency value (in microseconds) as the transition delay
+if CPUFREQ_ETERNAL is returned by cppc_get_transition_latency().
+
+Fixes: d4f3388afd48 ("cpufreq / CPPC: Set platform specific transition_delay_us")
+Cc: 5.19+ <stable@vger.kernel.org> # 5.19
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Jie Zhan <zhanjie9@hisilicon.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Qais Yousef <qyousef@layalina.io>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cppc_cpufreq.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -339,6 +339,16 @@ static int cppc_verify_policy(struct cpu
+       return 0;
+ }
++static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
++{
++      unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
++
++      if (transition_latency_ns == CPUFREQ_ETERNAL)
++              return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
++
++      return transition_latency_ns / NSEC_PER_USEC;
++}
++
+ /*
+  * The PCC subspace describes the rate at which platform can accept commands
+  * on the shared PCC channel (including READs which do not count towards freq
+@@ -361,12 +371,12 @@ static unsigned int cppc_cpufreq_get_tra
+                       return 10000;
+               }
+       }
+-      return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++      return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #else
+ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+ {
+-      return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++      return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #endif
diff --git a/queue-6.12/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch b/queue-6.12/cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
new file mode 100644 (file)
index 0000000..e86b792
--- /dev/null
@@ -0,0 +1,58 @@
+From 69e5d50fcf4093fb3f9f41c4f931f12c2ca8c467 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 5 Sep 2025 15:52:03 +0200
+Subject: cpufreq: intel_pstate: Fix object lifecycle issue in update_qos_request()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 69e5d50fcf4093fb3f9f41c4f931f12c2ca8c467 upstream.
+
+The cpufreq_cpu_put() call in update_qos_request() takes place too early
+because the latter subsequently calls freq_qos_update_request() that
+indirectly accesses the policy object in question through the QoS request
+object passed to it.
+
+Fortunately, update_qos_request() is called under intel_pstate_driver_lock,
+so this issue does not matter for changing the intel_pstate operation
+mode, but it theoretically can cause a crash to occur on CPU device hot
+removal (which currently can only happen in virt, but it is formally
+supported nevertheless).
+
+Address this issue by modifying update_qos_request() to drop the
+reference to the policy later.
+
+Fixes: da5c504c7aae ("cpufreq: intel_pstate: Implement QoS supported freq constraints")
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Zihuan Zhang <zhangzihuan@kylinos.cn>
+Link: https://patch.msgid.link/2255671.irdbgypaU6@rafael.j.wysocki
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1582,10 +1582,10 @@ static void update_qos_request(enum freq
+                       continue;
+               req = policy->driver_data;
+-              cpufreq_cpu_put(policy);
+-
+-              if (!req)
++              if (!req) {
++                      cpufreq_cpu_put(policy);
+                       continue;
++              }
+               if (hwp_active)
+                       intel_pstate_get_hwp_cap(cpu);
+@@ -1601,6 +1601,8 @@ static void update_qos_request(enum freq
+               if (freq_qos_update_request(req, freq) < 0)
+                       pr_warn("Failed to update freq constraint: CPU%d\n", i);
++
++              cpufreq_cpu_put(policy);
+       }
+ }
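The rule the fix enforces generalizes to any refcounted object: the reference
obtained from a _get() helper must be held across the last access that can
reach the object. A hedged standalone sketch follows; policy_put() is
illustrative and merely stands in for cpufreq_cpu_put(), it is not a kernel
API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct policy {
            int refcount;
            char *driver_data;      /* the per-CPU QoS request lives here */
    };

    static void policy_put(struct policy *p)
    {
            if (--p->refcount == 0) {       /* last ref: object is torn down */
                    free(p->driver_data);
                    free(p);
            }
    }

    static void update_one(struct policy *p)    /* caller passed a reference */
    {
            char *req = p->driver_data;

            if (!req) {
                    policy_put(p);  /* nothing to update, drop the ref early */
                    return;
            }

            printf("updating %s\n", req);   /* must not race with teardown */
            policy_put(p);          /* drop the ref only after the last use */
    }

    int main(void)
    {
            struct policy *p = calloc(1, sizeof(*p));

            if (!p)
                    return 1;
            p->refcount = 1;
            p->driver_data = strdup("qos-request");
            update_one(p);
            return 0;
    }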
diff --git a/queue-6.12/crypto-aspeed-fix-dma_unmap_sg-direction.patch b/queue-6.12/crypto-aspeed-fix-dma_unmap_sg-direction.patch
new file mode 100644 (file)
index 0000000..6a0df52
--- /dev/null
@@ -0,0 +1,33 @@
+From 838d2d51513e6d2504a678e906823cfd2ecaaa22 Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 10 Sep 2025 10:22:31 +0200
+Subject: crypto: aspeed - Fix dma_unmap_sg() direction
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit 838d2d51513e6d2504a678e906823cfd2ecaaa22 upstream.
+
+It seems like everywhere in this file, when the request is not
+bidirectional, req->src is mapped with DMA_TO_DEVICE and req->dst is
+mapped with DMA_FROM_DEVICE.
+
+Fixes: 62f58b1637b7 ("crypto: aspeed - add HACE crypto driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/aspeed/aspeed-hace-crypto.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+@@ -346,7 +346,7 @@ free_req:
+       } else {
+               dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+-                           DMA_TO_DEVICE);
++                           DMA_FROM_DEVICE);
+               dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+                            DMA_TO_DEVICE);
+       }
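As a reference for the pairing rule behind this fix (and the atmel fix that
follows), a minimal kernel-style sketch, not taken from either driver: each
dma_unmap_sg() must use the direction that was passed to the matching
dma_map_sg().

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int do_one_request(struct device *dev,
                              struct scatterlist *src, int src_nents,
                              struct scatterlist *dst, int dst_nents)
    {
            if (!dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE))
                    return -ENOMEM;

            if (!dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
                    dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                    return -ENOMEM;
            }

            /* ... program the DMA engine and wait for completion ... */

            /* Unmap with the directions used at map time, never swapped. */
            dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
            dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
            return 0;
    }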
diff --git a/queue-6.12/crypto-atmel-fix-dma_unmap_sg-direction.patch b/queue-6.12/crypto-atmel-fix-dma_unmap_sg-direction.patch
new file mode 100644 (file)
index 0000000..08324f4
--- /dev/null
@@ -0,0 +1,32 @@
+From f5d643156ef62216955c119216d2f3815bd51cb1 Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 3 Sep 2025 10:34:46 +0200
+Subject: crypto: atmel - Fix dma_unmap_sg() direction
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit f5d643156ef62216955c119216d2f3815bd51cb1 upstream.
+
+It seems like everywhere in this file, dd->in_sg is mapped with
+DMA_TO_DEVICE and dd->out_sg is mapped with DMA_FROM_DEVICE.
+
+Fixes: 13802005d8f2 ("crypto: atmel - add Atmel DES/TDES driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/atmel-tdes.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct
+       if (err && (dd->flags & TDES_FLAGS_FAST)) {
+               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+-              dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
++              dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+       }
+       return err;
diff --git a/queue-6.12/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch b/queue-6.12/crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
new file mode 100644 (file)
index 0000000..28aca87
--- /dev/null
@@ -0,0 +1,32 @@
+From 21140e5caf019e4a24e1ceabcaaa16bd693b393f Mon Sep 17 00:00:00 2001
+From: Thomas Fourier <fourier.thomas@gmail.com>
+Date: Wed, 3 Sep 2025 10:06:46 +0200
+Subject: crypto: rockchip - Fix dma_unmap_sg() nents value
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+commit 21140e5caf019e4a24e1ceabcaaa16bd693b393f upstream.
+
+The dma_unmap_sg() function should be called with the same nents as the
+dma_map_sg(), not the value the map function returned.
+
+Fixes: 57d67c6e8219 ("crypto: rockchip - rework by using crypto_engine")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/rockchip/rk3288_crypto_ahash.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -252,7 +252,7 @@ static void rk_hash_unprepare(struct cry
+       struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+       struct rk_crypto_info *rkc = rctx->dev;
+-      dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++      dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ }
+ static int rk_hash_run(struct crypto_engine *engine, void *breq)
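A minimal kernel-style illustration (not driver code) of the nents rule:
dma_map_sg() may return fewer entries than it was given because adjacent
entries can be coalesced, but dma_unmap_sg() must still be called with the
original nents.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int map_then_unmap(struct device *dev, struct scatterlist *sg,
                              int nents)
    {
            int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

            if (!mapped)
                    return -ENOMEM;

            /* ... use the 'mapped' entries for the transfer itself ... */

            dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);    /* original nents */
            return 0;
    }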
diff --git a/queue-6.12/eventpoll-replace-rwlock-with-spinlock.patch b/queue-6.12/eventpoll-replace-rwlock-with-spinlock.patch
new file mode 100644 (file)
index 0000000..9e0e241
--- /dev/null
@@ -0,0 +1,389 @@
+From 0c43094f8cc9d3d99d835c0ac9c4fe1ccc62babd Mon Sep 17 00:00:00 2001
+From: Nam Cao <namcao@linutronix.de>
+Date: Tue, 15 Jul 2025 14:46:34 +0200
+Subject: eventpoll: Replace rwlock with spinlock
+
+From: Nam Cao <namcao@linutronix.de>
+
+commit 0c43094f8cc9d3d99d835c0ac9c4fe1ccc62babd upstream.
+
+The ready event list of an epoll object is protected by a read-write
+lock (rwlock):
+
+  - The consumer (waiter) acquires the write lock and takes items.
+  - The producer (waker) takes the read lock and adds items.
+
+The point of this design is enabling epoll to scale well with large number
+of producers, as multiple producers can hold the read lock at the same
+time.
+
+Unfortunately, this implementation may cause a scheduling priority inversion
+problem. Suppose the consumer has a higher scheduling priority than the
+producer. The consumer needs to acquire the write lock, but may be blocked
+by the producer holding the read lock. Since the rwlock does not
+support priority-boosting for the readers (even with CONFIG_PREEMPT_RT=y),
+we have a case of priority inversion: a higher priority consumer is blocked
+by a lower priority producer. This problem was reported in [1].
+
+Furthermore, this could also cause stall problem, as described in [2].
+
+Fix this problem by replacing rwlock with spinlock.
+
+This reduces the event bandwidth, as the producers now have to contend with
+each other for the spinlock. According to the benchmark from
+https://github.com/rouming/test-tools/blob/master/stress-epoll.c:
+
+    On 12 x86 CPUs:
+                  Before     After        Diff
+        threads  events/ms  events/ms
+              8       7162       4956     -31%
+             16       8733       5383     -38%
+             32       7968       5572     -30%
+             64      10652       5739     -46%
+            128      11236       5931     -47%
+
+    On 4 riscv CPUs:
+                  Before     After        Diff
+        threads  events/ms  events/ms
+              8       2958       2833      -4%
+             16       3323       3097      -7%
+             32       3451       3240      -6%
+             64       3554       3178     -11%
+            128       3601       3235     -10%
+
+Although the numbers look bad, it should be noted that this benchmark
+creates multiple threads that do nothing except constantly generate new
+epoll events, thus contention on the spinlock is high. For a real workload,
+the event rate is likely much lower, and the performance drop is not as
+bad.
+
+Using another benchmark (perf bench epoll wait) where spinlock contention
+is lower, improvement is even observed on x86:
+
+    On 12 x86 CPUs:
+        Before: Averaged 110279 operations/sec (+- 1.09%), total secs = 8
+        After:  Averaged 114577 operations/sec (+- 2.25%), total secs = 8
+
+    On 4 riscv CPUs:
+        Before: Averaged 175767 operations/sec (+- 0.62%), total secs = 8
+        After:  Averaged 167396 operations/sec (+- 0.23%), total secs = 8
+
+In conclusion, no one is likely to be upset over this change. After all,
+spinlock was used originally for years, and the commit which converted to
+rwlock didn't mention a real workload, just that the benchmark numbers are
+nice.
+
+This patch is not exactly the revert of commit a218cc491420 ("epoll: use
+rwlock in order to reduce ep_poll_callback() contention"), because git
+revert conflicts in some places where the resolution is not obvious.
+This patch is intended to be backported, therefore go with the obvious
+approach:
+
+  - Replace rwlock_t with spinlock_t one to one
+
+  - Delete list_add_tail_lockless() and chain_epi_lockless(). These were
+    introduced to allow producers to concurrently add items to the list.
+    But now that spinlock no longer allows producers to touch the event
+    list concurrently, these two functions are not necessary anymore.
+
+Fixes: a218cc491420 ("epoll: use rwlock in order to reduce ep_poll_callback() contention")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/ec92458ea357ec503c737ead0f10b2c6e4c37d47.1752581388.git.namcao@linutronix.de
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Cc: stable@vger.kernel.org
+Reported-by: Frederic Weisbecker <frederic@kernel.org>
+Closes: https://lore.kernel.org/linux-rt-users/20210825132754.GA895675@lothringen/ [1]
+Reported-by: Valentin Schneider <vschneid@redhat.com>
+Closes: https://lore.kernel.org/linux-rt-users/xhsmhttqvnall.mognet@vschneid.remote.csb/ [2]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/eventpoll.c |  139 ++++++++++-----------------------------------------------
+ 1 file changed, 26 insertions(+), 113 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -46,10 +46,10 @@
+  *
+  * 1) epnested_mutex (mutex)
+  * 2) ep->mtx (mutex)
+- * 3) ep->lock (rwlock)
++ * 3) ep->lock (spinlock)
+  *
+  * The acquire order is the one listed above, from 1 to 3.
+- * We need a rwlock (ep->lock) because we manipulate objects
++ * We need a spinlock (ep->lock) because we manipulate objects
+  * from inside the poll callback, that might be triggered from
+  * a wake_up() that in turn might be called from IRQ context.
+  * So we can't sleep inside the poll callback and hence we need
+@@ -195,7 +195,7 @@ struct eventpoll {
+       struct list_head rdllist;
+       /* Lock which protects rdllist and ovflist */
+-      rwlock_t lock;
++      spinlock_t lock;
+       /* RB tree root used to store monitored fd structs */
+       struct rb_root_cached rbr;
+@@ -713,10 +713,10 @@ static void ep_start_scan(struct eventpo
+        * in a lockless way.
+        */
+       lockdep_assert_irqs_enabled();
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       list_splice_init(&ep->rdllist, txlist);
+       WRITE_ONCE(ep->ovflist, NULL);
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+ }
+ static void ep_done_scan(struct eventpoll *ep,
+@@ -724,7 +724,7 @@ static void ep_done_scan(struct eventpol
+ {
+       struct epitem *epi, *nepi;
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       /*
+        * During the time we spent inside the "sproc" callback, some
+        * other events might have been queued by the poll callback.
+@@ -765,7 +765,7 @@ static void ep_done_scan(struct eventpol
+                       wake_up(&ep->wq);
+       }
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+ }
+ static void ep_get(struct eventpoll *ep)
+@@ -839,10 +839,10 @@ static bool __ep_remove(struct eventpoll
+       rb_erase_cached(&epi->rbn, &ep->rbr);
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       if (ep_is_linked(epi))
+               list_del_init(&epi->rdllink);
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+       wakeup_source_unregister(ep_wakeup_source(epi));
+       /*
+@@ -1123,7 +1123,7 @@ static int ep_alloc(struct eventpoll **p
+               return -ENOMEM;
+       mutex_init(&ep->mtx);
+-      rwlock_init(&ep->lock);
++      spin_lock_init(&ep->lock);
+       init_waitqueue_head(&ep->wq);
+       init_waitqueue_head(&ep->poll_wait);
+       INIT_LIST_HEAD(&ep->rdllist);
+@@ -1211,99 +1211,9 @@ struct file *get_epoll_tfile_raw_ptr(str
+ #endif /* CONFIG_KCMP */
+ /*
+- * Adds a new entry to the tail of the list in a lockless way, i.e.
+- * multiple CPUs are allowed to call this function concurrently.
+- *
+- * Beware: it is necessary to prevent any other modifications of the
+- *         existing list until all changes are completed, in other words
+- *         concurrent list_add_tail_lockless() calls should be protected
+- *         with a read lock, where write lock acts as a barrier which
+- *         makes sure all list_add_tail_lockless() calls are fully
+- *         completed.
+- *
+- *        Also an element can be locklessly added to the list only in one
+- *        direction i.e. either to the tail or to the head, otherwise
+- *        concurrent access will corrupt the list.
+- *
+- * Return: %false if element has been already added to the list, %true
+- * otherwise.
+- */
+-static inline bool list_add_tail_lockless(struct list_head *new,
+-                                        struct list_head *head)
+-{
+-      struct list_head *prev;
+-
+-      /*
+-       * This is simple 'new->next = head' operation, but cmpxchg()
+-       * is used in order to detect that same element has been just
+-       * added to the list from another CPU: the winner observes
+-       * new->next == new.
+-       */
+-      if (!try_cmpxchg(&new->next, &new, head))
+-              return false;
+-
+-      /*
+-       * Initially ->next of a new element must be updated with the head
+-       * (we are inserting to the tail) and only then pointers are atomically
+-       * exchanged.  XCHG guarantees memory ordering, thus ->next should be
+-       * updated before pointers are actually swapped and pointers are
+-       * swapped before prev->next is updated.
+-       */
+-
+-      prev = xchg(&head->prev, new);
+-
+-      /*
+-       * It is safe to modify prev->next and new->prev, because a new element
+-       * is added only to the tail and new->next is updated before XCHG.
+-       */
+-
+-      prev->next = new;
+-      new->prev = prev;
+-
+-      return true;
+-}
+-
+-/*
+- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
+- * i.e. multiple CPUs are allowed to call this function concurrently.
+- *
+- * Return: %false if epi element has been already chained, %true otherwise.
+- */
+-static inline bool chain_epi_lockless(struct epitem *epi)
+-{
+-      struct eventpoll *ep = epi->ep;
+-
+-      /* Fast preliminary check */
+-      if (epi->next != EP_UNACTIVE_PTR)
+-              return false;
+-
+-      /* Check that the same epi has not been just chained from another CPU */
+-      if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
+-              return false;
+-
+-      /* Atomically exchange tail */
+-      epi->next = xchg(&ep->ovflist, epi);
+-
+-      return true;
+-}
+-
+-/*
+  * This is the callback that is passed to the wait queue wakeup
+  * mechanism. It is called by the stored file descriptors when they
+  * have events to report.
+- *
+- * This callback takes a read lock in order not to contend with concurrent
+- * events from another file descriptor, thus all modifications to ->rdllist
+- * or ->ovflist are lockless.  Read lock is paired with the write lock from
+- * ep_start/done_scan(), which stops all list modifications and guarantees
+- * that lists state is seen correctly.
+- *
+- * Another thing worth to mention is that ep_poll_callback() can be called
+- * concurrently for the same @epi from different CPUs if poll table was inited
+- * with several wait queues entries.  Plural wakeup from different CPUs of a
+- * single wait queue is serialized by wq.lock, but the case when multiple wait
+- * queues are used should be detected accordingly.  This is detected using
+- * cmpxchg() operation.
+  */
+ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+ {
+@@ -1314,7 +1224,7 @@ static int ep_poll_callback(wait_queue_e
+       unsigned long flags;
+       int ewake = 0;
+-      read_lock_irqsave(&ep->lock, flags);
++      spin_lock_irqsave(&ep->lock, flags);
+       ep_set_busy_poll_napi_id(epi);
+@@ -1343,12 +1253,15 @@ static int ep_poll_callback(wait_queue_e
+        * chained in ep->ovflist and requeued later on.
+        */
+       if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
+-              if (chain_epi_lockless(epi))
++              if (epi->next == EP_UNACTIVE_PTR) {
++                      epi->next = READ_ONCE(ep->ovflist);
++                      WRITE_ONCE(ep->ovflist, epi);
+                       ep_pm_stay_awake_rcu(epi);
++              }
+       } else if (!ep_is_linked(epi)) {
+               /* In the usual case, add event to ready list. */
+-              if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
+-                      ep_pm_stay_awake_rcu(epi);
++              list_add_tail(&epi->rdllink, &ep->rdllist);
++              ep_pm_stay_awake_rcu(epi);
+       }
+       /*
+@@ -1381,7 +1294,7 @@ static int ep_poll_callback(wait_queue_e
+               pwake++;
+ out_unlock:
+-      read_unlock_irqrestore(&ep->lock, flags);
++      spin_unlock_irqrestore(&ep->lock, flags);
+       /* We have to call this outside the lock */
+       if (pwake)
+@@ -1716,7 +1629,7 @@ static int ep_insert(struct eventpoll *e
+       }
+       /* We have to drop the new item inside our item list to keep track of it */
+-      write_lock_irq(&ep->lock);
++      spin_lock_irq(&ep->lock);
+       /* record NAPI ID of new item if present */
+       ep_set_busy_poll_napi_id(epi);
+@@ -1733,7 +1646,7 @@ static int ep_insert(struct eventpoll *e
+                       pwake++;
+       }
+-      write_unlock_irq(&ep->lock);
++      spin_unlock_irq(&ep->lock);
+       /* We have to call this outside the lock */
+       if (pwake)
+@@ -1797,7 +1710,7 @@ static int ep_modify(struct eventpoll *e
+        * list, push it inside.
+        */
+       if (ep_item_poll(epi, &pt, 1)) {
+-              write_lock_irq(&ep->lock);
++              spin_lock_irq(&ep->lock);
+               if (!ep_is_linked(epi)) {
+                       list_add_tail(&epi->rdllink, &ep->rdllist);
+                       ep_pm_stay_awake(epi);
+@@ -1808,7 +1721,7 @@ static int ep_modify(struct eventpoll *e
+                       if (waitqueue_active(&ep->poll_wait))
+                               pwake++;
+               }
+-              write_unlock_irq(&ep->lock);
++              spin_unlock_irq(&ep->lock);
+       }
+       /* We have to call this outside the lock */
+@@ -2041,7 +1954,7 @@ static int ep_poll(struct eventpoll *ep,
+               init_wait(&wait);
+               wait.func = ep_autoremove_wake_function;
+-              write_lock_irq(&ep->lock);
++              spin_lock_irq(&ep->lock);
+               /*
+                * Barrierless variant, waitqueue_active() is called under
+                * the same lock on wakeup ep_poll_callback() side, so it
+@@ -2060,7 +1973,7 @@ static int ep_poll(struct eventpoll *ep,
+               if (!eavail)
+                       __add_wait_queue_exclusive(&ep->wq, &wait);
+-              write_unlock_irq(&ep->lock);
++              spin_unlock_irq(&ep->lock);
+               if (!eavail)
+                       timed_out = !schedule_hrtimeout_range(to, slack,
+@@ -2075,7 +1988,7 @@ static int ep_poll(struct eventpoll *ep,
+               eavail = 1;
+               if (!list_empty_careful(&wait.entry)) {
+-                      write_lock_irq(&ep->lock);
++                      spin_lock_irq(&ep->lock);
+                       /*
+                        * If the thread timed out and is not on the wait queue,
+                        * it means that the thread was woken up after its
+@@ -2086,7 +1999,7 @@ static int ep_poll(struct eventpoll *ep,
+                       if (timed_out)
+                               eavail = list_empty(&wait.entry);
+                       __remove_wait_queue(&ep->wq, &wait);
+-                      write_unlock_irq(&ep->lock);
++                      spin_unlock_irq(&ep->lock);
+               }
+       }
+ }
diff --git a/queue-6.12/fbdev-fix-logic-error-in-offb-name-match.patch b/queue-6.12/fbdev-fix-logic-error-in-offb-name-match.patch
new file mode 100644 (file)
index 0000000..b3a1c41
--- /dev/null
@@ -0,0 +1,37 @@
+From 15df28699b28d6b49dc305040c4e26a9553df07a Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@linux-m68k.org>
+Date: Thu, 9 Oct 2025 09:56:25 +1100
+Subject: fbdev: Fix logic error in "offb" name match
+
+From: Finn Thain <fthain@linux-m68k.org>
+
+commit 15df28699b28d6b49dc305040c4e26a9553df07a upstream.
+
+A regression was reported to me recently whereby /dev/fb0 had disappeared
+from a PowerBook G3 Series "Wallstreet". The problem shows up when the
+"video=ofonly" parameter is passed to the kernel, which is what the
+bootloader does when "no video driver" is selected. The cause of the
+problem is the "offb" string comparison, which got mangled when it got
+refactored. Fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: 93604a5ade3a ("fbdev: Handle video= parameter in video/cmdline.c")
+Reported-and-tested-by: Stan Johnson <userm57@yahoo.com>
+Signed-off-by: Finn Thain <fthain@linux-m68k.org>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_cmdline.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/fbdev/core/fb_cmdline.c
++++ b/drivers/video/fbdev/core/fb_cmdline.c
+@@ -40,7 +40,7 @@ int fb_get_options(const char *name, cha
+       bool enabled;
+       if (name)
+-              is_of = strncmp(name, "offb", 4);
++              is_of = !strncmp(name, "offb", 4);
+       enabled = __video_get_options(name, &options, is_of);
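The inverted test comes down to strncmp() semantics: it returns 0 on a match,
so the boolean "is this offb?" flag needs the negation. A tiny standalone
illustration (hypothetical, not driver code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *name = "offb";
            bool broken = strncmp(name, "offb", 4);   /* 0 -> false: match missed */
            bool fixed  = !strncmp(name, "offb", 4);  /* true: match detected */

            printf("broken=%d fixed=%d\n", broken, fixed);
            return 0;
    }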
diff --git a/queue-6.12/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch b/queue-6.12/fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
new file mode 100644 (file)
index 0000000..4837a77
--- /dev/null
@@ -0,0 +1,31 @@
+From d68318471aa2e16222ebf492883e05a2d72b9b17 Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <haoxiang_li2024@163.com>
+Date: Tue, 15 Jul 2025 17:51:20 +0800
+Subject: fs/ntfs3: Fix a resource leak bug in wnd_extend()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+commit d68318471aa2e16222ebf492883e05a2d72b9b17 upstream.
+
+Add put_bh() to decrease the refcount of 'bh' after the job
+is finished, preventing a resource leak.
+
+Fixes: 3f3b442b5ad2 ("fs/ntfs3: Add bitmap")
+Cc: stable@vger.kernel.org
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/bitmap.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1399,6 +1399,7 @@ int wnd_extend(struct wnd_bitmap *wnd, s
+               mark_buffer_dirty(bh);
+               unlock_buffer(bh);
+               /* err = sync_dirty_buffer(bh); */
++              put_bh(bh);
+               b0 = 0;
+               bits -= op;
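The underlying rule, shown here as a hedged kernel-style sketch rather than
the wnd_extend() code itself: a buffer_head returned by sb_bread() (or a
similar helper) carries a reference that the caller must drop with
put_bh()/brelse() once it is done, otherwise the buffer can never be freed.

    #include <linux/buffer_head.h>
    #include <linux/errno.h>
    #include <linux/fs.h>

    static int touch_block(struct super_block *sb, sector_t block)
    {
            struct buffer_head *bh = sb_bread(sb, block);

            if (!bh)
                    return -EIO;

            lock_buffer(bh);
            /* ... modify bh->b_data ... */
            set_buffer_uptodate(bh);
            mark_buffer_dirty(bh);
            unlock_buffer(bh);

            put_bh(bh);     /* balance the reference taken by sb_bread() */
            return 0;
    }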
diff --git a/queue-6.12/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch b/queue-6.12/fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
new file mode 100644 (file)
index 0000000..7e7a2d8
--- /dev/null
@@ -0,0 +1,89 @@
+From 72b7ceca857f38a8ca7c5629feffc63769638974 Mon Sep 17 00:00:00 2001
+From: Shashank A P <shashank.ap@samsung.com>
+Date: Mon, 1 Sep 2025 14:59:00 +0530
+Subject: fs: quota: create dedicated workqueue for quota_release_work
+
+From: Shashank A P <shashank.ap@samsung.com>
+
+commit 72b7ceca857f38a8ca7c5629feffc63769638974 upstream.
+
+There is a kernel panic due to WARN_ONCE when panic_on_warn is set.
+
+This issue occurs when writeback is triggered by a sync call for an
+opened file (i.e., the writeback reason is WB_REASON_SYNC). When f2fs balance
+is needed on the sync path, a flush of quota_release_work is triggered.
+By default quota_release_work is queued on the "events_unbound" workqueue,
+which does not have the WQ_MEM_RECLAIM flag. During f2fs balance the
+"writeback" workqueue tries to flush quota_release_work, causing a kernel
+panic due to the MEM_RECLAIM flag mismatch.
+
+This patch creates a dedicated workqueue with the WQ_MEM_RECLAIM flag
+for quota_release_work.
+
+------------[ cut here ]------------
+WARNING: CPU: 4 PID: 14867 at kernel/workqueue.c:3721 check_flush_dependency+0x13c/0x148
+Call trace:
+ check_flush_dependency+0x13c/0x148
+ __flush_work+0xd0/0x398
+ flush_delayed_work+0x44/0x5c
+ dquot_writeback_dquots+0x54/0x318
+ f2fs_do_quota_sync+0xb8/0x1a8
+ f2fs_write_checkpoint+0x3cc/0x99c
+ f2fs_gc+0x190/0x750
+ f2fs_balance_fs+0x110/0x168
+ f2fs_write_single_data_page+0x474/0x7dc
+ f2fs_write_data_pages+0x7d0/0xd0c
+ do_writepages+0xe0/0x2f4
+ __writeback_single_inode+0x44/0x4ac
+ writeback_sb_inodes+0x30c/0x538
+ wb_writeback+0xf4/0x440
+ wb_workfn+0x128/0x5d4
+ process_scheduled_works+0x1c4/0x45c
+ worker_thread+0x32c/0x3e8
+ kthread+0x11c/0x1b0
+ ret_from_fork+0x10/0x20
+Kernel panic - not syncing: kernel: panic_on_warn set ...
+
+Fixes: ac6f420291b3 ("quota: flush quota_release_work upon quota writeback")
+CC: stable@vger.kernel.org
+Signed-off-by: Shashank A P <shashank.ap@samsung.com>
+Link: https://patch.msgid.link/20250901092905.2115-1-shashank.ap@samsung.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/quota/dquot.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -163,6 +163,9 @@ static struct quota_module_name module_n
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
++/* workqueue for work quota_release_work*/
++static struct workqueue_struct *quota_unbound_wq;
++
+ void register_quota_format(struct quota_format_type *fmt)
+ {
+       spin_lock(&dq_list_lock);
+@@ -882,7 +885,7 @@ void dqput(struct dquot *dquot)
+       put_releasing_dquots(dquot);
+       atomic_dec(&dquot->dq_count);
+       spin_unlock(&dq_list_lock);
+-      queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
++      queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+@@ -3042,6 +3045,11 @@ static int __init dquot_init(void)
+       shrinker_register(dqcache_shrinker);
++      quota_unbound_wq = alloc_workqueue("quota_events_unbound",
++                                         WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
++      if (!quota_unbound_wq)
++              panic("Cannot create quota_unbound_wq\n");
++
+       return 0;
+ }
+ fs_initcall(dquot_init);
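The dependency rule that check_flush_dependency() enforces, in a hedged
kernel-style sketch (names are illustrative, not the dquot code): work that
may be flushed from a WQ_MEM_RECLAIM worker, such as writeback, must itself
be queued on a WQ_MEM_RECLAIM workqueue so the flush can make progress under
memory pressure.

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *release_wq;
    static struct delayed_work release_work;

    static void release_fn(struct work_struct *work)
    {
            /* ... release cached objects ... */
    }

    static int example_setup(void)
    {
            release_wq = alloc_workqueue("example_release",
                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
            if (!release_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&release_work, release_fn);
            /* Queue on the reclaim-capable queue, not system_unbound_wq. */
            queue_delayed_work(release_wq, &release_work, 1);
            return 0;
    }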
diff --git a/queue-6.12/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch b/queue-6.12/fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
new file mode 100644 (file)
index 0000000..2c89bad
--- /dev/null
@@ -0,0 +1,90 @@
+From 26e5c67deb2e1f42a951f022fdf5b9f7eb747b01 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Mon, 15 Sep 2025 17:24:17 -0700
+Subject: fuse: fix livelock in synchronous file put from fuseblk workers
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 26e5c67deb2e1f42a951f022fdf5b9f7eb747b01 upstream.
+
+I observed a hang when running generic/323 against a fuseblk server.
+This test opens a file, initiates a lot of AIO writes to that file
+descriptor, and closes the file descriptor before the writes complete.
+Unsurprisingly, the AIO exerciser threads are mostly stuck waiting for
+responses from the fuseblk server:
+
+# cat /proc/372265/task/372313/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_do_getattr+0xfc/0x1f0 [fuse]
+[<0>] fuse_file_read_iter+0xbe/0x1c0 [fuse]
+[<0>] aio_read+0x130/0x1e0
+[<0>] io_submit_one+0x542/0x860
+[<0>] __x64_sys_io_submit+0x98/0x1a0
+[<0>] do_syscall_64+0x37/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+But the /weird/ part is that the fuseblk server threads are waiting for
+responses from themselves:
+
+# cat /proc/372210/task/372232/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_file_put+0x9a/0xd0 [fuse]
+[<0>] fuse_release+0x36/0x50 [fuse]
+[<0>] __fput+0xec/0x2b0
+[<0>] task_work_run+0x55/0x90
+[<0>] syscall_exit_to_user_mode+0xe9/0x100
+[<0>] do_syscall_64+0x43/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+The fuseblk server is fuse2fs so there's nothing all that exciting in
+the server itself.  So why is the fuse server calling fuse_file_put?
+The commit message for the fstest sheds some light on that:
+
+"By closing the file descriptor before calling io_destroy, you pretty
+much guarantee that the last put on the ioctx will be done in interrupt
+context (during I/O completion)."
+
+Aha.  AIO fgets a new struct file from the fd when it queues the ioctx.
+The completion of the FUSE_WRITE command from userspace causes the fuse
+server to call the AIO completion function.  The completion puts the
+struct file, queuing a delayed fput to the fuse server task.  When the
+fuse server task returns to userspace, it has to run the delayed fput,
+which in the case of a fuseblk server, it does synchronously.
+
+Sending the FUSE_RELEASE command synchronously from fuse server threads
+is a bad idea because a client program can initiate enough simultaneous
+AIOs such that all the fuse server threads end up in delayed_fput, and
+now there aren't any threads left to handle the queued fuse commands.
+
+Fix this by only using asynchronous fputs when closing files, and leave
+a comment explaining why.
+
+Cc: stable@vger.kernel.org # v2.6.38
+Fixes: 5a18ec176c934c ("fuse: fix hang of single threaded fuseblk filesystem")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -355,8 +355,14 @@ void fuse_file_release(struct inode *ino
+        * Make the release synchronous if this is a fuseblk mount,
+        * synchronous RELEASE is allowed (and desirable) in this case
+        * because the server can be trusted not to screw up.
++       *
++       * Always use the asynchronous file put because the current thread
++       * might be the fuse server.  This can happen if a process starts some
++       * aio and closes the fd before the aio completes.  Since aio takes its
++       * own ref to the file, the IO completion has to drop the ref, which is
++       * how the fuse server can end up closing its clients' files.
+        */
+-      fuse_file_put(ff, ff->fm->fc->destroy);
++      fuse_file_put(ff, false);
+ }
+ void fuse_release_common(struct file *file, bool isdir)
diff --git a/queue-6.12/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch b/queue-6.12/fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
new file mode 100644 (file)
index 0000000..9b0c16a
--- /dev/null
@@ -0,0 +1,36 @@
+From 0b563aad1c0a05dc7d123f68a9f82f79de206dad Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Mon, 1 Sep 2025 17:16:26 +0200
+Subject: fuse: fix possibly missing fuse_copy_finish() call in fuse_notify()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 0b563aad1c0a05dc7d123f68a9f82f79de206dad upstream.
+
+In case of FUSE_NOTIFY_RESEND and FUSE_NOTIFY_INC_EPOCH fuse_copy_finish()
+isn't called.
+
+Fix by always calling fuse_copy_finish() after fuse_notify().  It's a no-op
+if called a second time.
+
+Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests")
+Fixes: 2396356a945b ("fuse: add more control over cache invalidation behaviour")
+Cc: <stable@vger.kernel.org> # v6.9
+Reviewed-by: Joanne Koong <joannelkoong@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/dev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1989,7 +1989,7 @@ static ssize_t fuse_dev_do_write(struct
+        */
+       if (!oh.unique) {
+               err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
+-              goto out;
++              goto copy_finish;
+       }
+       err = -EINVAL;
diff --git a/queue-6.12/iio-adc-pac1934-fix-channel-disable-configuration.patch b/queue-6.12/iio-adc-pac1934-fix-channel-disable-configuration.patch
new file mode 100644 (file)
index 0000000..8d85b63
--- /dev/null
@@ -0,0 +1,71 @@
+From 3c63ba1c430af1c0dcd68dd36f2246980621dcba Mon Sep 17 00:00:00 2001
+From: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+Date: Mon, 11 Aug 2025 13:09:04 +0000
+Subject: iio/adc/pac1934: fix channel disable configuration
+
+From: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+
+commit 3c63ba1c430af1c0dcd68dd36f2246980621dcba upstream.
+
+There are two problems with the chip configuration in this driver:
+- First, writing 12 bytes (ARRAY_SIZE(regs)) would overflow the
+  configuration registers due to the HW auto-increment implementation
+  in the chip.
+- Second, the i2c_smbus_write_block_data call ends up writing an
+  unexpected value to the channel_dis register, because the SMBus
+  byte count (0x03 in this case) gets written to the
+  register. The PAC1931/2/3/4 data sheet does not really specify
+  that block write is indeed supported.
+
+This problem is probably not visible on the PAC1934 version, where all
+channels are used, as the chip then happens to be configured correctly by luck,
+but in our case, using the PAC1931, it leads to a nonfunctional device.
+
+Fixes: 0fb528c8255b ("iio: adc: adding support for PAC193x")
+Suggested-by: Rene Straub <rene.straub@belden.com>
+Signed-off-by: Aleksandar Gerasimovski <aleksandar.gerasimovski@belden.com>
+Reviewed-by: Marius Cristea <marius.cristea@microchip.com>
+Link: https://patch.msgid.link/20250811130904.2481790-1-aleksandar.gerasimovski@belden.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/pac1934.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/adc/pac1934.c
++++ b/drivers/iio/adc/pac1934.c
+@@ -88,6 +88,7 @@
+ #define PAC1934_VPOWER_3_ADDR                 0x19
+ #define PAC1934_VPOWER_4_ADDR                 0x1A
+ #define PAC1934_REFRESH_V_REG_ADDR            0x1F
++#define PAC1934_SLOW_REG_ADDR                 0x20
+ #define PAC1934_CTRL_STAT_REGS_ADDR           0x1C
+ #define PAC1934_PID_REG_ADDR                  0xFD
+ #define PAC1934_MID_REG_ADDR                  0xFE
+@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct
+       /* no SLOW triggered REFRESH, clear POR */
+       regs[PAC1934_SLOW_REG_OFF] = 0;
+-      ret =  i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+-                                        ARRAY_SIZE(regs), (u8 *)regs);
++      /*
++       * Write the three bytes sequentially, as the device does not support
++       * block write.
++       */
++      ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
++                                      regs[PAC1934_CHANNEL_DIS_REG_OFF]);
++      if (ret)
++              return ret;
++
++      ret = i2c_smbus_write_byte_data(client,
++                                      PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
++                                      regs[PAC1934_NEG_PWR_REG_OFF]);
++      if (ret)
++              return ret;
++
++      ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
++                                      regs[PAC1934_SLOW_REG_OFF]);
+       if (ret)
+               return ret;
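For reference, the transfer formats involved, summarized as a comment
(illustrative, based on the SMBus specification rather than the PAC193x data
sheet):

    /*
     * i2c_smbus_write_block_data(client, REG, 3, buf):
     *     addr | REG | 0x03 | buf[0] | buf[1] | buf[2]
     *     An auto-incrementing device stores the SMBus byte count (0x03)
     *     in REG itself, which is the corruption described above.
     *
     * i2c_smbus_write_byte_data(client, REG + i, buf[i]), one call per register:
     *     addr | REG + i | buf[i]
     *     Only the intended value reaches each register.
     */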
diff --git a/queue-6.12/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch b/queue-6.12/iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
new file mode 100644 (file)
index 0000000..a3c00f3
--- /dev/null
@@ -0,0 +1,35 @@
+From f9381ece76de999a2065d5b4fdd87fa17883978c Mon Sep 17 00:00:00 2001
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+Date: Mon, 1 Sep 2025 21:57:25 +0800
+Subject: iio: dac: ad5360: use int type to store negative error codes
+
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+
+commit f9381ece76de999a2065d5b4fdd87fa17883978c upstream.
+
+Change the 'ret' variable in ad5360_update_ctrl() from unsigned int to
+int, as it needs to store either negative error codes or zero returned
+by ad5360_write_unlocked().
+
+Fixes: a3e2940c24d3 ("staging:iio:dac: Add AD5360 driver")
+Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Link: https://patch.msgid.link/20250901135726.17601-2-rongqianfeng@vivo.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/dac/ad5360.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/dac/ad5360.c
++++ b/drivers/iio/dac/ad5360.c
+@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio
+       unsigned int clr)
+ {
+       struct ad5360_state *st = iio_priv(indio_dev);
+-      unsigned int ret;
++      int ret;
+       mutex_lock(&st->lock);
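The bug class fixed here (and in the ad5421 patch that follows) is easy to
demonstrate in a standalone sketch (hypothetical, not driver code): a negative
errno stored in an unsigned variable makes every "ret < 0" check unreachable,
so failures look like success.

    #include <errno.h>
    #include <stdio.h>

    static int write_reg(void)      /* models a helper returning -errno */
    {
            return -EIO;            /* pretend the SPI write failed */
    }

    int main(void)
    {
            unsigned int ret_bad = write_reg();     /* wraps to a huge value */
            int ret_good = write_reg();

            if (ret_bad < 0)        /* always false for an unsigned type */
                    printf("bad: error detected\n");
            if (ret_good < 0)       /* correctly reports the failure */
                    printf("good: error detected\n");
            return 0;
    }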
diff --git a/queue-6.12/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch b/queue-6.12/iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
new file mode 100644 (file)
index 0000000..70a3b1b
--- /dev/null
@@ -0,0 +1,35 @@
+From 3379c900320954d768ed9903691fb2520926bbe3 Mon Sep 17 00:00:00 2001
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+Date: Mon, 1 Sep 2025 21:57:26 +0800
+Subject: iio: dac: ad5421: use int type to store negative error codes
+
+From: Qianfeng Rong <rongqianfeng@vivo.com>
+
+commit 3379c900320954d768ed9903691fb2520926bbe3 upstream.
+
+Change the 'ret' variable in ad5421_update_ctrl() from unsigned int to
+int, as it needs to store either negative error codes or zero returned
+by ad5421_write_unlocked().
+
+Fixes: 5691b23489db ("staging:iio:dac: Add AD5421 driver")
+Signed-off-by: Qianfeng Rong <rongqianfeng@vivo.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Link: https://patch.msgid.link/20250901135726.17601-3-rongqianfeng@vivo.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/dac/ad5421.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/dac/ad5421.c
++++ b/drivers/iio/dac/ad5421.c
+@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio
+       unsigned int clr)
+ {
+       struct ad5421_state *st = iio_priv(indio_dev);
+-      unsigned int ret;
++      int ret;
+       mutex_lock(&st->lock);
diff --git a/queue-6.12/iio-frequency-adf4350-fix-prescaler-usage.patch b/queue-6.12/iio-frequency-adf4350-fix-prescaler-usage.patch
new file mode 100644 (file)
index 0000000..dcc3b4b
--- /dev/null
@@ -0,0 +1,72 @@
+From 33d7ecbf69aa7dd4145e3b77962bcb8759eede3d Mon Sep 17 00:00:00 2001
+From: Michael Hennerich <michael.hennerich@analog.com>
+Date: Fri, 29 Aug 2025 12:25:42 +0100
+Subject: iio: frequency: adf4350: Fix prescaler usage.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Hennerich <michael.hennerich@analog.com>
+
+commit 33d7ecbf69aa7dd4145e3b77962bcb8759eede3d upstream.
+
+The ADF4350/1 features a programmable dual-modulus prescaler of 4/5 or 8/9.
+When set to 4/5, the maximum RF frequency allowed is 3 GHz.
+Therefore, when operating the ADF4351 above 3 GHz, this must be set to 8/9.
+In this context it is not the RF output frequency that is meant
+- it's the VCO frequency.
+
+Therefore move the prescaler selection after we derived the VCO frequency
+from the desired RF output frequency.
+
+This BUG may have caused PLL lock instabilities when operating the VCO at
+the very high range close to 4.4 GHz.
+
+Fixes: e31166f0fd48 ("iio: frequency: New driver for Analog Devices ADF4350/ADF4351 Wideband Synthesizers")
+Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Link: https://patch.msgid.link/20250829-adf4350-fix-v2-1-0bf543ba797d@analog.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/frequency/adf4350.c |   20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf43
+       if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
+               return -EINVAL;
++      st->r4_rf_div_sel = 0;
++
++      /*
++       * !\TODO: The below computation is making sure we get a power of 2
++       * shift (st->r4_rf_div_sel) so that freq becomes higher or equal to
++       * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
++       * and friends.
++       */
++      while (freq < ADF4350_MIN_VCO_FREQ) {
++              freq <<= 1;
++              st->r4_rf_div_sel++;
++      }
++
+       if (freq > ADF4350_MAX_FREQ_45_PRESC) {
+               prescaler = ADF4350_REG1_PRESCALER;
+               mdiv = 75;
+@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf43
+               mdiv = 23;
+       }
+-      st->r4_rf_div_sel = 0;
+-
+-      while (freq < ADF4350_MIN_VCO_FREQ) {
+-              freq <<= 1;
+-              st->r4_rf_div_sel++;
+-      }
+-
+       /*
+        * Allow a predefined reference division factor
+        * if not set, compute our own
diff --git a/queue-6.12/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch b/queue-6.12/iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
new file mode 100644 (file)
index 0000000..0a95cf9
--- /dev/null
@@ -0,0 +1,37 @@
+From a95a0b4e471a6d8860f40c6ac8f1cad9dde3189a Mon Sep 17 00:00:00 2001
+From: Sean Nyekjaer <sean@geanix.com>
+Date: Mon, 1 Sep 2025 09:49:14 +0200
+Subject: iio: imu: inv_icm42600: Drop redundant pm_runtime reinitialization in resume
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+commit a95a0b4e471a6d8860f40c6ac8f1cad9dde3189a upstream.
+
+Remove unnecessary calls to pm_runtime_disable(), pm_runtime_set_active(),
+and pm_runtime_enable() from the resume path. These operations are not
+required here and can interfere with proper pm_runtime state handling,
+especially when resuming from a pm_runtime suspended state.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-2-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -847,10 +847,6 @@ static int inv_icm42600_resume(struct de
+       if (ret)
+               goto out_unlock;
+-      pm_runtime_disable(dev);
+-      pm_runtime_set_active(dev);
+-      pm_runtime_enable(dev);
+-
+       /* restore sensors state */
+       ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
+                                        st->suspended.accel,
diff --git a/queue-6.12/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch b/queue-6.12/iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
new file mode 100644 (file)
index 0000000..377a711
--- /dev/null
@@ -0,0 +1,38 @@
+From 1315cc2dbd5034f566e20ddce4d675cb9e6d4ddd Mon Sep 17 00:00:00 2001
+From: Sean Anderson <sean.anderson@linux.dev>
+Date: Mon, 14 Jul 2025 20:30:58 -0400
+Subject: iio: xilinx-ams: Fix AMS_ALARM_THR_DIRECT_MASK
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+commit 1315cc2dbd5034f566e20ddce4d675cb9e6d4ddd upstream.
+
+AMS_ALARM_THR_DIRECT_MASK should be bit 0, not bit 1. This would cause
+hysteresis to be enabled with a lower threshold of -28C. The temperature
+alarm would never deassert even if the temperature dropped below the
+upper threshold.
+
+Fixes: d5c70627a794 ("iio: adc: Add Xilinx AMS driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: O'Griofa, Conall <conall.ogriofa@amd.com>
+Tested-by: Erim, Salih <Salih.Erim@amd.com>
+Acked-by: Erim, Salih <Salih.Erim@amd.com>
+Link: https://patch.msgid.link/20250715003058.2035656-1-sean.anderson@linux.dev
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/xilinx-ams.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -118,7 +118,7 @@
+ #define AMS_ALARM_THRESHOLD_OFF_10    0x10
+ #define AMS_ALARM_THRESHOLD_OFF_20    0x20
+-#define AMS_ALARM_THR_DIRECT_MASK     BIT(1)
++#define AMS_ALARM_THR_DIRECT_MASK     BIT(0)
+ #define AMS_ALARM_THR_MIN             0x0000
+ #define AMS_ALARM_THR_MAX             (BIT(16) - 1)
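As a quick sanity check of the one-line fix: the kernel's BIT(n) macro is equivalent to (1UL << n), so the old define selected mask 0x2 while the direct-threshold flag described in the commit message lives at mask 0x1. A trivial standalone illustration:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* equivalent to the kernel macro */

int main(void)
{
    printf("BIT(1) = 0x%lx (old, wrong mask)\n", BIT(1));
    printf("BIT(0) = 0x%lx (fixed mask)\n", BIT(0));
    return 0;
}
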
diff --git a/queue-6.12/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch b/queue-6.12/iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
new file mode 100644 (file)
index 0000000..4d0fe87
--- /dev/null
@@ -0,0 +1,107 @@
+From feb500c7ae7a198db4d2757901bce562feeefa5e Mon Sep 17 00:00:00 2001
+From: Sean Anderson <sean.anderson@linux.dev>
+Date: Mon, 14 Jul 2025 20:28:47 -0400
+Subject: iio: xilinx-ams: Unmask interrupts after updating alarms
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+commit feb500c7ae7a198db4d2757901bce562feeefa5e upstream.
+
+To convert level-triggered alarms into edge-triggered IIO events, alarms
+are masked when they are triggered. To ensure we catch subsequent
+alarms, we then periodically poll to see if the alarm is still active.
+If it isn't, we unmask it. Active but masked alarms are stored in
+current_masked_alarm.
+
+If an active alarm is disabled, it will remain set in
+current_masked_alarm until ams_unmask_worker clears it. If the alarm is
+re-enabled before ams_unmask_worker runs, then it will never be cleared
+from current_masked_alarm. This will prevent the alarm event from being
+pushed even if the alarm is still active.
+
+Fix this by recalculating current_masked_alarm immediately when enabling
+or disabling alarms.
+
+Fixes: d5c70627a794 ("iio: adc: Add Xilinx AMS driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: O'Griofa, Conall <conall.ogriofa@amd.com>
+Tested-by: Erim, Salih <Salih.Erim@amd.com>
+Acked-by: Erim, Salih <Salih.Erim@amd.com>
+Link: https://patch.msgid.link/20250715002847.2035228-1-sean.anderson@linux.dev
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/xilinx-ams.c |   45 +++++++++++++++++++++++--------------------
+ 1 file changed, 25 insertions(+), 20 deletions(-)
+
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct a
+       ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+ }
++static void ams_unmask(struct ams *ams)
++{
++      unsigned int status, unmask;
++
++      status = readl(ams->base + AMS_ISR_0);
++
++      /* Clear those bits which are not active anymore */
++      unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
++
++      /* Clear status of disabled alarm */
++      unmask |= ams->intr_mask;
++
++      ams->current_masked_alarm &= status;
++
++      /* Also clear those which are masked out anyway */
++      ams->current_masked_alarm &= ~ams->intr_mask;
++
++      /* Clear the interrupts before we unmask them */
++      writel(unmask, ams->base + AMS_ISR_0);
++
++      ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
++}
++
+ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ {
+       unsigned long flags;
+@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams
+       spin_lock_irqsave(&ams->intr_lock, flags);
+       ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
++      ams_unmask(ams);
+       spin_unlock_irqrestore(&ams->intr_lock, flags);
+ }
+@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio
+ static void ams_unmask_worker(struct work_struct *work)
+ {
+       struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+-      unsigned int status, unmask;
+       spin_lock_irq(&ams->intr_lock);
+-
+-      status = readl(ams->base + AMS_ISR_0);
+-
+-      /* Clear those bits which are not active anymore */
+-      unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+-
+-      /* Clear status of disabled alarm */
+-      unmask |= ams->intr_mask;
+-
+-      ams->current_masked_alarm &= status;
+-
+-      /* Also clear those which are masked out anyway */
+-      ams->current_masked_alarm &= ~ams->intr_mask;
+-
+-      /* Clear the interrupts before we unmask them */
+-      writel(unmask, ams->base + AMS_ISR_0);
+-
+-      ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+-
++      ams_unmask(ams);
+       spin_unlock_irq(&ams->intr_lock);
+       /* If still pending some alarm re-trigger the timer */
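To make the bit manipulation in the new ams_unmask() helper easier to follow, here is a standalone sketch of the same arithmetic with register reads replaced by plain variables; the bit assignments are made up purely for illustration.

#include <stdio.h>

int main(void)
{
    unsigned int status    = 0x1;  /* pretend alarm 0 is still active in AMS_ISR_0 */
    unsigned int masked    = 0x3;  /* alarms 0 and 1 were masked when they fired */
    unsigned int intr_mask = 0x4;  /* alarm 2 has been disabled by the user */
    unsigned int unmask;

    /* Bits that were masked but are no longer active can be unmasked again */
    unmask = (masked ^ status) & masked;

    /* Also clear the status of disabled alarms */
    unmask |= intr_mask;

    /* Keep masking only alarms that are both still active and still enabled */
    masked &= status;
    masked &= ~intr_mask;

    printf("unmask = 0x%x, still masked = 0x%x\n", unmask, masked);
    return 0;
}

In the driver, the resulting unmask value is then written to AMS_ISR_0 and the interrupt mask is updated, which is exactly the sequence both call sites of ams_unmask() now share.
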
diff --git a/queue-6.12/init-handle-bootloader-identifier-in-kernel-parameters.patch b/queue-6.12/init-handle-bootloader-identifier-in-kernel-parameters.patch
new file mode 100644 (file)
index 0000000..75b1cd8
--- /dev/null
@@ -0,0 +1,74 @@
+From e416f0ed3c500c05c55fb62ee62662717b1c7f71 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Mon, 21 Jul 2025 18:13:43 +0800
+Subject: init: handle bootloader identifier in kernel parameters
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit e416f0ed3c500c05c55fb62ee62662717b1c7f71 upstream.
+
+Bootloaders (GRUB, LILO, etc.) may pass an identifier such as "BOOT_IMAGE=
+/boot/vmlinuz-x.y.z" in the kernel parameters.  But these identifiers are not
+recognized by the kernel itself, so they are passed on to user space.  However,
+the user space init program does not recognize them either.
+
+KEXEC/KDUMP (kexec-tools) may also pass an identifier such as "kexec" on
+some architectures.
+
+We cannot change the bootloaders' behavior, because it has existed for many
+years, and there are already user space programs that search for BOOT_IMAGE=
+in /proc/cmdline to obtain the kernel image location:
+
+https://github.com/linuxdeepin/deepin-ab-recovery/blob/master/util.go
+(search getBootOptions)
+https://github.com/linuxdeepin/deepin-ab-recovery/blob/master/main.go
+(search getKernelReleaseWithBootOption)
+
+So the best way is to handle (ignore) these identifiers in the kernel itself,
+which avoids boot warnings like the one below (with something like
+init=/bin/bash, a bootloader identifier can even cause a crash):
+
+Kernel command line: BOOT_IMAGE=(hd0,1)/vmlinuz-6.x root=/dev/sda3 ro console=tty
+Unknown kernel command line parameters "BOOT_IMAGE=(hd0,1)/vmlinuz-6.x", will be passed to user space.
+
+[chenhuacai@loongson.cn: use strstarts()]
+  Link: https://lkml.kernel.org/r/20250815090120.1569947-1-chenhuacai@loongson.cn
+Link: https://lkml.kernel.org/r/20250721101343.3283480-1-chenhuacai@loongson.cn
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -543,6 +543,12 @@ static int __init unknown_bootoption(cha
+                                    const char *unused, void *arg)
+ {
+       size_t len = strlen(param);
++      /*
++       * Well-known bootloader identifiers:
++       * 1. LILO/Grub pass "BOOT_IMAGE=...";
++       * 2. kexec/kdump (kexec-tools) pass "kexec".
++       */
++      const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
+       /* Handle params aliased to sysctls */
+       if (sysctl_is_alias(param))
+@@ -550,6 +556,12 @@ static int __init unknown_bootoption(cha
+       repair_env_string(param, val);
++      /* Handle bootloader identifier */
++      for (int i = 0; bootloader[i]; i++) {
++              if (strstarts(param, bootloader[i]))
++                      return 0;
++      }
++
+       /* Handle obsolete-style parameters */
+       if (obsolete_checksetup(param))
+               return 0;
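The kernel-side check uses strstarts(), which is a simple strncmp() prefix match. As a rough userspace illustration (not kernel code), the sketch below applies the same prefix test to the tokens in /proc/cmdline; the buffer size and token handling are simplifications.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Same idea as the kernel's strstarts(): prefix match via strncmp() */
static bool starts_with(const char *str, const char *prefix)
{
    return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
    static const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
    char buf[4096];
    FILE *f = fopen("/proc/cmdline", "r");

    if (!f)
        return 1;
    if (!fgets(buf, sizeof(buf), f)) {
        fclose(f);
        return 1;
    }
    fclose(f);

    for (char *tok = strtok(buf, " \n"); tok; tok = strtok(NULL, " \n")) {
        for (int i = 0; bootloader[i]; i++) {
            if (starts_with(tok, bootloader[i]))
                printf("would be ignored by the kernel: %s\n", tok);
        }
    }
    return 0;
}
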
diff --git a/queue-6.12/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch b/queue-6.12/iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
new file mode 100644 (file)
index 0000000..717be41
--- /dev/null
@@ -0,0 +1,50 @@
+From 5ef7e24c742038a5d8c626fdc0e3a21834358341 Mon Sep 17 00:00:00 2001
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 18 Sep 2025 13:02:02 +0800
+Subject: iommu/vt-d: PRS isn't usable if PDS isn't supported
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+commit 5ef7e24c742038a5d8c626fdc0e3a21834358341 upstream.
+
+The specification, Section 7.10, "Software Steps to Drain Page Requests &
+Responses," requires software to submit an Invalidation Wait Descriptor
+(inv_wait_dsc) with the Page-request Drain (PD=1) flag set, along with
+the Invalidation Wait Completion Status Write flag (SW=1). It then waits
+for the Invalidation Wait Descriptor's completion.
+
+However, the PD field in the Invalidation Wait Descriptor is optional, as
+stated in Section 6.5.2.9, "Invalidation Wait Descriptor":
+
+"Page-request Drain (PD): Remapping hardware implementations reporting
+ Page-request draining as not supported (PDS = 0 in ECAP_REG) treat this
+ field as reserved."
+
+This implies that if the IOMMU doesn't support the PDS capability, software
+can't drain page requests and group responses as expected.
+
+Do not enable PCI/PRI if the IOMMU doesn't support PDS.
+
+Reported-by: Joel Granados <joel.granados@kernel.org>
+Closes: https://lore.kernel.org/r/20250909-jag-pds-v1-1-ad8cba0e494e@kernel.org
+Fixes: 66ac4db36f4c ("iommu/vt-d: Add page request draining support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20250915062946.120196-1-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3952,7 +3952,7 @@ static struct iommu_device *intel_iommu_
+                       }
+                       if (info->ats_supported && ecap_prs(iommu->ecap) &&
+-                          pci_pri_supported(pdev))
++                          ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
+                               info->pri_supported = 1;
+               }
+       }
diff --git a/queue-6.12/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch b/queue-6.12/kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch
new file mode 100644 (file)
index 0000000..a498f75
--- /dev/null
@@ -0,0 +1,74 @@
+From a15f37a40145c986cdf289a4b88390f35efdecc4 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 15 Sep 2025 14:09:17 +0200
+Subject: kernel/sys.c: fix the racy usage of task_lock(tsk->group_leader) in sys_prlimit64() paths
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit a15f37a40145c986cdf289a4b88390f35efdecc4 upstream.
+
+The usage of task_lock(tsk->group_leader) in the sys_prlimit64()->do_prlimit()
+path is very broken.
+
+sys_prlimit64() does get_task_struct(tsk), but this only protects the
+task_struct itself. If tsk != current and tsk is not a thread-group leader,
+that process can exit/exec and task_lock(tsk->group_leader) may use an
+already freed task_struct.
+
+Another problem is that sys_prlimit64() can race with mt-exec, which changes
+->group_leader. In this case do_prlimit() may take the wrong lock, or (worse)
+->group_leader may change between task_lock() and task_unlock().
+
+Change sys_prlimit64() to take tasklist_lock when necessary. This is not
+nice, but I don't see a better fix for -stable.
+
+Link: https://lkml.kernel.org/r/20250915120917.GA27702@redhat.com
+Fixes: 18c91bb2d872 ("prlimit: do not grab the tasklist_lock")
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Cc: Mateusz Guzik <mjguzik@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sys.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1698,6 +1698,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, u
+       struct rlimit old, new;
+       struct task_struct *tsk;
+       unsigned int checkflags = 0;
++      bool need_tasklist;
+       int ret;
+       if (old_rlim)
+@@ -1724,8 +1725,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, u
+       get_task_struct(tsk);
+       rcu_read_unlock();
+-      ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
+-                      old_rlim ? &old : NULL);
++      need_tasklist = !same_thread_group(tsk, current);
++      if (need_tasklist) {
++              /*
++               * Ensure we can't race with group exit or de_thread(),
++               * so tsk->group_leader can't be freed or changed until
++               * read_unlock(tasklist_lock) below.
++               */
++              read_lock(&tasklist_lock);
++              if (!pid_alive(tsk))
++                      ret = -ESRCH;
++      }
++
++      if (!ret) {
++              ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
++                              old_rlim ? &old : NULL);
++      }
++
++      if (need_tasklist)
++              read_unlock(&tasklist_lock);
+       if (!ret && old_rlim) {
+               rlim_to_rlim64(&old, &old64);
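For context, the user-visible entry point to this code is the prlimit64 syscall, exposed by glibc as prlimit(). Below is a minimal sketch of a cross-process query; the target PID is a placeholder, and reading another process's limits may fail with EPERM without sufficient privilege.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>
#include <sys/types.h>

int main(void)
{
    pid_t target = 1;            /* placeholder PID; pick a process you may inspect */
    struct rlimit old;

    /* NULL new limit: read-only query through the same sys_prlimit64() path */
    if (prlimit(target, RLIMIT_NOFILE, NULL, &old) != 0) {
        perror("prlimit");
        return 1;
    }
    printf("pid %d RLIMIT_NOFILE: cur=%llu max=%llu\n", (int)target,
           (unsigned long long)old.rlim_cur,
           (unsigned long long)old.rlim_max);
    return 0;
}
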
index 36cdbe26d95fbe41de629df9ee0306dbaca64aae..de9a674858479344c6d25a08ad7ff98aa4d45c57 100644 (file)
@@ -121,3 +121,26 @@ blk-crypto-fix-missing-blktrace-bio-split-events.patch
 btrfs-avoid-potential-out-of-bounds-in-btrfs_encode_fh.patch
 bus-mhi-ep-fix-chained-transfer-handling-in-read-path.patch
 bus-mhi-host-do-not-use-uninitialized-dev-pointer-in-mhi_init_irq_setup.patch
+clk-qcom-tcsrcc-x1e80100-set-the-bi_tcxo-as-parent-to-edp-refclk.patch
+copy_sighand-handle-architectures-where-sizeof-unsigned-long-sizeof-u64.patch
+cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
+cpufreq-intel_pstate-fix-object-lifecycle-issue-in-update_qos_request.patch
+crypto-aspeed-fix-dma_unmap_sg-direction.patch
+crypto-atmel-fix-dma_unmap_sg-direction.patch
+crypto-rockchip-fix-dma_unmap_sg-nents-value.patch
+eventpoll-replace-rwlock-with-spinlock.patch
+fbdev-fix-logic-error-in-offb-name-match.patch
+fs-ntfs3-fix-a-resource-leak-bug-in-wnd_extend.patch
+fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
+fuse-fix-possibly-missing-fuse_copy_finish-call-in-fuse_notify.patch
+fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
+iio-adc-pac1934-fix-channel-disable-configuration.patch
+iio-dac-ad5360-use-int-type-to-store-negative-error-codes.patch
+iio-dac-ad5421-use-int-type-to-store-negative-error-codes.patch
+iio-frequency-adf4350-fix-prescaler-usage.patch
+iio-xilinx-ams-fix-ams_alarm_thr_direct_mask.patch
+iio-xilinx-ams-unmask-interrupts-after-updating-alarms.patch
+init-handle-bootloader-identifier-in-kernel-parameters.patch
+iio-imu-inv_icm42600-drop-redundant-pm_runtime-reinitialization-in-resume.patch
+iommu-vt-d-prs-isn-t-usable-if-pds-isn-t-supported.patch
+kernel-sys.c-fix-the-racy-usage-of-task_lock-tsk-group_leader-in-sys_prlimit64-paths.patch