git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Dec 2025 11:43:43 +0000 (12:43 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Dec 2025 11:43:43 +0000 (12:43 +0100)
added patches:
block-rate-limit-capacity-change-info-log.patch
btrfs-don-t-rewrite-ret-from-inode_permission.patch
clk-keystone-syscon-clk-fix-regmap-leak-on-probe-failure.patch
crypto-scatterwalk-fix-memcpy_sglist-to-always-succeed.patch
dt-bindings-clock-mmcc-sdm660-add-missing-mdss-reset.patch
efi-add-missing-static-initializer-for-efi_mm-cpus_allowed_lock.patch
floppy-fix-for-page_size-4kb.patch
fs-ntfs3-fix-mount-failure-for-sparse-runs-in-run_unpack.patch
fs-pm-fix-reverse-check-in-filesystems_freeze_callback.patch
gfs2-fix-freeze-error-handling.patch
io_uring-fix-filename-leak-in-__io_openat_prep.patch
io_uring-fix-min_wait-wakeups-for-sqpoll.patch
io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch
jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
kallsyms-fix-wrong-big-kernel-symbol-type-read-from-procfs.patch
keys-trusted-fix-a-memory-leak-in-tpm2_load_cmd.patch
ktest.pl-fix-uninitialized-var-in-config-bisect.pl.patch
lib-crypto-x86-blake2s-fix-32-bit-arg-treated-as-64-bit.patch
mmc-sdhci-msm-avoid-early-clock-doubling-during-hs400-transition.patch
perf-arm_cspmu-fix-error-handling-in-arm_cspmu_impl_unregister.patch
phy-exynos5-usbdrd-fix-clock-prepare-imbalance.patch
printk-avoid-scheduling-irq_work-on-suspend.patch
rust-dma-add-helpers-for-architectures-without-config_has_dma.patch
rust-drm-gem-fix-missing-header-in-object-rustdoc.patch
rust-io-add-typedef-for-phys_addr_t.patch
rust-io-define-resourcesize-as-resource_size_t.patch
rust-io-move-resourcesize-to-top-level-io-module.patch
rust_binder-avoid-mem-take-on-delivered_deaths.patch
s390-dasd-fix-gendisk-parent-after-copy-pair-swap.patch
samples-rust-fix-endianness-issue-in-rust_driver_pci.patch
sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
sched_ext-fix-bypass-depth-leak-on-scx_enable-failure.patch
sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
sched_ext-fix-the-memleak-for-sch-helper-objects.patch
tpm-cap-the-number-of-pcr-banks.patch
wifi-mt76-fix-dts-power-limits-on-little-endian-systems.patch
x86-mce-do-not-clear-bank-s-poll-bit-in-mce_poll_banks-on-amd-smca-systems.patch

38 files changed:
queue-6.18/block-rate-limit-capacity-change-info-log.patch [new file with mode: 0644]
queue-6.18/btrfs-don-t-rewrite-ret-from-inode_permission.patch [new file with mode: 0644]
queue-6.18/clk-keystone-syscon-clk-fix-regmap-leak-on-probe-failure.patch [new file with mode: 0644]
queue-6.18/crypto-scatterwalk-fix-memcpy_sglist-to-always-succeed.patch [new file with mode: 0644]
queue-6.18/dt-bindings-clock-mmcc-sdm660-add-missing-mdss-reset.patch [new file with mode: 0644]
queue-6.18/efi-add-missing-static-initializer-for-efi_mm-cpus_allowed_lock.patch [new file with mode: 0644]
queue-6.18/floppy-fix-for-page_size-4kb.patch [new file with mode: 0644]
queue-6.18/fs-ntfs3-fix-mount-failure-for-sparse-runs-in-run_unpack.patch [new file with mode: 0644]
queue-6.18/fs-pm-fix-reverse-check-in-filesystems_freeze_callback.patch [new file with mode: 0644]
queue-6.18/gfs2-fix-freeze-error-handling.patch [new file with mode: 0644]
queue-6.18/io_uring-fix-filename-leak-in-__io_openat_prep.patch [new file with mode: 0644]
queue-6.18/io_uring-fix-min_wait-wakeups-for-sqpoll.patch [new file with mode: 0644]
queue-6.18/io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch [new file with mode: 0644]
queue-6.18/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch [new file with mode: 0644]
queue-6.18/kallsyms-fix-wrong-big-kernel-symbol-type-read-from-procfs.patch [new file with mode: 0644]
queue-6.18/keys-trusted-fix-a-memory-leak-in-tpm2_load_cmd.patch [new file with mode: 0644]
queue-6.18/ktest.pl-fix-uninitialized-var-in-config-bisect.pl.patch [new file with mode: 0644]
queue-6.18/lib-crypto-x86-blake2s-fix-32-bit-arg-treated-as-64-bit.patch [new file with mode: 0644]
queue-6.18/mmc-sdhci-msm-avoid-early-clock-doubling-during-hs400-transition.patch [new file with mode: 0644]
queue-6.18/perf-arm_cspmu-fix-error-handling-in-arm_cspmu_impl_unregister.patch [new file with mode: 0644]
queue-6.18/phy-exynos5-usbdrd-fix-clock-prepare-imbalance.patch [new file with mode: 0644]
queue-6.18/printk-avoid-scheduling-irq_work-on-suspend.patch [new file with mode: 0644]
queue-6.18/rust-dma-add-helpers-for-architectures-without-config_has_dma.patch [new file with mode: 0644]
queue-6.18/rust-drm-gem-fix-missing-header-in-object-rustdoc.patch [new file with mode: 0644]
queue-6.18/rust-io-add-typedef-for-phys_addr_t.patch [new file with mode: 0644]
queue-6.18/rust-io-define-resourcesize-as-resource_size_t.patch [new file with mode: 0644]
queue-6.18/rust-io-move-resourcesize-to-top-level-io-module.patch [new file with mode: 0644]
queue-6.18/rust_binder-avoid-mem-take-on-delivered_deaths.patch [new file with mode: 0644]
queue-6.18/s390-dasd-fix-gendisk-parent-after-copy-pair-swap.patch [new file with mode: 0644]
queue-6.18/samples-rust-fix-endianness-issue-in-rust_driver_pci.patch [new file with mode: 0644]
queue-6.18/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch [new file with mode: 0644]
queue-6.18/sched_ext-fix-bypass-depth-leak-on-scx_enable-failure.patch [new file with mode: 0644]
queue-6.18/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch [new file with mode: 0644]
queue-6.18/sched_ext-fix-the-memleak-for-sch-helper-objects.patch [new file with mode: 0644]
queue-6.18/series
queue-6.18/tpm-cap-the-number-of-pcr-banks.patch [new file with mode: 0644]
queue-6.18/wifi-mt76-fix-dts-power-limits-on-little-endian-systems.patch [new file with mode: 0644]
queue-6.18/x86-mce-do-not-clear-bank-s-poll-bit-in-mce_poll_banks-on-amd-smca-systems.patch [new file with mode: 0644]

diff --git a/queue-6.18/block-rate-limit-capacity-change-info-log.patch b/queue-6.18/block-rate-limit-capacity-change-info-log.patch
new file mode 100644 (file)
index 0000000..3743ed5
--- /dev/null
@@ -0,0 +1,39 @@
+From 3179a5f7f86bcc3acd5d6fb2a29f891ef5615852 Mon Sep 17 00:00:00 2001
+From: Li Chen <chenl311@chinatelecom.cn>
+Date: Mon, 17 Nov 2025 13:34:07 +0800
+Subject: block: rate-limit capacity change info log
+
+From: Li Chen <chenl311@chinatelecom.cn>
+
+commit 3179a5f7f86bcc3acd5d6fb2a29f891ef5615852 upstream.
+
+Loop devices under heavy stress-ng loop stressor load can trigger many
+capacity change events in a short time. Each event prints an info
+message from set_capacity_and_notify(), flooding the console and
+contributing to soft lockups on slow consoles.
+
+Switch the printk in set_capacity_and_notify() to
+pr_info_ratelimited() so frequent capacity changes do not spam
+the log while still reporting occasional changes.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Li Chen <chenl311@chinatelecom.cn>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/genhd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -90,7 +90,7 @@ bool set_capacity_and_notify(struct gend
+           (disk->flags & GENHD_FL_HIDDEN))
+               return false;
+-      pr_info("%s: detected capacity change from %lld to %lld\n",
++      pr_info_ratelimited("%s: detected capacity change from %lld to %lld\n",
+               disk->disk_name, capacity, size);
+       /*
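
For context, pr_info_ratelimited() used in the patch above boils down to the
standard printk ratelimit pattern (by default roughly a burst of 10 messages
per 5-second window). A minimal open-coded sketch, assuming the default
ratelimit parameters; report_capacity_change() is a hypothetical helper, not
code from the patch:

    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    static void report_capacity_change(const char *name,
                                       long long old_cap, long long new_cap)
    {
            /* One shared state: at most a burst of messages per interval. */
            static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                          DEFAULT_RATELIMIT_BURST);

            if (__ratelimit(&rs))
                    pr_info("%s: detected capacity change from %lld to %lld\n",
                            name, old_cap, new_cap);
    }

Using the pr_info_ratelimited() wrapper instead keeps the call site a
one-line change, which is why the patch above only touches the format-string
line.
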
diff --git a/queue-6.18/btrfs-don-t-rewrite-ret-from-inode_permission.patch b/queue-6.18/btrfs-don-t-rewrite-ret-from-inode_permission.patch
new file mode 100644 (file)
index 0000000..ca8d8cb
--- /dev/null
@@ -0,0 +1,46 @@
+From 0185c2292c600993199bc6b1f342ad47a9e8c678 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Tue, 18 Nov 2025 17:08:41 +0100
+Subject: btrfs: don't rewrite ret from inode_permission
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 0185c2292c600993199bc6b1f342ad47a9e8c678 upstream.
+
+In our user safe ino resolve ioctl we'll just turn any ret into -EACCES
+from inode_permission().  This is redundant, and could potentially be
+wrong if we had an ENOMEM in the security layer or some such other
+error, so simply return the actual return value.
+
+Note: The patch was taken from v5 of fscrypt patchset
+(https://lore.kernel.org/linux-btrfs/cover.1706116485.git.josef@toxicpanda.com/)
+which was handled over time by various people: Omar Sandoval, Sweet Tea
+Dorminy, Josef Bacik.
+
+Fixes: 23d0b79dfaed ("btrfs: Add unprivileged version of ino_lookup ioctl")
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Daniel Vacek <neelx@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+[ add note ]
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ioctl.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1913,10 +1913,8 @@ static int btrfs_search_path_in_tree_use
+                       ret = inode_permission(idmap, &temp_inode->vfs_inode,
+                                              MAY_READ | MAY_EXEC);
+                       iput(&temp_inode->vfs_inode);
+-                      if (ret) {
+-                              ret = -EACCES;
++                      if (ret)
+                               goto out_put;
+-                      }
+                       if (key.offset == upper_limit)
+                               break;
diff --git a/queue-6.18/clk-keystone-syscon-clk-fix-regmap-leak-on-probe-failure.patch b/queue-6.18/clk-keystone-syscon-clk-fix-regmap-leak-on-probe-failure.patch
new file mode 100644 (file)
index 0000000..cf0d3f2
--- /dev/null
@@ -0,0 +1,40 @@
+From 9c75986a298f121ed2c6599b05e51d9a34e77068 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Thu, 27 Nov 2025 14:42:43 +0100
+Subject: clk: keystone: syscon-clk: fix regmap leak on probe failure
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 9c75986a298f121ed2c6599b05e51d9a34e77068 upstream.
+
+The mmio regmap allocated during probe is never freed.
+
+Switch to using the device managed allocator so that the regmap is
+released on probe failures (e.g. probe deferral) and on driver unbind.
+
+Fixes: a250cd4c1901 ("clk: keystone: syscon-clk: Do not use syscon helper to build regmap")
+Cc: stable@vger.kernel.org     # 6.15
+Cc: Andrew Davis <afd@ti.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/keystone/syscon-clk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
+index c509929da854..ecf180a7949c 100644
+--- a/drivers/clk/keystone/syscon-clk.c
++++ b/drivers/clk/keystone/syscon-clk.c
+@@ -129,7 +129,7 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+-      regmap = regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
++      regmap = devm_regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
+       if (IS_ERR(regmap))
+               return dev_err_probe(dev, PTR_ERR(regmap),
+                                    "failed to get regmap\n");
+-- 
+2.52.0
+
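
The fix above relies on the devm_ pattern: resources registered through
devm_*() are released automatically when probe fails or the device is
unbound, so no explicit regmap_exit() is needed on any error path. A minimal
probe sketch showing the shape of that pattern (example_probe() and
example_regmap_cfg are hypothetical, not the actual driver code):

    #include <linux/platform_device.h>
    #include <linux/regmap.h>

    static const struct regmap_config example_regmap_cfg = {
            .reg_bits   = 32,
            .val_bits   = 32,
            .reg_stride = 4,
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            void __iomem *base;
            struct regmap *regmap;

            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            regmap = devm_regmap_init_mmio(dev, base, &example_regmap_cfg);
            if (IS_ERR(regmap))
                    return dev_err_probe(dev, PTR_ERR(regmap),
                                         "failed to init regmap\n");

            /* Any later failure (e.g. -EPROBE_DEFER) releases the regmap. */
            return 0;
    }
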
diff --git a/queue-6.18/crypto-scatterwalk-fix-memcpy_sglist-to-always-succeed.patch b/queue-6.18/crypto-scatterwalk-fix-memcpy_sglist-to-always-succeed.patch
new file mode 100644 (file)
index 0000000..8d1b782
--- /dev/null
@@ -0,0 +1,220 @@
+From 4dffc9bbffb9ccfcda730d899c97c553599e7ca8 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Sat, 15 Nov 2025 15:08:16 -0800
+Subject: crypto: scatterwalk - Fix memcpy_sglist() to always succeed
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 4dffc9bbffb9ccfcda730d899c97c553599e7ca8 upstream.
+
+The original implementation of memcpy_sglist() was broken because it
+didn't handle scatterlists that describe exactly the same memory, which
+is a case that many callers rely on.  The current implementation is
+broken too because it calls the skcipher_walk functions which can fail.
+It ignores any errors from those functions.
+
+Fix it by replacing it with a new implementation written from scratch.
+It always succeeds.  It's also a bit faster, since it avoids the
+overhead of skcipher_walk.  skcipher_walk includes a lot of
+functionality (such as alignmask handling) that's irrelevant here.
+
+Reported-by: Colin Ian King <coking@nvidia.com>
+Closes: https://lore.kernel.org/r/20251114122620.111623-1-coking@nvidia.com
+Fixes: 131bdceca1f0 ("crypto: scatterwalk - Add memcpy_sglist")
+Fixes: 0f8d42bf128d ("crypto: scatterwalk - Move skcipher walk and use it for memcpy_sglist")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/scatterwalk.c         | 95 +++++++++++++++++++++++++++++++-----
+ include/crypto/scatterwalk.h | 52 ++++++++++++--------
+ 2 files changed, 114 insertions(+), 33 deletions(-)
+
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index 1d010e2a1b1a..b95e5974e327 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -101,26 +101,97 @@ void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ }
+ EXPORT_SYMBOL_GPL(memcpy_to_sglist);
++/**
++ * memcpy_sglist() - Copy data from one scatterlist to another
++ * @dst: The destination scatterlist.  Can be NULL if @nbytes == 0.
++ * @src: The source scatterlist.  Can be NULL if @nbytes == 0.
++ * @nbytes: Number of bytes to copy
++ *
++ * The scatterlists can describe exactly the same memory, in which case this
++ * function is a no-op.  No other overlaps are supported.
++ *
++ * Context: Any context
++ */
+ void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+                  unsigned int nbytes)
+ {
+-      struct skcipher_walk walk = {};
++      unsigned int src_offset, dst_offset;
+-      if (unlikely(nbytes == 0)) /* in case sg == NULL */
++      if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
+               return;
+-      walk.total = nbytes;
++      src_offset = src->offset;
++      dst_offset = dst->offset;
++      for (;;) {
++              /* Compute the length to copy this step. */
++              unsigned int len = min3(src->offset + src->length - src_offset,
++                                      dst->offset + dst->length - dst_offset,
++                                      nbytes);
++              struct page *src_page = sg_page(src);
++              struct page *dst_page = sg_page(dst);
++              const void *src_virt;
++              void *dst_virt;
+-      scatterwalk_start(&walk.in, src);
+-      scatterwalk_start(&walk.out, dst);
++              if (IS_ENABLED(CONFIG_HIGHMEM)) {
++                      /* HIGHMEM: we may have to actually map the pages. */
++                      const unsigned int src_oip = offset_in_page(src_offset);
++                      const unsigned int dst_oip = offset_in_page(dst_offset);
++                      const unsigned int limit = PAGE_SIZE;
+-      skcipher_walk_first(&walk, true);
+-      do {
+-              if (walk.src.virt.addr != walk.dst.virt.addr)
+-                      memcpy(walk.dst.virt.addr, walk.src.virt.addr,
+-                             walk.nbytes);
+-              skcipher_walk_done(&walk, 0);
+-      } while (walk.nbytes);
++                      /* Further limit len to not cross a page boundary. */
++                      len = min3(len, limit - src_oip, limit - dst_oip);
++
++                      /* Compute the source and destination pages. */
++                      src_page += src_offset / PAGE_SIZE;
++                      dst_page += dst_offset / PAGE_SIZE;
++
++                      if (src_page != dst_page) {
++                              /* Copy between different pages. */
++                              memcpy_page(dst_page, dst_oip,
++                                          src_page, src_oip, len);
++                              flush_dcache_page(dst_page);
++                      } else if (src_oip != dst_oip) {
++                              /* Copy between different parts of same page. */
++                              dst_virt = kmap_local_page(dst_page);
++                              memcpy(dst_virt + dst_oip, dst_virt + src_oip,
++                                     len);
++                              kunmap_local(dst_virt);
++                              flush_dcache_page(dst_page);
++                      } /* Else, it's the same memory.  No action needed. */
++              } else {
++                      /*
++                       * !HIGHMEM: no mapping needed.  Just work in the linear
++                       * buffer of each sg entry.  Note that we can cross page
++                       * boundaries, as they are not significant in this case.
++                       */
++                      src_virt = page_address(src_page) + src_offset;
++                      dst_virt = page_address(dst_page) + dst_offset;
++                      if (src_virt != dst_virt) {
++                              memcpy(dst_virt, src_virt, len);
++                              if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
++                                      __scatterwalk_flush_dcache_pages(
++                                              dst_page, dst_offset, len);
++                      } /* Else, it's the same memory.  No action needed. */
++              }
++              nbytes -= len;
++              if (nbytes == 0) /* No more to copy? */
++                      break;
++
++              /*
++               * There's more to copy.  Advance the offsets by the length
++               * copied this step, and advance the sg entries as needed.
++               */
++              src_offset += len;
++              if (src_offset >= src->offset + src->length) {
++                      src = sg_next(src);
++                      src_offset = src->offset;
++              }
++              dst_offset += len;
++              if (dst_offset >= dst->offset + dst->length) {
++                      dst = sg_next(dst);
++                      dst_offset = dst->offset;
++              }
++      }
+ }
+ EXPORT_SYMBOL_GPL(memcpy_sglist);
+diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
+index 83d14376ff2b..f485454e3955 100644
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -227,6 +227,34 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
+       scatterwalk_advance(walk, nbytes);
+ }
++/*
++ * Flush the dcache of any pages that overlap the region
++ * [offset, offset + nbytes) relative to base_page.
++ *
++ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
++ * that all relevant code (including the call to sg_page() in the caller, if
++ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
++ */
++static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
++                                                  unsigned int offset,
++                                                  unsigned int nbytes)
++{
++      unsigned int num_pages;
++
++      base_page += offset / PAGE_SIZE;
++      offset %= PAGE_SIZE;
++
++      /*
++       * This is an overflow-safe version of
++       * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
++       */
++      num_pages = nbytes / PAGE_SIZE;
++      num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
++
++      for (unsigned int i = 0; i < num_pages; i++)
++              flush_dcache_page(base_page + i);
++}
++
+ /**
+  * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
+  * @walk: the scatter_walk
+@@ -240,27 +268,9 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
+                                       unsigned int nbytes)
+ {
+       scatterwalk_unmap(walk);
+-      /*
+-       * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
+-       * relying on flush_dcache_page() being a no-op when not implemented,
+-       * since otherwise the BUG_ON in sg_page() does not get optimized out.
+-       * This also avoids having to consider whether the loop would get
+-       * reliably optimized out or not.
+-       */
+-      if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
+-              struct page *base_page;
+-              unsigned int offset;
+-              int start, end, i;
+-
+-              base_page = sg_page(walk->sg);
+-              offset = walk->offset;
+-              start = offset >> PAGE_SHIFT;
+-              end = start + (nbytes >> PAGE_SHIFT);
+-              end += (offset_in_page(offset) + offset_in_page(nbytes) +
+-                      PAGE_SIZE - 1) >> PAGE_SHIFT;
+-              for (i = start; i < end; i++)
+-                      flush_dcache_page(base_page + i);
+-      }
++      if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
++              __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
++                                               walk->offset, nbytes);
+       scatterwalk_advance(walk, nbytes);
+ }
+-- 
+2.52.0
+
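
The key requirement called out in the commit message above is that
memcpy_sglist() must treat source and destination scatterlists describing
exactly the same memory as a no-op rather than undefined behaviour. A
minimal sketch of such a caller, assuming a linear buffer wrapped with
sg_init_one() (hypothetical helper, not taken from the patch):

    #include <linux/scatterlist.h>
    #include <crypto/scatterwalk.h>

    static void copy_possibly_aliased(void *buf, unsigned int len)
    {
            struct scatterlist src, dst;

            sg_init_one(&src, buf, len);
            sg_init_one(&dst, buf, len);

            /*
             * src and dst alias the same bytes; the rewritten
             * memcpy_sglist() detects this per step and skips the copy.
             */
            memcpy_sglist(&dst, &src, len);
    }
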
diff --git a/queue-6.18/dt-bindings-clock-mmcc-sdm660-add-missing-mdss-reset.patch b/queue-6.18/dt-bindings-clock-mmcc-sdm660-add-missing-mdss-reset.patch
new file mode 100644 (file)
index 0000000..96ad9b0
--- /dev/null
@@ -0,0 +1,40 @@
+From c57210bc15371caa06a5d4040e7d8aaeed4cb661 Mon Sep 17 00:00:00 2001
+From: Alexey Minnekhanov <alexeymin@postmarketos.org>
+Date: Sun, 16 Nov 2025 04:12:33 +0300
+Subject: dt-bindings: clock: mmcc-sdm660: Add missing MDSS reset
+
+From: Alexey Minnekhanov <alexeymin@postmarketos.org>
+
+commit c57210bc15371caa06a5d4040e7d8aaeed4cb661 upstream.
+
+Add a definition for the display subsystem reset control, so the display
+driver can properly reset the display controller, clearing any
+configuration left there by the bootloader. Since the PM domains rework
+in 6.17, this reset has become necessary for the display to function.
+
+Fixes: 0e789b491ba0 ("pmdomain: core: Leave powered-on genpds on until sync_state")
+Cc: stable@vger.kernel.org # 6.17
+Signed-off-by: Alexey Minnekhanov <alexeymin@postmarketos.org>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Link: https://lore.kernel.org/r/20251116-sdm660-mdss-reset-v2-1-6219bec0a97f@postmarketos.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/dt-bindings/clock/qcom,mmcc-sdm660.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
+index f9dbc21cb5c7..ee2a89dae72d 100644
+--- a/include/dt-bindings/clock/qcom,mmcc-sdm660.h
++++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
+@@ -157,6 +157,7 @@
+ #define BIMC_SMMU_GDSC                                                        7
+ #define CAMSS_MICRO_BCR                                0
++#define MDSS_BCR                              1
+ #endif
+-- 
+2.52.0
+
diff --git a/queue-6.18/efi-add-missing-static-initializer-for-efi_mm-cpus_allowed_lock.patch b/queue-6.18/efi-add-missing-static-initializer-for-efi_mm-cpus_allowed_lock.patch
new file mode 100644 (file)
index 0000000..0268a8a
--- /dev/null
@@ -0,0 +1,32 @@
+From 40374d308e4e456048d83991e937f13fc8bda8bf Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Wed, 15 Oct 2025 22:56:36 +0200
+Subject: efi: Add missing static initializer for efi_mm::cpus_allowed_lock
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 40374d308e4e456048d83991e937f13fc8bda8bf upstream.
+
+Initialize the cpus_allowed_lock struct member of efi_mm.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/efi.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -74,6 +74,9 @@ struct mm_struct efi_mm = {
+       .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+       .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
+       .cpu_bitmap             = { [BITS_TO_LONGS(NR_CPUS)] = 0},
++#ifdef CONFIG_SCHED_MM_CID
++      .cpus_allowed_lock      = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.cpus_allowed_lock),
++#endif
+ };
+ struct workqueue_struct *efi_rts_wq;
diff --git a/queue-6.18/floppy-fix-for-page_size-4kb.patch b/queue-6.18/floppy-fix-for-page_size-4kb.patch
new file mode 100644 (file)
index 0000000..aa5bdab
--- /dev/null
@@ -0,0 +1,46 @@
+From 82d20481024cbae2ea87fe8b86d12961bfda7169 Mon Sep 17 00:00:00 2001
+From: Rene Rebe <rene@exactco.de>
+Date: Fri, 14 Nov 2025 14:41:27 +0100
+Subject: floppy: fix for PAGE_SIZE != 4KB
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rene Rebe <rene@exactco.de>
+
+commit 82d20481024cbae2ea87fe8b86d12961bfda7169 upstream.
+
+For years I wondered why the floppy driver does not just work on
+sparc64, e.g:
+
+root@SUNW_375_0066:# disktype /dev/fd0
+disktype: Can't open /dev/fd0: No such device or address
+
+[  525.341906] disktype: attempt to access beyond end of device
+fd0: rw=0, sector=0, nr_sectors = 16 limit=8
+[  525.341991] floppy: error 10 while reading block 0
+
+Turns out floppy.c __floppy_read_block_0 tries to read one page for
+the first test read to determine the disk size and thus fails if that
+is greater than 4k. Adjust minimum MAX_DISK_SIZE to PAGE_SIZE to fix
+floppy on sparc64 and likely all other PAGE_SIZE != 4KB configs.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: René Rebe <rene@exactco.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/floppy.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -329,7 +329,7 @@ static bool initialized;
+  * This default is used whenever the current disk size is unknown.
+  * [Now it is rather a minimum]
+  */
+-#define MAX_DISK_SIZE 4               /* 3984 */
++#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
+ /*
+  * globals used by 'result()'
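
To make the arithmetic concrete (a reading of the code, so treat it as an
illustration): MAX_DISK_SIZE is in KiB, so the old value of 4 advertises a
provisional capacity of 4 KiB, i.e. 8 sectors of 512 bytes, which matches the
"limit=8" in the error above. The probe read in __floppy_read_block_0() is
one page, which on sparc64 (8 KiB pages) is 16 sectors and therefore exceeds
that limit. With MAX_DISK_SIZE defined as PAGE_SIZE / 1024, the provisional
capacity always covers exactly one page, so the probe read fits on any page
size while the value remains 4 on 4 KiB-page systems.
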
diff --git a/queue-6.18/fs-ntfs3-fix-mount-failure-for-sparse-runs-in-run_unpack.patch b/queue-6.18/fs-ntfs3-fix-mount-failure-for-sparse-runs-in-run_unpack.patch
new file mode 100644 (file)
index 0000000..7bff073
--- /dev/null
@@ -0,0 +1,41 @@
+From 801f614ba263cb37624982b27b4c82f3c3c597a9 Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Date: Thu, 18 Sep 2025 13:35:24 +0300
+Subject: fs/ntfs3: fix mount failure for sparse runs in run_unpack()
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+commit 801f614ba263cb37624982b27b4c82f3c3c597a9 upstream.
+
+Some NTFS volumes failed to mount because sparse data runs were not
+handled correctly during runlist unpacking. The code performed arithmetic
+on the special SPARSE_LCN64 marker, leading to invalid LCN values and
+mount errors.
+
+Add an explicit check for the case described above, marking the run as
+sparse without applying arithmetic.
+
+Fixes: 736fc7bf5f68 ("fs: ntfs3: Fix integer overflow in run_unpack()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/run.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/ntfs3/run.c
++++ b/fs/ntfs3/run.c
+@@ -984,8 +984,12 @@ int run_unpack(struct runs_tree *run, st
+                       if (!dlcn)
+                               return -EINVAL;
+-                      if (check_add_overflow(prev_lcn, dlcn, &lcn))
++                      /* Check special combination: 0 + SPARSE_LCN64. */
++                      if (!prev_lcn && dlcn == SPARSE_LCN64) {
++                              lcn = SPARSE_LCN64;
++                      } else if (check_add_overflow(prev_lcn, dlcn, &lcn)) {
+                               return -EINVAL;
++                      }
+                       prev_lcn = lcn;
+               } else {
+                       /* The size of 'dlcn' can't be > 8. */
diff --git a/queue-6.18/fs-pm-fix-reverse-check-in-filesystems_freeze_callback.patch b/queue-6.18/fs-pm-fix-reverse-check-in-filesystems_freeze_callback.patch
new file mode 100644 (file)
index 0000000..c4accb5
--- /dev/null
@@ -0,0 +1,42 @@
+From 222047f68e8565c558728f792f6fef152a1d4d51 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 2 Dec 2025 19:27:29 +0100
+Subject: fs: PM: Fix reverse check in filesystems_freeze_callback()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 222047f68e8565c558728f792f6fef152a1d4d51 upstream.
+
+The freeze_all_ptr check in filesystems_freeze_callback() introduced by
+commit a3f8f8662771 ("power: always freeze efivarfs") is reversed, which
+quite confusingly causes all file systems to be frozen when
+filesystem_freeze_enabled is false.
+
+On my systems it causes the WARN_ON_ONCE() in __set_task_frozen() to
+trigger, most likely due to an attempt to freeze a file system that is
+not ready for that.
+
+Add a logical negation to the check in question to reverse it as
+appropriate.
+
+Fixes: a3f8f8662771 ("power: always freeze efivarfs")
+Cc: 6.18+ <stable@vger.kernel.org> # 6.18+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://patch.msgid.link/12788397.O9o76ZdvQC@rafael.j.wysocki
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/super.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1188,7 +1188,7 @@ static void filesystems_freeze_callback(
+       if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
+               return;
+-      if (freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
++      if (!freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
+               return;
+       if (!get_active_super(sb))
diff --git a/queue-6.18/gfs2-fix-freeze-error-handling.patch b/queue-6.18/gfs2-fix-freeze-error-handling.patch
new file mode 100644 (file)
index 0000000..0669ba0
--- /dev/null
@@ -0,0 +1,42 @@
+From 4cfc7d5a4a01d2133b278cdbb1371fba1b419174 Mon Sep 17 00:00:00 2001
+From: Alexey Velichayshiy <a.velichayshiy@ispras.ru>
+Date: Mon, 17 Nov 2025 12:05:18 +0300
+Subject: gfs2: fix freeze error handling
+
+From: Alexey Velichayshiy <a.velichayshiy@ispras.ru>
+
+commit 4cfc7d5a4a01d2133b278cdbb1371fba1b419174 upstream.
+
+After commit b77b4a4815a9 ("gfs2: Rework freeze / thaw logic"),
+the freeze error handling is broken because gfs2_do_thaw()
+overwrites the 'error' variable, causing incorrect processing
+of the original freeze error.
+
+Fix this by calling gfs2_do_thaw() when gfs2_lock_fs_check_clean()
+fails but ignoring its return value to preserve the original
+freeze error for proper reporting.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: b77b4a4815a9 ("gfs2: Rework freeze / thaw logic")
+Cc: stable@vger.kernel.org # v6.5+
+Signed-off-by: Alexey Velichayshiy <a.velichayshiy@ispras.ru>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/super.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -749,9 +749,7 @@ static int gfs2_freeze_super(struct supe
+                       break;
+               }
+-              error = gfs2_do_thaw(sdp, who, freeze_owner);
+-              if (error)
+-                      goto out;
++              (void)gfs2_do_thaw(sdp, who, freeze_owner);
+               if (error == -EBUSY)
+                       fs_err(sdp, "waiting for recovery before freeze\n");
diff --git a/queue-6.18/io_uring-fix-filename-leak-in-__io_openat_prep.patch b/queue-6.18/io_uring-fix-filename-leak-in-__io_openat_prep.patch
new file mode 100644 (file)
index 0000000..e7bd40a
--- /dev/null
@@ -0,0 +1,49 @@
+From b14fad555302a2104948feaff70503b64c80ac01 Mon Sep 17 00:00:00 2001
+From: Prithvi Tambewagh <activprithvi@gmail.com>
+Date: Thu, 25 Dec 2025 12:58:29 +0530
+Subject: io_uring: fix filename leak in __io_openat_prep()
+
+From: Prithvi Tambewagh <activprithvi@gmail.com>
+
+commit b14fad555302a2104948feaff70503b64c80ac01 upstream.
+
+ __io_openat_prep() allocates a struct filename using getname(). However,
+when the file is to be installed into the fixed file table and the
+O_CLOEXEC flag is set, the function returns early. At that
+point, the request doesn't have REQ_F_NEED_CLEANUP flag set. Due to this,
+the memory for the newly allocated struct filename is not cleaned up,
+causing a memory leak.
+
+Fix this by setting the REQ_F_NEED_CLEANUP for the request just after the
+successful getname() call, so that when the request is torn down, the
+filename will be cleaned up, along with other resources needing cleanup.
+
+Reported-by: syzbot+00e61c43eb5e4740438f@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=00e61c43eb5e4740438f
+Tested-by: syzbot+00e61c43eb5e4740438f@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Prithvi Tambewagh <activprithvi@gmail.com>
+Fixes: b9445598d8c6 ("io_uring: openat directly into fixed fd table")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/openclose.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/io_uring/openclose.c
++++ b/io_uring/openclose.c
+@@ -73,13 +73,13 @@ static int __io_openat_prep(struct io_ki
+               open->filename = NULL;
+               return ret;
+       }
++      req->flags |= REQ_F_NEED_CLEANUP;
+       open->file_slot = READ_ONCE(sqe->file_index);
+       if (open->file_slot && (open->how.flags & O_CLOEXEC))
+               return -EINVAL;
+       open->nofile = rlimit(RLIMIT_NOFILE);
+-      req->flags |= REQ_F_NEED_CLEANUP;
+       if (io_openat_force_async(open))
+               req->flags |= REQ_F_FORCE_ASYNC;
+       return 0;
diff --git a/queue-6.18/io_uring-fix-min_wait-wakeups-for-sqpoll.patch b/queue-6.18/io_uring-fix-min_wait-wakeups-for-sqpoll.patch
new file mode 100644 (file)
index 0000000..c4c9046
--- /dev/null
@@ -0,0 +1,64 @@
+From e15cb2200b934e507273510ba6bc747d5cde24a3 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 9 Dec 2025 13:25:23 -0700
+Subject: io_uring: fix min_wait wakeups for SQPOLL
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit e15cb2200b934e507273510ba6bc747d5cde24a3 upstream.
+
+Using min_wait, two timeouts are given:
+
+1) The min_wait timeout, within which up to 'wait_nr' events are
+   waited for.
+2) The overall long timeout, which is entered if no events are generated
+   in the min_wait window.
+
+If the min_wait has expired, any event being posted must wake the task.
+For SQPOLL, that isn't the case, as it won't trigger the io_has_work()
+condition, as it will have already processed the task_work that happened
+when an event was posted. This causes any event to trigger post the
+min_wait to not always cause the waiting application to wakeup, and
+instead it will wait until the overall timeout has expired. This can be
+shown in a test case that has a 1 second min_wait, with a 5 second
+overall wait, even if an event triggers after 1.5 seconds:
+
+axboe@m2max-kvm /d/iouring-mre (master)> zig-out/bin/iouring
+info: MIN_TIMEOUT supported: true, features: 0x3ffff
+info: Testing: min_wait=1000ms, timeout=5s, wait_nr=4
+info: 1 cqes in 5000.2ms
+
+where the expected result should be:
+
+axboe@m2max-kvm /d/iouring-mre (master)> zig-out/bin/iouring
+info: MIN_TIMEOUT supported: true, features: 0x3ffff
+info: Testing: min_wait=1000ms, timeout=5s, wait_nr=4
+info: 1 cqes in 1500.3ms
+
+When the min_wait timeout triggers, reset the number of completions
+needed to wake the task. This should ensure that any future events will
+wake the task, regardless of how many events it originally wanted to
+wait for.
+
+Reported-by: Tip ten Brink <tip@tenbrinkmeijs.com>
+Cc: stable@vger.kernel.org
+Fixes: 1100c4a2656d ("io_uring: add support for batch wait timeout")
+Link: https://github.com/axboe/liburing/issues/1477
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2551,6 +2551,9 @@ static enum hrtimer_restart io_cqring_mi
+                       goto out_wake;
+       }
++      /* any generated CQE posted past this time should wake us up */
++      iowq->cq_tail = iowq->cq_min_tail;
++
+       hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup);
+       hrtimer_set_expires(timer, iowq->timeout);
+       return HRTIMER_RESTART;
diff --git a/queue-6.18/io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch b/queue-6.18/io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch
new file mode 100644 (file)
index 0000000..d8e82d0
--- /dev/null
@@ -0,0 +1,53 @@
+From 84230ad2d2afbf0c44c32967e525c0ad92e26b4e Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 1 Dec 2025 13:25:22 -0700
+Subject: io_uring/poll: correctly handle io_poll_add() return value on update
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 84230ad2d2afbf0c44c32967e525c0ad92e26b4e upstream.
+
+When the core of io_uring was updated to handle completions
+consistently and with fixed return codes, the POLL_REMOVE opcode
+with updates got slightly broken. If a POLL_ADD is pending and
+then POLL_REMOVE is used to update the events of that request, if that
+update causes the POLL_ADD to now trigger, then that completion is lost
+and a CQE is never posted.
+
+Additionally, ensure that if an update does cause an existing POLL_ADD
+to complete, that the completion value isn't always overwritten with
+-ECANCELED. For that case, whatever io_poll_add() set the value to
+should just be retained.
+
+Cc: stable@vger.kernel.org
+Fixes: 97b388d70b53 ("io_uring: handle completions in the core")
+Reported-by: syzbot+641eec6b7af1f62f2b99@syzkaller.appspotmail.com
+Tested-by: syzbot+641eec6b7af1f62f2b99@syzkaller.appspotmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/poll.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -936,12 +936,17 @@ int io_poll_remove(struct io_kiocb *req,
+               ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
+               /* successfully updated, don't complete poll request */
+-              if (!ret2 || ret2 == -EIOCBQUEUED)
++              if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
+                       goto out;
++              /* request completed as part of the update, complete it */
++              else if (ret2 == IOU_COMPLETE)
++                      goto complete;
+       }
+-      req_set_fail(preq);
+       io_req_set_res(preq, -ECANCELED, 0);
++complete:
++      if (preq->cqe.res < 0)
++              req_set_fail(preq);
+       preq->io_task_work.func = io_req_task_complete;
+       io_req_task_work_add(preq);
+ out:
diff --git a/queue-6.18/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch b/queue-6.18/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
new file mode 100644 (file)
index 0000000..b78b363
--- /dev/null
@@ -0,0 +1,89 @@
+From 6abfe107894af7e8ce3a2e120c619d81ee764ad5 Mon Sep 17 00:00:00 2001
+From: Ye Bin <yebin10@huawei.com>
+Date: Mon, 3 Nov 2025 09:01:23 +0800
+Subject: jbd2: fix the inconsistency between checksum and data in memory for journal sb
+
+From: Ye Bin <yebin10@huawei.com>
+
+commit 6abfe107894af7e8ce3a2e120c619d81ee764ad5 upstream.
+
+Copying the file system while it is mounted as read-only results in
+a mount failure:
+[~]# mkfs.ext4 -F /dev/sdc
+[~]# mount /dev/sdc -o ro /mnt/test
+[~]# dd if=/dev/sdc of=/dev/sda bs=1M
+[~]# mount /dev/sda /mnt/test1
+[ 1094.849826] JBD2: journal checksum error
+[ 1094.850927] EXT4-fs (sda): Could not load journal inode
+mount: mount /dev/sda on /mnt/test1 failed: Bad message
+
+The process described above is just an abstracted way I came up with to
+reproduce the issue. In the actual scenario, the file system was mounted
+read-only and then copied while it was still mounted. It was found that
+the mount operation failed. The user intended to verify the data or use
+it as a backup, and this action was performed during a version upgrade.
+Above issue may happen as follows:
+ext4_fill_super
+ set_journal_csum_feature_set(sb)
+  if (ext4_has_metadata_csum(sb))
+   incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+  if (test_opt(sb, JOURNAL_CHECKSUM)
+   jbd2_journal_set_features(sbi->s_journal, compat, 0, incompat);
+    lock_buffer(journal->j_sb_buffer);
+    sb->s_feature_incompat  |= cpu_to_be32(incompat);
+    //The data in the journal sb was modified, but the checksum was not
+      updated, so the data remaining in memory has a mismatch between the
+      data and the checksum.
+    unlock_buffer(journal->j_sb_buffer);
+
+In this case, the journal sb copied over is in a state where the checksum
+and data are inconsistent, so mounting fails.
+To solve the above issue, update the checksum in memory after modifying
+the journal sb.
+
+Fixes: 4fd5ea43bc11 ("jbd2: checksum journal superblock")
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Message-ID: <20251103010123.3753631-1-yebin@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jbd2/journal.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2349,6 +2349,12 @@ int jbd2_journal_set_features(journal_t
+       sb->s_feature_compat    |= cpu_to_be32(compat);
+       sb->s_feature_ro_compat |= cpu_to_be32(ro);
+       sb->s_feature_incompat  |= cpu_to_be32(incompat);
++      /*
++       * Update the checksum now so that it is valid even for read-only
++       * filesystems where jbd2_write_superblock() doesn't get called.
++       */
++      if (jbd2_journal_has_csum_v2or3(journal))
++              sb->s_checksum = jbd2_superblock_csum(sb);
+       unlock_buffer(journal->j_sb_buffer);
+       jbd2_journal_init_transaction_limits(journal);
+@@ -2378,9 +2384,17 @@ void jbd2_journal_clear_features(journal
+       sb = journal->j_superblock;
++      lock_buffer(journal->j_sb_buffer);
+       sb->s_feature_compat    &= ~cpu_to_be32(compat);
+       sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
+       sb->s_feature_incompat  &= ~cpu_to_be32(incompat);
++      /*
++       * Update the checksum now so that it is valid even for read-only
++       * filesystems where jbd2_write_superblock() doesn't get called.
++       */
++      if (jbd2_journal_has_csum_v2or3(journal))
++              sb->s_checksum = jbd2_superblock_csum(sb);
++      unlock_buffer(journal->j_sb_buffer);
+       jbd2_journal_init_transaction_limits(journal);
+ }
+ EXPORT_SYMBOL(jbd2_journal_clear_features);
diff --git a/queue-6.18/kallsyms-fix-wrong-big-kernel-symbol-type-read-from-procfs.patch b/queue-6.18/kallsyms-fix-wrong-big-kernel-symbol-type-read-from-procfs.patch
new file mode 100644 (file)
index 0000000..c2e0ba6
--- /dev/null
@@ -0,0 +1,70 @@
+From f3f9f42232dee596d15491ca3f611d02174db49c Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian@huaweicloud.com>
+Date: Fri, 11 Oct 2024 22:38:53 +0800
+Subject: kallsyms: Fix wrong "big" kernel symbol type read from procfs
+
+From: Zheng Yejian <zhengyejian@huaweicloud.com>
+
+commit f3f9f42232dee596d15491ca3f611d02174db49c upstream.
+
+Currently when the length of a symbol is longer than 0x7f characters,
+its type shown in /proc/kallsyms can be incorrect.
+
+I found this issue when reading the code, but it can be reproduced by
+following steps:
+
+  1. Define a function which symbol length is 130 characters:
+
+    #define X13(x) x##x##x##x##x##x##x##x##x##x##x##x##x
+    static noinline void X13(x123456789)(void)
+    {
+        printk("hello world\n");
+    }
+
+  2. The type in vmlinux is 't':
+
+    $ nm vmlinux | grep x123456
+    ffffffff816290f0 t x123456789x123456789x123456789x12[...]
+
+  3. Then boot the kernel, the type shown in /proc/kallsyms becomes 'g'
+     instead of the expected 't':
+
+    # cat /proc/kallsyms | grep x123456
+    ffffffff816290f0 g x123456789x123456789x123456789x12[...]
+
+The root cause is that, after commit 73bbb94466fd ("kallsyms: support
+"big" kernel symbols"), ULEB128 was used to encode symbol name length.
+That is, for "big" kernel symbols of which name length is longer than
+0x7f characters, the length info is encoded into 2 bytes.
+
+kallsyms_get_symbol_type() expects to read the first char of the
+symbol name which indicates the symbol type. However, due to the
+"big" symbol case not being handled, the symbol type read from
+/proc/kallsyms may be wrong, so handle it properly.
+
+Cc: stable@vger.kernel.org
+Fixes: 73bbb94466fd ("kallsyms: support "big" kernel symbols")
+Signed-off-by: Zheng Yejian <zhengyejian@huaweicloud.com>
+Acked-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20241011143853.3022643-1-zhengyejian@huaweicloud.com
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kallsyms.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -103,8 +103,11 @@ static char kallsyms_get_symbol_type(uns
+ {
+       /*
+        * Get just the first code, look it up in the token table,
+-       * and return the first char from this token.
++       * and return the first char from this token. If MSB of length
++       * is 1, it is a "big" symbol, so needs an additional byte.
+        */
++      if (kallsyms_names[off] & 0x80)
++              off++;
+       return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
+ }
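
For context, the layout the fix above deals with: since commit 73bbb94466fd
each compressed symbol name is preceded by a length prefix that is one byte
for lengths up to 0x7f, and two bytes (low 7 bits first, MSB of the first
byte set) for "big" symbols. A sketch of the decoding, modelled on
kallsyms_expand_symbol(); skip_name_len() is a hypothetical helper for
illustration only:

    #include <linux/types.h>

    static unsigned int skip_name_len(const u8 *names, unsigned int off,
                                      unsigned int *len)
    {
            *len = names[off++];
            if (*len & 0x80) {
                    /* "Big" symbol: the length continues in the next byte. */
                    *len = (*len & 0x7f) | (names[off++] << 7);
            }
            /* off now points at the first token index, i.e. the type char. */
            return off;
    }

The bug was that kallsyms_get_symbol_type() read the byte at off + 1
unconditionally, which for a "big" symbol is still part of the length prefix
rather than the first token of the name.
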
diff --git a/queue-6.18/keys-trusted-fix-a-memory-leak-in-tpm2_load_cmd.patch b/queue-6.18/keys-trusted-fix-a-memory-leak-in-tpm2_load_cmd.patch
new file mode 100644 (file)
index 0000000..1532048
--- /dev/null
@@ -0,0 +1,50 @@
+From 62cd5d480b9762ce70d720a81fa5b373052ae05f Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Date: Sat, 18 Oct 2025 13:30:36 +0300
+Subject: KEYS: trusted: Fix a memory leak in tpm2_load_cmd
+
+From: Jarkko Sakkinen <jarkko@kernel.org>
+
+commit 62cd5d480b9762ce70d720a81fa5b373052ae05f upstream.
+
+'tpm2_load_cmd' allocates a temporary blob indirectly via 'tpm2_key_decode'
+but it is not freed in the failure paths. Address this by wrapping the blob
+with a cleanup helper.
+
+Cc: stable@vger.kernel.org # v5.13+
+Fixes: f2219745250f ("security: keys: trusted: use ASN.1 TPM2 key format for the blobs")
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/keys/trusted-keys/trusted_tpm2.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -387,6 +387,7 @@ static int tpm2_load_cmd(struct tpm_chip
+                        struct trusted_key_options *options,
+                        u32 *blob_handle)
+ {
++      u8 *blob_ref __free(kfree) = NULL;
+       struct tpm_buf buf;
+       unsigned int private_len;
+       unsigned int public_len;
+@@ -400,6 +401,9 @@ static int tpm2_load_cmd(struct tpm_chip
+               /* old form */
+               blob = payload->blob;
+               payload->old_format = 1;
++      } else {
++              /* Bind for cleanup: */
++              blob_ref = blob;
+       }
+       /* new format carries keyhandle but old format doesn't */
+@@ -464,8 +468,6 @@ static int tpm2_load_cmd(struct tpm_chip
+                       (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ out:
+-      if (blob != payload->blob)
+-              kfree(blob);
+       tpm_buf_destroy(&buf);
+       if (rc > 0)
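
The fix above uses the scoped-cleanup helpers from <linux/cleanup.h>: a
pointer annotated with __free(kfree) is handed to kfree() automatically when
it goes out of scope, on every return path, and freeing a NULL pointer is
harmless, so leaving blob_ref NULL in the old-format case (where blob points
into payload->blob) is safe. A minimal sketch of the pattern; example() is a
hypothetical function, not code from the patch:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    static int example(size_t len)
    {
            u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            if (len < 16)
                    return -EINVAL;     /* buf is freed here automatically */

            /* ... use buf ... */
            return 0;                   /* and freed here as well */
    }
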
diff --git a/queue-6.18/ktest.pl-fix-uninitialized-var-in-config-bisect.pl.patch b/queue-6.18/ktest.pl-fix-uninitialized-var-in-config-bisect.pl.patch
new file mode 100644 (file)
index 0000000..183bfa1
--- /dev/null
@@ -0,0 +1,49 @@
+From d3042cbe84a060b4df764eb6c5300bbe20d125ca Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Wed, 3 Dec 2025 18:09:24 -0500
+Subject: ktest.pl: Fix uninitialized var in config-bisect.pl
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit d3042cbe84a060b4df764eb6c5300bbe20d125ca upstream.
+
+The error path of copying the old config used the wrong variable in the
+error message:
+
+ $ mkdir /tmp/build
+ $ ./tools/testing/ktest/config-bisect.pl -b /tmp/build config-good /tmp/config-bad
+ $ chmod 0 /tmp/build
+ $ ./tools/testing/ktest/config-bisect.pl -b /tmp/build config-good /tmp/config-bad good
+ cp /tmp/build//.config config-good.tmp ... [0 seconds] FAILED!
+ Use of uninitialized value $config in concatenation (.) or string at ./tools/testing/ktest/config-bisect.pl line 744.
+ failed to copy  to config-good.tmp
+
+When it should have shown:
+
+ failed to copy /tmp/build//.config to config-good.tmp
+
+Cc: stable@vger.kernel.org
+Cc: John 'Warthog9' Hawley <warthog9@kernel.org>
+Fixes: 0f0db065999cf ("ktest: Add standalone config-bisect.pl program")
+Link: https://patch.msgid.link/20251203180924.6862bd26@gandalf.local.home
+Reported-by: "John W. Krahn" <jwkrahn@shaw.ca>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/ktest/config-bisect.pl |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/ktest/config-bisect.pl
++++ b/tools/testing/ktest/config-bisect.pl
+@@ -741,9 +741,9 @@ if ($start) {
+       die "Can not find file $bad\n";
+     }
+     if ($val eq "good") {
+-      run_command "cp $output_config $good" or die "failed to copy $config to $good\n";
++      run_command "cp $output_config $good" or die "failed to copy $output_config to $good\n";
+     } elsif ($val eq "bad") {
+-      run_command "cp $output_config $bad" or die "failed to copy $config to $bad\n";
++      run_command "cp $output_config $bad" or die "failed to copy $output_config to $bad\n";
+     }
+ }
diff --git a/queue-6.18/lib-crypto-x86-blake2s-fix-32-bit-arg-treated-as-64-bit.patch b/queue-6.18/lib-crypto-x86-blake2s-fix-32-bit-arg-treated-as-64-bit.patch
new file mode 100644 (file)
index 0000000..465cfb4
--- /dev/null
@@ -0,0 +1,57 @@
+From 2f22115709fc7ebcfa40af3367a508fbbd2f71e9 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Sun, 2 Nov 2025 15:42:04 -0800
+Subject: lib/crypto: x86/blake2s: Fix 32-bit arg treated as 64-bit
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 2f22115709fc7ebcfa40af3367a508fbbd2f71e9 upstream.
+
+In the C code, the 'inc' argument to the assembly functions
+blake2s_compress_ssse3() and blake2s_compress_avx512() is declared with
+type u32, matching blake2s_compress().  The assembly code then reads it
+from the 64-bit %rcx.  However, the ABI doesn't guarantee zero-extension
+to 64 bits, nor do gcc or clang guarantee it.  Therefore, fix these
+functions to read this argument from the 32-bit %ecx.
+
+In theory, this bug could have caused the wrong 'inc' value to be used,
+causing incorrect BLAKE2s hashes.  In practice, probably not: I've fixed
+essentially this same bug in many other assembly files too, but there's
+never been a real report of it having caused a problem.  In x86_64, all
+writes to 32-bit registers are zero-extended to 64 bits.  That results
+in zero-extension in nearly all situations.  I've only been able to
+demonstrate a lack of zero-extension with a somewhat contrived example
+involving truncation, e.g. when the C code has a u64 variable holding
+0x1234567800000040 and passes it as a u32 expecting it to be truncated
+to 0x40 (64).  But that's not what the real code does, of course.
+
+Fixes: ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")
+Cc: stable@vger.kernel.org
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20251102234209.62133-2-ebiggers@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/x86/blake2s-core.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/lib/crypto/x86/blake2s-core.S
++++ b/lib/crypto/x86/blake2s-core.S
+@@ -52,7 +52,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
+       movdqa          ROT16(%rip),%xmm12
+       movdqa          ROR328(%rip),%xmm13
+       movdqu          0x20(%rdi),%xmm14
+-      movq            %rcx,%xmm15
++      movd            %ecx,%xmm15
+       leaq            SIGMA+0xa0(%rip),%r8
+       jmp             .Lbeginofloop
+       .align          32
+@@ -176,7 +176,7 @@ SYM_FUNC_START(blake2s_compress_avx512)
+       vmovdqu         (%rdi),%xmm0
+       vmovdqu         0x10(%rdi),%xmm1
+       vmovdqu         0x20(%rdi),%xmm4
+-      vmovq           %rcx,%xmm5
++      vmovd           %ecx,%xmm5
+       vmovdqa         IV(%rip),%xmm14
+       vmovdqa         IV+16(%rip),%xmm15
+       jmp             .Lblake2s_compress_avx512_mainloop
diff --git a/queue-6.18/mmc-sdhci-msm-avoid-early-clock-doubling-during-hs400-transition.patch b/queue-6.18/mmc-sdhci-msm-avoid-early-clock-doubling-during-hs400-transition.patch
new file mode 100644 (file)
index 0000000..993d53e
--- /dev/null
@@ -0,0 +1,121 @@
+From b1f856b1727c2eaa4be2c6d7cd7a8ed052bbeb87 Mon Sep 17 00:00:00 2001
+From: Sarthak Garg <sarthak.garg@oss.qualcomm.com>
+Date: Fri, 14 Nov 2025 13:58:24 +0530
+Subject: mmc: sdhci-msm: Avoid early clock doubling during HS400 transition
+
+From: Sarthak Garg <sarthak.garg@oss.qualcomm.com>
+
+commit b1f856b1727c2eaa4be2c6d7cd7a8ed052bbeb87 upstream.
+
+According to the hardware programming guide, the clock frequency must
+remain below 52MHz during the transition to HS400 mode.
+
+However,in the current implementation, the timing is set to HS400 (a
+DDR mode) before adjusting the clock. This causes the clock to double
+prematurely to 104MHz during the transition phase, violating the
+specification and potentially resulting in CRC errors or CMD timeouts.
+
+This change ensures that clock doubling is avoided during intermediate
+transitions and is applied only when the card requires a 200MHz clock
+for HS400 operation.
+
+Signed-off-by: Sarthak Garg <sarthak.garg@oss.qualcomm.com>
+Reviewed-by: Bjorn Andersson <andersson@kernel.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-msm.c |   27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -344,41 +344,43 @@ static void sdhci_msm_v5_variant_writel_
+       writel_relaxed(val, host->ioaddr + offset);
+ }
+-static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
++static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host,
++                                                  unsigned int clock,
++                                                  unsigned int timing)
+ {
+-      struct mmc_ios ios = host->mmc->ios;
+       /*
+        * The SDHC requires internal clock frequency to be double the
+        * actual clock that will be set for DDR mode. The controller
+        * uses the faster clock(100/400MHz) for some of its parts and
+        * send the actual required clock (50/200MHz) to the card.
+        */
+-      if (ios.timing == MMC_TIMING_UHS_DDR50 ||
+-          ios.timing == MMC_TIMING_MMC_DDR52 ||
+-          ios.timing == MMC_TIMING_MMC_HS400 ||
++      if (timing == MMC_TIMING_UHS_DDR50 ||
++          timing == MMC_TIMING_MMC_DDR52 ||
++          (timing == MMC_TIMING_MMC_HS400 &&
++          clock == MMC_HS200_MAX_DTR) ||
+           host->flags & SDHCI_HS400_TUNING)
+               return 2;
+       return 1;
+ }
+ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
+-                                          unsigned int clock)
++                                          unsigned int clock,
++                                          unsigned int timing)
+ {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+-      struct mmc_ios curr_ios = host->mmc->ios;
+       struct clk *core_clk = msm_host->bulk_clks[0].clk;
+       unsigned long achieved_rate;
+       unsigned int desired_rate;
+       unsigned int mult;
+       int rc;
+-      mult = msm_get_clock_mult_for_bus_mode(host);
++      mult = msm_get_clock_mult_for_bus_mode(host, clock, timing);
+       desired_rate = clock * mult;
+       rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
+       if (rc) {
+               pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+-                     mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
++                     mmc_hostname(host->mmc), desired_rate, timing);
+               return;
+       }
+@@ -397,7 +399,7 @@ static void msm_set_clock_rate_for_bus_m
+       msm_host->clk_rate = desired_rate;
+       pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+-               mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
++               mmc_hostname(host->mmc), achieved_rate, timing);
+ }
+ /* Platform specific tuning */
+@@ -1239,7 +1241,7 @@ static int sdhci_msm_execute_tuning(stru
+        */
+       if (host->flags & SDHCI_HS400_TUNING) {
+               sdhci_msm_hc_select_mode(host);
+-              msm_set_clock_rate_for_bus_mode(host, ios.clock);
++              msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
+               host->flags &= ~SDHCI_HS400_TUNING;
+       }
+@@ -1864,6 +1866,7 @@ static void sdhci_msm_set_clock(struct s
+ {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++      struct mmc_ios ios = host->mmc->ios;
+       if (!clock) {
+               host->mmc->actual_clock = msm_host->clk_rate = 0;
+@@ -1872,7 +1875,7 @@ static void sdhci_msm_set_clock(struct s
+       sdhci_msm_hc_select_mode(host);
+-      msm_set_clock_rate_for_bus_mode(host, clock);
++      msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
+ out:
+       __sdhci_msm_set_clock(host, clock);
+ }
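
A quick arithmetic sketch of the sdhci-msm fix above, as standalone C (the two frequency constants are assumptions taken from the commit message, not the driver's defines): with the old logic any HS400 timing doubled the requested clock, so the 52MHz transition step became 104MHz, while the new logic doubles only at the final 200MHz step.

/* Illustrative only: models the multiplier choice described in the
 * sdhci-msm commit message above; constants are assumptions, not the
 * driver's actual defines.
 */
#include <stdio.h>
#include <stdbool.h>

#define HS400_TRANSITION_CLK	 52000000u	/* must stay <= 52 MHz */
#define HS400_FULL_SPEED_CLK	200000000u	/* final HS400 clock */

static unsigned int old_mult(bool timing_is_hs400)
{
	/* old logic: any HS400 timing doubles the clock */
	return timing_is_hs400 ? 2 : 1;
}

static unsigned int new_mult(bool timing_is_hs400, unsigned int clock)
{
	/* fixed logic: double only at the final 200 MHz step */
	return (timing_is_hs400 && clock == HS400_FULL_SPEED_CLK) ? 2 : 1;
}

int main(void)
{
	unsigned int clk = HS400_TRANSITION_CLK;

	printf("transition step: old %u Hz, new %u Hz\n",
	       clk * old_mult(true), clk * new_mult(true, clk));
	printf("final step:      old %u Hz, new %u Hz\n",
	       HS400_FULL_SPEED_CLK * old_mult(true),
	       HS400_FULL_SPEED_CLK * new_mult(true, HS400_FULL_SPEED_CLK));
	return 0;
}
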
diff --git a/queue-6.18/perf-arm_cspmu-fix-error-handling-in-arm_cspmu_impl_unregister.patch b/queue-6.18/perf-arm_cspmu-fix-error-handling-in-arm_cspmu_impl_unregister.patch
new file mode 100644 (file)
index 0000000..cec3c54
--- /dev/null
@@ -0,0 +1,41 @@
+From 970e1e41805f0bd49dc234330a9390f4708d097d Mon Sep 17 00:00:00 2001
+From: Ma Ke <make24@iscas.ac.cn>
+Date: Wed, 22 Oct 2025 19:53:25 +0800
+Subject: perf: arm_cspmu: fix error handling in arm_cspmu_impl_unregister()
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+commit 970e1e41805f0bd49dc234330a9390f4708d097d upstream.
+
+driver_find_device() calls get_device() to increment the reference
+count once a matching device is found. device_release_driver()
+releases the driver, but it does not decrease the reference count that
+was incremented by driver_find_device(). At the end of the loop, there
+is no put_device() to balance the reference count. To avoid reference
+count leakage, add put_device() to decrease the reference count.
+
+Found by code review.
+
+Cc: stable@vger.kernel.org
+Fixes: bfc653aa89cb ("perf: arm_cspmu: Separate Arm and vendor module")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/perf/arm_cspmu/arm_cspmu.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -1365,8 +1365,10 @@ void arm_cspmu_impl_unregister(const str
+       /* Unbind the driver from all matching backend devices. */
+       while ((dev = driver_find_device(&arm_cspmu_driver.driver, NULL,
+-                      match, arm_cspmu_match_device)))
++                      match, arm_cspmu_match_device))) {
+               device_release_driver(dev);
++              put_device(dev);
++      }
+       mutex_lock(&arm_cspmu_lock);
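
The reference-counting rule behind the arm_cspmu fix, as a standalone sketch (get_dev(), put_dev() and find_dev() are hypothetical stand-ins, not the driver-core API): a lookup helper that returns its match with an extra reference must be balanced by a put in the caller's loop, even when the object is only used to release its driver.

/* Standalone illustration of the leak pattern fixed above; the helpers
 * below are hypothetical models, not driver-core functions.
 */
#include <stdio.h>

struct fake_dev { int refcount; };

static struct fake_dev *get_dev(struct fake_dev *d) { d->refcount++; return d; }
static void put_dev(struct fake_dev *d) { d->refcount--; }

/* models driver_find_device(): returns the match with an extra reference */
static struct fake_dev *find_dev(struct fake_dev *d, int *remaining)
{
	if (!*remaining)
		return NULL;
	(*remaining)--;
	return get_dev(d);
}

int main(void)
{
	struct fake_dev dev = { .refcount = 1 };
	struct fake_dev *d;
	int matches = 3;

	while ((d = find_dev(&dev, &matches))) {
		/* release the driver here ... */
		put_dev(d);		/* without this, refcount ends at 4 */
	}
	printf("refcount after loop: %d\n", dev.refcount);	/* prints 1 */
	return 0;
}
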
diff --git a/queue-6.18/phy-exynos5-usbdrd-fix-clock-prepare-imbalance.patch b/queue-6.18/phy-exynos5-usbdrd-fix-clock-prepare-imbalance.patch
new file mode 100644 (file)
index 0000000..7cf4720
--- /dev/null
@@ -0,0 +1,43 @@
+From 5e428e45bf17a8f3784099ca5ded16e3b5d59766 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Andr=C3=A9=20Draszik?= <andre.draszik@linaro.org>
+Date: Mon, 6 Oct 2025 09:07:12 +0100
+Subject: phy: exynos5-usbdrd: fix clock prepare imbalance
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Draszik <andre.draszik@linaro.org>
+
+commit 5e428e45bf17a8f3784099ca5ded16e3b5d59766 upstream.
+
+Commit f4fb9c4d7f94 ("phy: exynos5-usbdrd: allow DWC3 runtime suspend
+with UDC bound (E850+)") incorrectly added clk_bulk_disable() as the
+inverse of clk_bulk_prepare_enable() while it should have of course
+used clk_bulk_disable_unprepare(). This means incorrect reference
+counts to the CMU driver remain.
+
+Update the code accordingly.
+
+Fixes: f4fb9c4d7f94 ("phy: exynos5-usbdrd: allow DWC3 runtime suspend with UDC bound (E850+)")
+CC: stable@vger.kernel.org
+Signed-off-by: André Draszik <andre.draszik@linaro.org>
+Reviewed-by: Sam Protsenko <semen.protsenko@linaro.org>
+Reviewed-by: Peter Griffin <peter.griffin@linaro.org>
+Link: https://patch.msgid.link/20251006-gs101-usb-phy-clk-imbalance-v1-1-205b206126cf@linaro.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/samsung/phy-exynos5-usbdrd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
++++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+@@ -1823,7 +1823,7 @@ static int exynos5_usbdrd_orien_sw_set(s
+               phy_drd->orientation = orientation;
+       }
+-      clk_bulk_disable(phy_drd->drv_data->n_clks, phy_drd->clks);
++      clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+       return 0;
+ }
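
A counter-based sketch of the imbalance fixed above (the functions below only model the prepare/enable reference counts, they are not the real clk API): clk_bulk_prepare_enable() raises both counts, so undoing it with a plain disable leaves the prepare count elevated.

/* Stand-in model of the clk prepare/enable refcounts, showing why a
 * bare disable does not balance prepare_enable().
 */
#include <stdio.h>

struct fake_clk { int prepare_count; int enable_count; };

static void prepare_enable(struct fake_clk *c)	  { c->prepare_count++; c->enable_count++; }
static void disable(struct fake_clk *c)		  { c->enable_count--; }
static void disable_unprepare(struct fake_clk *c) { c->enable_count--; c->prepare_count--; }

int main(void)
{
	struct fake_clk a = {0, 0}, b = {0, 0};

	prepare_enable(&a);
	disable(&a);		/* buggy pairing: prepare_count stays at 1 */

	prepare_enable(&b);
	disable_unprepare(&b);	/* fixed pairing: both counts return to 0 */

	printf("buggy: prepare=%d enable=%d\n", a.prepare_count, a.enable_count);
	printf("fixed: prepare=%d enable=%d\n", b.prepare_count, b.enable_count);
	return 0;
}
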
diff --git a/queue-6.18/printk-avoid-scheduling-irq_work-on-suspend.patch b/queue-6.18/printk-avoid-scheduling-irq_work-on-suspend.patch
new file mode 100644 (file)
index 0000000..f6c7b97
--- /dev/null
@@ -0,0 +1,211 @@
+From 26873e3e7f0cb26c45e6ad63656f9fe36b2aa31b Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 13 Nov 2025 17:09:48 +0106
+Subject: printk: Avoid scheduling irq_work on suspend
+
+From: John Ogness <john.ogness@linutronix.de>
+
+commit 26873e3e7f0cb26c45e6ad63656f9fe36b2aa31b upstream.
+
+Allowing irq_work to be scheduled while trying to suspend has shown
+to cause problems as some architectures interpret the pending
+interrupts as a reason to not suspend. This became a problem for
+printk() with the introduction of NBCON consoles. With every
+printk() call, NBCON console printing kthreads are woken by queueing
+irq_work. This means that irq_work continues to be queued due to
+printk() calls late in the suspend procedure.
+
+Avoid this problem by preventing printk() from queueing irq_work
+once console suspending has begun. This applies to triggering NBCON
+and legacy deferred printing as well as klogd waiters.
+
+Since triggering of NBCON threaded printing relies on irq_work, the
+pr_flush() within console_suspend_all() is used to perform the final
+flushing before suspending consoles and blocking irq_work queueing.
+NBCON consoles that are not suspended (due to the usage of the
+"no_console_suspend" boot argument) transition to atomic flushing.
+
+Introduce a new global variable @console_irqwork_blocked to flag
+when irq_work queueing is to be avoided. The flag is used by
+printk_get_console_flush_type() to avoid allowing deferred printing
+and switch NBCON consoles to atomic flushing. It is also used by
+vprintk_emit() to avoid klogd waking.
+
+Add WARN_ON_ONCE(console_irqwork_blocked) to the irq_work queuing
+functions to catch any code that attempts to queue printk irq_work
+during the suspending/resuming procedure.
+
+Cc: stable@vger.kernel.org # 6.13.x because no drivers in 6.12.x
+Fixes: 6b93bb41f6ea ("printk: Add non-BKL (nbcon) console basic infrastructure")
+Closes: https://lore.kernel.org/lkml/DB9PR04MB8429E7DDF2D93C2695DE401D92C4A@DB9PR04MB8429.eurprd04.prod.outlook.com
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Tested-by: Sherry Sun <sherry.sun@nxp.com>
+Link: https://patch.msgid.link/20251113160351.113031-3-john.ogness@linutronix.de
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/printk/internal.h |    8 ++++--
+ kernel/printk/nbcon.c    |    7 +++++
+ kernel/printk/printk.c   |   58 ++++++++++++++++++++++++++++++++++-------------
+ 3 files changed, 55 insertions(+), 18 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -230,6 +230,8 @@ struct console_flush_type {
+       bool    legacy_offload;
+ };
++extern bool console_irqwork_blocked;
++
+ /*
+  * Identify which console flushing methods should be used in the context of
+  * the caller.
+@@ -241,7 +243,7 @@ static inline void printk_get_console_fl
+       switch (nbcon_get_default_prio()) {
+       case NBCON_PRIO_NORMAL:
+               if (have_nbcon_console && !have_boot_console) {
+-                      if (printk_kthreads_running)
++                      if (printk_kthreads_running && !console_irqwork_blocked)
+                               ft->nbcon_offload = true;
+                       else
+                               ft->nbcon_atomic = true;
+@@ -251,7 +253,7 @@ static inline void printk_get_console_fl
+               if (have_legacy_console || have_boot_console) {
+                       if (!is_printk_legacy_deferred())
+                               ft->legacy_direct = true;
+-                      else
++                      else if (!console_irqwork_blocked)
+                               ft->legacy_offload = true;
+               }
+               break;
+@@ -264,7 +266,7 @@ static inline void printk_get_console_fl
+               if (have_legacy_console || have_boot_console) {
+                       if (!is_printk_legacy_deferred())
+                               ft->legacy_direct = true;
+-                      else
++                      else if (!console_irqwork_blocked)
+                               ft->legacy_offload = true;
+               }
+               break;
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1276,6 +1276,13 @@ void nbcon_kthreads_wake(void)
+       if (!printk_kthreads_running)
+               return;
++      /*
++       * It is not allowed to call this function when console irq_work
++       * is blocked.
++       */
++      if (WARN_ON_ONCE(console_irqwork_blocked))
++              return;
++
+       cookie = console_srcu_read_lock();
+       for_each_console_srcu(con) {
+               if (!(console_srcu_read_flags(con) & CON_NBCON))
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -462,6 +462,9 @@ bool have_boot_console;
+ /* See printk_legacy_allow_panic_sync() for details. */
+ bool legacy_allow_panic_sync;
++/* Avoid using irq_work when suspending. */
++bool console_irqwork_blocked;
++
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
+@@ -2426,7 +2429,7 @@ asmlinkage int vprintk_emit(int facility
+       if (ft.legacy_offload)
+               defer_console_output();
+-      else
++      else if (!console_irqwork_blocked)
+               wake_up_klogd();
+       return printed_len;
+@@ -2730,10 +2733,20 @@ void console_suspend_all(void)
+ {
+       struct console *con;
++      if (console_suspend_enabled)
++              pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
++
++      /*
++       * Flush any console backlog and then avoid queueing irq_work until
++       * console_resume_all(). Until then deferred printing is no longer
++       * triggered, NBCON consoles transition to atomic flushing, and
++       * any klogd waiters are not triggered.
++       */
++      pr_flush(1000, true);
++      console_irqwork_blocked = true;
++
+       if (!console_suspend_enabled)
+               return;
+-      pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
+-      pr_flush(1000, true);
+       console_list_lock();
+       for_each_console(con)
+@@ -2754,26 +2767,34 @@ void console_resume_all(void)
+       struct console_flush_type ft;
+       struct console *con;
+-      if (!console_suspend_enabled)
+-              return;
+-
+-      console_list_lock();
+-      for_each_console(con)
+-              console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
+-      console_list_unlock();
+-
+       /*
+-       * Ensure that all SRCU list walks have completed. All printing
+-       * contexts must be able to see they are no longer suspended so
+-       * that they are guaranteed to wake up and resume printing.
++       * Allow queueing irq_work. After restoring console state, deferred
++       * printing and any klogd waiters need to be triggered in case there
++       * is now a console backlog.
+        */
+-      synchronize_srcu(&console_srcu);
++      console_irqwork_blocked = false;
++
++      if (console_suspend_enabled) {
++              console_list_lock();
++              for_each_console(con)
++                      console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
++              console_list_unlock();
++
++              /*
++               * Ensure that all SRCU list walks have completed. All printing
++               * contexts must be able to see they are no longer suspended so
++               * that they are guaranteed to wake up and resume printing.
++               */
++              synchronize_srcu(&console_srcu);
++      }
+       printk_get_console_flush_type(&ft);
+       if (ft.nbcon_offload)
+               nbcon_kthreads_wake();
+       if (ft.legacy_offload)
+               defer_console_output();
++      else
++              wake_up_klogd();
+       pr_flush(1000, true);
+ }
+@@ -4511,6 +4532,13 @@ static void __wake_up_klogd(int val)
+       if (!printk_percpu_data_ready())
+               return;
++      /*
++       * It is not allowed to call this function when console irq_work
++       * is blocked.
++       */
++      if (WARN_ON_ONCE(console_irqwork_blocked))
++              return;
++
+       preempt_disable();
+       /*
+        * Guarantee any new records can be seen by tasks preparing to wait
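
A minimal userspace model of the gating introduced by the printk patch above, assuming only that showing the control flow is enough (the names below are illustrative, not the printk internals): callers consult a global blocked flag before queueing deferred work, and the queueing helper itself warns if it is reached while the flag is set.

/* Illustrative sketch of gating deferred work behind a suspend-time
 * flag; not the printk code.
 */
#include <stdio.h>
#include <stdbool.h>

static bool irqwork_blocked;
static int queued;

static void queue_deferred_work(void)
{
	if (irqwork_blocked) {		/* mirrors the WARN_ON_ONCE() + return */
		fprintf(stderr, "warning: queueing while blocked\n");
		return;
	}
	queued++;
}

static void emit_message(void)
{
	/* callers check the flag first, so the warning never fires here */
	if (!irqwork_blocked)
		queue_deferred_work();
}

int main(void)
{
	emit_message();			/* queued */
	irqwork_blocked = true;		/* like console_suspend_all() */
	emit_message();			/* skipped while suspending */
	irqwork_blocked = false;	/* like console_resume_all() */
	emit_message();			/* queued again */
	printf("queued %d of 3 messages as deferred work\n", queued);
	return 0;
}
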
diff --git a/queue-6.18/rust-dma-add-helpers-for-architectures-without-config_has_dma.patch b/queue-6.18/rust-dma-add-helpers-for-architectures-without-config_has_dma.patch
new file mode 100644 (file)
index 0000000..e9d0c60
--- /dev/null
@@ -0,0 +1,110 @@
+From d8932355f8c5673106eca49abd142f8fe0c1fe8b Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@gmail.com>
+Date: Fri, 5 Dec 2025 01:06:39 +0900
+Subject: rust: dma: add helpers for architectures without CONFIG_HAS_DMA
+
+From: FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+commit d8932355f8c5673106eca49abd142f8fe0c1fe8b upstream.
+
+Add dma_set_mask(), dma_set_coherent_mask(), dma_map_sgtable(), and
+dma_max_mapping_size() helpers to fix a build error when
+CONFIG_HAS_DMA is not enabled.
+
+Note that when CONFIG_HAS_DMA is enabled, they are included in both
+bindings_generated.rs and bindings_helpers_generated.rs. The former
+takes precedence so behavior remains unchanged in that case.
+
+This fixes the following build error on UML:
+
+error[E0425]: cannot find function `dma_set_mask` in crate `bindings`
+     --> rust/kernel/dma.rs:46:38
+      |
+   46 |         to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
+      |                                      ^^^^^^^^^^^^ help: a function with a similar name exists: `xa_set_mark`
+      |
+     ::: rust/bindings/bindings_generated.rs:24690:5
+      |
+24690 |     pub fn xa_set_mark(arg1: *mut xarray, index: ffi::c_ulong, arg2: xa_mark_t);
+      |     ---------------------------------------------------------------------------- similarly named function `xa_set_mark` defined here
+
+error[E0425]: cannot find function `dma_set_coherent_mask` in crate `bindings`
+     --> rust/kernel/dma.rs:63:38
+      |
+   63 |         to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
+      |                                      ^^^^^^^^^^^^^^^^^^^^^ help: a function with a similar name exists: `dma_coherent_ok`
+      |
+     ::: rust/bindings/bindings_generated.rs:52745:5
+      |
+52745 |     pub fn dma_coherent_ok(dev: *mut device, phys: phys_addr_t, size: usize) -> bool_;
+      |     ---------------------------------------------------------------------------------- similarly named function `dma_coherent_ok` defined here
+
+error[E0425]: cannot find function `dma_map_sgtable` in crate `bindings`
+    --> rust/kernel/scatterlist.rs:212:23
+     |
+ 212 |               bindings::dma_map_sgtable(dev.as_raw(), sgt.as_ptr(), dir.into(), 0)
+     |                         ^^^^^^^^^^^^^^^ help: a function with a similar name exists: `dma_unmap_sgtable`
+     |
+    ::: rust/bindings/bindings_helpers_generated.rs:1351:5
+     |
+1351 | /     pub fn dma_unmap_sgtable(
+1352 | |         dev: *mut device,
+1353 | |         sgt: *mut sg_table,
+1354 | |         dir: dma_data_direction,
+1355 | |         attrs: ffi::c_ulong,
+1356 | |     );
+     | |______- similarly named function `dma_unmap_sgtable` defined here
+
+error[E0425]: cannot find function `dma_max_mapping_size` in crate `bindings`
+   --> rust/kernel/scatterlist.rs:356:52
+    |
+356 |         let max_segment = match unsafe { bindings::dma_max_mapping_size(dev.as_raw()) } {
+    |                                                    ^^^^^^^^^^^^^^^^^^^^ not found in `bindings`
+
+error: aborting due to 4 previous errors
+
+Cc: stable@vger.kernel.org # v6.17+
+Fixes: 101d66828a4ee ("rust: dma: add DMA addressing capabilities")
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@gmail.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20251204160639.364936-1-fujita.tomonori@gmail.com
+[ Use relative paths in the error splat; add 'dma' prefix. - Danilo ]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/helpers/dma.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/rust/helpers/dma.c b/rust/helpers/dma.c
+index 6e741c197242..2afa32c21c94 100644
+--- a/rust/helpers/dma.c
++++ b/rust/helpers/dma.c
+@@ -19,3 +19,24 @@ int rust_helper_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+ {
+       return dma_set_mask_and_coherent(dev, mask);
+ }
++
++int rust_helper_dma_set_mask(struct device *dev, u64 mask)
++{
++      return dma_set_mask(dev, mask);
++}
++
++int rust_helper_dma_set_coherent_mask(struct device *dev, u64 mask)
++{
++      return dma_set_coherent_mask(dev, mask);
++}
++
++int rust_helper_dma_map_sgtable(struct device *dev, struct sg_table *sgt,
++                              enum dma_data_direction dir, unsigned long attrs)
++{
++      return dma_map_sgtable(dev, sgt, dir, attrs);
++}
++
++size_t rust_helper_dma_max_mapping_size(struct device *dev)
++{
++      return dma_max_mapping_size(dev);
++}
+-- 
+2.52.0
+
diff --git a/queue-6.18/rust-drm-gem-fix-missing-header-in-object-rustdoc.patch b/queue-6.18/rust-drm-gem-fix-missing-header-in-object-rustdoc.patch
new file mode 100644 (file)
index 0000000..a0c1c08
--- /dev/null
@@ -0,0 +1,34 @@
+From e54ad0cd3673c93cdafda58505eaa81610fe3aef Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 7 Nov 2025 15:25:56 -0500
+Subject: rust/drm/gem: Fix missing header in `Object` rustdoc
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit e54ad0cd3673c93cdafda58505eaa81610fe3aef upstream.
+
+Invariants should be prefixed with a # to turn it into a header.
+
+There are no functional changes in this patch.
+
+Cc: stable@vger.kernel.org
+Fixes: c284d3e42338 ("rust: drm: gem: Add GEM object abstraction")
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patch.msgid.link/20251107202603.465932-1-lyude@redhat.com
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/drm/gem/mod.rs |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/rust/kernel/drm/gem/mod.rs
++++ b/rust/kernel/drm/gem/mod.rs
+@@ -184,7 +184,7 @@ impl<T: IntoGEMObject> BaseObject for T
+ /// A base GEM object.
+ ///
+-/// Invariants
++/// # Invariants
+ ///
+ /// - `self.obj` is a valid instance of a `struct drm_gem_object`.
+ /// - `self.dev` is always a valid pointer to a `struct drm_device`.
diff --git a/queue-6.18/rust-io-add-typedef-for-phys_addr_t.patch b/queue-6.18/rust-io-add-typedef-for-phys_addr_t.patch
new file mode 100644 (file)
index 0000000..fd21e4d
--- /dev/null
@@ -0,0 +1,136 @@
+From dd6ff5cf56fb183fce605ca6a5bfce228cd8888b Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Wed, 12 Nov 2025 09:48:35 +0000
+Subject: rust: io: add typedef for phys_addr_t
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit dd6ff5cf56fb183fce605ca6a5bfce228cd8888b upstream.
+
+The C typedef phys_addr_t is missing an analogue in Rust, meaning that
+we end up using bindings::phys_addr_t or ResourceSize as a replacement
+in various places throughout the kernel. Fix that by introducing a new
+typedef on the Rust side. Place it next to the existing ResourceSize
+typedef since they're quite related to each other.
+
+Cc: stable@vger.kernel.org # for v6.18 [1]
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20251112-resource-phys-typedefs-v2-4-538307384f82@google.com
+Link: https://lore.kernel.org/all/20251112-resource-phys-typedefs-v2-0-538307384f82@google.com/ [1]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/devres.rs      |   18 +++++++++++++++---
+ rust/kernel/io.rs          |   20 +++++++++++++++++---
+ rust/kernel/io/resource.rs |    9 ++++++---
+ 3 files changed, 38 insertions(+), 9 deletions(-)
+
+--- a/rust/kernel/devres.rs
++++ b/rust/kernel/devres.rs
+@@ -52,8 +52,20 @@ struct Inner<T: Send> {
+ /// # Examples
+ ///
+ /// ```no_run
+-/// # use kernel::{bindings, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
+-/// # use core::ops::Deref;
++/// use kernel::{
++///     bindings,
++///     device::{
++///         Bound,
++///         Device,
++///     },
++///     devres::Devres,
++///     io::{
++///         Io,
++///         IoRaw,
++///         PhysAddr,
++///     },
++/// };
++/// use core::ops::Deref;
+ ///
+ /// // See also [`pci::Bar`] for a real example.
+ /// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+@@ -66,7 +78,7 @@ struct Inner<T: Send> {
+ ///     unsafe fn new(paddr: usize) -> Result<Self>{
+ ///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+ ///         // valid for `ioremap`.
+-///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
++///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
+ ///         if addr.is_null() {
+ ///             return Err(ENOMEM);
+ ///         }
+--- a/rust/kernel/io.rs
++++ b/rust/kernel/io.rs
+@@ -13,6 +13,12 @@ pub mod resource;
+ pub use resource::Resource;
++/// Physical address type.
++///
++/// This is a type alias to either `u32` or `u64` depending on the config option
++/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
++pub type PhysAddr = bindings::phys_addr_t;
++
+ /// Resource Size type.
+ ///
+ /// This is a type alias to either `u32` or `u64` depending on the config option
+@@ -68,8 +74,16 @@ impl<const SIZE: usize> IoRaw<SIZE> {
+ /// # Examples
+ ///
+ /// ```no_run
+-/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
+-/// # use core::ops::Deref;
++/// use kernel::{
++///     bindings,
++///     ffi::c_void,
++///     io::{
++///         Io,
++///         IoRaw,
++///         PhysAddr,
++///     },
++/// };
++/// use core::ops::Deref;
+ ///
+ /// // See also [`pci::Bar`] for a real example.
+ /// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+@@ -82,7 +96,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
+ ///     unsafe fn new(paddr: usize) -> Result<Self>{
+ ///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+ ///         // valid for `ioremap`.
+-///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
++///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
+ ///         if addr.is_null() {
+ ///             return Err(ENOMEM);
+ ///         }
+--- a/rust/kernel/io/resource.rs
++++ b/rust/kernel/io/resource.rs
+@@ -12,7 +12,10 @@ use crate::prelude::*;
+ use crate::str::{CStr, CString};
+ use crate::types::Opaque;
+-pub use super::ResourceSize;
++pub use super::{
++    PhysAddr,
++    ResourceSize, //
++};
+ /// A region allocated from a parent [`Resource`].
+ ///
+@@ -93,7 +96,7 @@ impl Resource {
+     /// the region, or a part of it, is already in use.
+     pub fn request_region(
+         &self,
+-        start: ResourceSize,
++        start: PhysAddr,
+         size: ResourceSize,
+         name: CString,
+         flags: Flags,
+@@ -127,7 +130,7 @@ impl Resource {
+     }
+     /// Returns the start address of the resource.
+-    pub fn start(&self) -> ResourceSize {
++    pub fn start(&self) -> PhysAddr {
+         let inner = self.0.get();
+         // SAFETY: Safe as per the invariants of `Resource`.
+         unsafe { (*inner).start }
diff --git a/queue-6.18/rust-io-define-resourcesize-as-resource_size_t.patch b/queue-6.18/rust-io-define-resourcesize-as-resource_size_t.patch
new file mode 100644 (file)
index 0000000..5cd3509
--- /dev/null
@@ -0,0 +1,33 @@
+From 919b72922717e396be9435c83916b9969505bd23 Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Wed, 12 Nov 2025 09:48:32 +0000
+Subject: rust: io: define ResourceSize as resource_size_t
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 919b72922717e396be9435c83916b9969505bd23 upstream.
+
+These typedefs are always equivalent so this should not change anything,
+but the code makes a lot more sense like this.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Fixes: 493fc33ec252 ("rust: io: add resource abstraction")
+Link: https://patch.msgid.link/20251112-resource-phys-typedefs-v2-1-538307384f82@google.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/io/resource.rs |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/rust/kernel/io/resource.rs
++++ b/rust/kernel/io/resource.rs
+@@ -16,7 +16,7 @@ use crate::types::Opaque;
+ ///
+ /// This is a type alias to either `u32` or `u64` depending on the config option
+ /// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
+-pub type ResourceSize = bindings::phys_addr_t;
++pub type ResourceSize = bindings::resource_size_t;
+ /// A region allocated from a parent [`Resource`].
+ ///
diff --git a/queue-6.18/rust-io-move-resourcesize-to-top-level-io-module.patch b/queue-6.18/rust-io-move-resourcesize-to-top-level-io-module.patch
new file mode 100644 (file)
index 0000000..b0f231b
--- /dev/null
@@ -0,0 +1,72 @@
+From dfd67993044f507ba8fd6ee9956f923ba4b7e851 Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Wed, 12 Nov 2025 09:48:33 +0000
+Subject: rust: io: move ResourceSize to top-level io module
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit dfd67993044f507ba8fd6ee9956f923ba4b7e851 upstream.
+
+Resource sizes are a general concept for dealing with physical
+addresses, and not specific to the Resource type, which is just one way
+to access physical addresses. Thus, move the typedef to the io module.
+
+Still keep a re-export under resource. This avoids this commit being
+a flag-day change, but I also think it's a useful re-export in general so
+that you can import
+
+       use kernel::io::resource::{Resource, ResourceSize};
+
+instead of having to write
+
+       use kernel::io::{
+           resource::Resource,
+           ResourceSize,
+       };
+
+in the specific cases where you need ResourceSize because you are using
+the Resource type. Therefore I think it makes sense to keep this
+re-export indefinitely and it is *not* intended as a temporary re-export
+for migration purposes.
+
+Cc: stable@vger.kernel.org # for v6.18 [1]
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20251112-resource-phys-typedefs-v2-2-538307384f82@google.com
+Link: https://lore.kernel.org/all/20251112-resource-phys-typedefs-v2-0-538307384f82@google.com/ [1]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/io.rs          |    6 ++++++
+ rust/kernel/io/resource.rs |    6 +-----
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/rust/kernel/io.rs
++++ b/rust/kernel/io.rs
+@@ -13,6 +13,12 @@ pub mod resource;
+ pub use resource::Resource;
++/// Resource Size type.
++///
++/// This is a type alias to either `u32` or `u64` depending on the config option
++/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
++pub type ResourceSize = bindings::resource_size_t;
++
+ /// Raw representation of an MMIO region.
+ ///
+ /// By itself, the existence of an instance of this structure does not provide any guarantees that
+--- a/rust/kernel/io/resource.rs
++++ b/rust/kernel/io/resource.rs
+@@ -12,11 +12,7 @@ use crate::prelude::*;
+ use crate::str::{CStr, CString};
+ use crate::types::Opaque;
+-/// Resource Size type.
+-///
+-/// This is a type alias to either `u32` or `u64` depending on the config option
+-/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
+-pub type ResourceSize = bindings::resource_size_t;
++pub use super::ResourceSize;
+ /// A region allocated from a parent [`Resource`].
+ ///
diff --git a/queue-6.18/rust_binder-avoid-mem-take-on-delivered_deaths.patch b/queue-6.18/rust_binder-avoid-mem-take-on-delivered_deaths.patch
new file mode 100644 (file)
index 0000000..a7bda5f
--- /dev/null
@@ -0,0 +1,50 @@
+From 6c37bebd8c926ad01ef157c0d123633a203e5c0d Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Tue, 11 Nov 2025 14:23:33 +0000
+Subject: rust_binder: avoid mem::take on delivered_deaths
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 6c37bebd8c926ad01ef157c0d123633a203e5c0d upstream.
+
+Similar to the previous commit, List::remove is used on
+delivered_deaths, so do not use mem::take on it as that may result in
+violations of the List::remove safety requirements.
+
+I don't think this particular case can be triggered because it requires
+fd close to run in parallel with an ioctl on the same fd. But let's not
+tempt fate.
+
+Cc: stable@vger.kernel.org
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://patch.msgid.link/20251111-binder-fix-list-remove-v1-2-8ed14a0da63d@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/process.rs | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
+index 27323070f30f..fd5dcdc8788c 100644
+--- a/drivers/android/binder/process.rs
++++ b/drivers/android/binder/process.rs
+@@ -1362,8 +1362,12 @@ fn deferred_release(self: Arc<Self>) {
+             work.into_arc().cancel();
+         }
+-        let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
+-        drop(delivered_deaths);
++        // Clear delivered_deaths list.
++        //
++        // Scope ensures that MutexGuard is dropped while executing the body.
++        while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
++            drop(delivered_death);
++        }
+         // Free any resources kept alive by allocated buffers.
+         let omapping = self.inner.lock().mapping.take();
+-- 
+2.52.0
+
diff --git a/queue-6.18/s390-dasd-fix-gendisk-parent-after-copy-pair-swap.patch b/queue-6.18/s390-dasd-fix-gendisk-parent-after-copy-pair-swap.patch
new file mode 100644 (file)
index 0000000..f2f11c2
--- /dev/null
@@ -0,0 +1,50 @@
+From c943bfc6afb8d0e781b9b7406f36caa8bbf95cb9 Mon Sep 17 00:00:00 2001
+From: Stefan Haberland <sth@linux.ibm.com>
+Date: Wed, 26 Nov 2025 17:06:31 +0100
+Subject: s390/dasd: Fix gendisk parent after copy pair swap
+
+From: Stefan Haberland <sth@linux.ibm.com>
+
+commit c943bfc6afb8d0e781b9b7406f36caa8bbf95cb9 upstream.
+
+After a copy pair swap the block device's "device" symlink points to
+the secondary CCW device, but the gendisk's parent remained the
+primary, leaving /sys/block/<dasdx> under the wrong parent.
+
+Move the gendisk to the secondary's device with device_move(), keeping
+the sysfs topology consistent after the swap.
+
+Fixes: 413862caad6f ("s390/dasd: add copy pair swap capability")
+Cc: stable@vger.kernel.org #6.1
+Reviewed-by: Jan Hoeppner <hoeppner@linux.ibm.com>
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/block/dasd_eckd.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -6150,6 +6150,7 @@ static int dasd_eckd_copy_pair_swap(stru
+       struct dasd_copy_relation *copy;
+       struct dasd_block *block;
+       struct gendisk *gdp;
++      int rc;
+       copy = device->copy;
+       if (!copy)
+@@ -6184,6 +6185,13 @@ static int dasd_eckd_copy_pair_swap(stru
+       /* swap blocklayer device link */
+       gdp = block->gdp;
+       dasd_add_link_to_gendisk(gdp, secondary);
++      rc = device_move(disk_to_dev(gdp), &secondary->cdev->dev, DPM_ORDER_NONE);
++      if (rc) {
++              dev_err(&primary->cdev->dev,
++                      "copy_pair_swap: moving blockdevice parent %s->%s failed (%d)\n",
++                      dev_name(&primary->cdev->dev),
++                      dev_name(&secondary->cdev->dev), rc);
++      }
+       /* re-enable device */
+       dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
diff --git a/queue-6.18/samples-rust-fix-endianness-issue-in-rust_driver_pci.patch b/queue-6.18/samples-rust-fix-endianness-issue-in-rust_driver_pci.patch
new file mode 100644 (file)
index 0000000..ceca039
--- /dev/null
@@ -0,0 +1,40 @@
+From e2f1081ca8f18c146e8f928486deac61eca2b517 Mon Sep 17 00:00:00 2001
+From: Marko Turk <mt@markoturk.info>
+Date: Wed, 10 Dec 2025 12:25:51 +0100
+Subject: samples: rust: fix endianness issue in rust_driver_pci
+
+From: Marko Turk <mt@markoturk.info>
+
+commit e2f1081ca8f18c146e8f928486deac61eca2b517 upstream.
+
+MMIO backend of PCI Bar always assumes little-endian devices and
+will convert to CPU endianness automatically. Remove the u32::from_le
+conversion which would cause a bug on big-endian machines.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Dirk Behme <dirk.behme@de.bosch.com>
+Signed-off-by: Marko Turk <mt@markoturk.info>
+Fixes: 685376d18e9a ("samples: rust: add Rust PCI sample driver")
+Link: https://patch.msgid.link/20251210112503.62925-2-mt@markoturk.info
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ samples/rust/rust_driver_pci.rs | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
+index 5823787bea8e..fa677991a5c4 100644
+--- a/samples/rust/rust_driver_pci.rs
++++ b/samples/rust/rust_driver_pci.rs
+@@ -48,7 +48,7 @@ fn testdev(index: &TestIndex, bar: &Bar0) -> Result<u32> {
+         // Select the test.
+         bar.write8(index.0, Regs::TEST);
+-        let offset = u32::from_le(bar.read32(Regs::OFFSET)) as usize;
++        let offset = bar.read32(Regs::OFFSET) as usize;
+         let data = bar.read8(Regs::DATA);
+         // Write `data` to `offset` to increase `count` by one.
+-- 
+2.52.0
+
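
Why the dropped u32::from_le mattered, shown as a standalone C sketch (bswap32() and from_le() are illustrative helpers, not kernel or Rust library calls): the MMIO accessor already returns a CPU-endian value, so a second little-endian conversion is a no-op on little-endian hosts but byte-swaps, and therefore corrupts, the value on big-endian hosts.

/* Illustration of the double-conversion bug fixed above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* from_le(): identity on little-endian hosts, byte swap on big-endian */
static uint32_t from_le(uint32_t v, int host_is_big_endian)
{
	return host_is_big_endian ? bswap32(v) : v;
}

int main(void)
{
	uint32_t cpu_value = 0x00001000;	/* already CPU-endian after read32() */

	printf("LE host, extra from_le: 0x%08x (still correct)\n",
	       (unsigned)from_le(cpu_value, 0));
	printf("BE host, extra from_le: 0x%08x (corrupted offset)\n",
	       (unsigned)from_le(cpu_value, 1));
	return 0;
}
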
diff --git a/queue-6.18/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch b/queue-6.18/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
new file mode 100644 (file)
index 0000000..e587b6d
--- /dev/null
@@ -0,0 +1,75 @@
+From 530b6637c79e728d58f1d9b66bd4acf4b735b86d Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 11 Dec 2025 15:45:03 -1000
+Subject: sched_ext: Factor out local_dsq_post_enq() from dispatch_enqueue()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 530b6637c79e728d58f1d9b66bd4acf4b735b86d upstream.
+
+Factor out local_dsq_post_enq() which performs post-enqueue handling for
+local DSQs - triggering resched_curr() if SCX_ENQ_PREEMPT is specified or if
+the current CPU is idle. No functional change.
+
+This will be used by the next patch to fix move_local_task_to_local_dsq().
+
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -906,6 +906,22 @@ static void refill_task_slice_dfl(struct
+       __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
+ }
++static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
++                             u64 enq_flags)
++{
++      struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
++      bool preempt = false;
++
++      if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
++          rq->curr->sched_class == &ext_sched_class) {
++              rq->curr->scx.slice = 0;
++              preempt = true;
++      }
++
++      if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
++              resched_curr(rq);
++}
++
+ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
+                            struct task_struct *p, u64 enq_flags)
+ {
+@@ -1003,22 +1019,10 @@ static void dispatch_enqueue(struct scx_
+       if (enq_flags & SCX_ENQ_CLEAR_OPSS)
+               atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+-      if (is_local) {
+-              struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+-              bool preempt = false;
+-
+-              if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+-                  rq->curr->sched_class == &ext_sched_class) {
+-                      rq->curr->scx.slice = 0;
+-                      preempt = true;
+-              }
+-
+-              if (preempt || sched_class_above(&ext_sched_class,
+-                                               rq->curr->sched_class))
+-                      resched_curr(rq);
+-      } else {
++      if (is_local)
++              local_dsq_post_enq(dsq, p, enq_flags);
++      else
+               raw_spin_unlock(&dsq->lock);
+-      }
+ }
+ static void task_unlink_from_dsq(struct task_struct *p,
diff --git a/queue-6.18/sched_ext-fix-bypass-depth-leak-on-scx_enable-failure.patch b/queue-6.18/sched_ext-fix-bypass-depth-leak-on-scx_enable-failure.patch
new file mode 100644 (file)
index 0000000..7438c31
--- /dev/null
@@ -0,0 +1,81 @@
+From 9f769637a93fac81689b80df6855f545839cf999 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 9 Dec 2025 11:04:33 -1000
+Subject: sched_ext: Fix bypass depth leak on scx_enable() failure
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 9f769637a93fac81689b80df6855f545839cf999 upstream.
+
+scx_enable() calls scx_bypass(true) to initialize in bypass mode and then
+scx_bypass(false) on success to exit. If scx_enable() fails during task
+initialization - e.g. scx_cgroup_init() or scx_init_task() returns an error -
+it jumps to err_disable while bypass is still active. scx_disable_workfn()
+then calls scx_bypass(true/false) for its own bypass, leaving the bypass depth
+at 1 instead of 0. This causes the system to remain permanently in bypass mode
+after a failed scx_enable().
+
+Failures after task initialization is complete - e.g. scx_tryset_enable_state()
+at the end - already call scx_bypass(false) before reaching the error path and
+are not affected. This only affects a subset of failure modes.
+
+Fix it by tracking whether scx_enable() called scx_bypass(true) in a bool and
+having scx_disable_workfn() call an extra scx_bypass(false) to clear it. This
+is a temporary measure as the bypass depth will be moved into the sched
+instance, which will make this tracking unnecessary.
+
+Fixes: 8c2090c504e9 ("sched_ext: Initialize in bypass mode")
+Cc: stable@vger.kernel.org # v6.12+
+Reported-by: Chris Mason <clm@meta.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Link: https://lore.kernel.org/stable/286e6f7787a81239e1ce2989b52391ce%40kernel.org
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -40,6 +40,13 @@ static bool scx_init_task_enabled;
+ static bool scx_switching_all;
+ DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
++/*
++ * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
++ * depth on enable failure. Will be removed when bypass depth is moved into the
++ * sched instance.
++ */
++static bool scx_bypassed_for_enable;
++
+ static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
+ static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
+@@ -4051,6 +4058,11 @@ static void scx_disable_workfn(struct kt
+       scx_dsp_max_batch = 0;
+       free_kick_pseqs();
++      if (scx_bypassed_for_enable) {
++              scx_bypassed_for_enable = false;
++              scx_bypass(false);
++      }
++
+       mutex_unlock(&scx_enable_mutex);
+       WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
+@@ -4676,6 +4688,7 @@ static int scx_enable(struct sched_ext_o
+        * Init in bypass mode to guarantee forward progress.
+        */
+       scx_bypass(true);
++      scx_bypassed_for_enable = true;
+       for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
+               if (((void (**)(void))ops)[i])
+@@ -4780,6 +4793,7 @@ static int scx_enable(struct sched_ext_o
+       scx_task_iter_stop(&sti);
+       percpu_up_write(&scx_fork_rwsem);
++      scx_bypassed_for_enable = false;
+       scx_bypass(false);
+       if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
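
A depth-counter sketch of the leak described in the patch above (all names are illustrative; this is not the sched_ext code): enable() raises the bypass depth on entry, and an error exit taken before the normal drop must still be balanced by exactly one extra drop in the disable path, which is what the new flag arranges.

/* Illustrative model of balancing a bypass depth across a failed enable. */
#include <stdio.h>
#include <stdbool.h>

static int bypass_depth;
static bool bypassed_for_enable;

static void bypass(bool on) { bypass_depth += on ? 1 : -1; }

static int enable(bool fail_midway)
{
	bypass(true);
	bypassed_for_enable = true;

	if (fail_midway)
		return -1;	/* error path reached with the depth still held */

	bypassed_for_enable = false;
	bypass(false);
	return 0;
}

static void disable(void)
{
	bypass(true);		/* disable's own bypass section */
	bypass(false);
	if (bypassed_for_enable) {	/* the fix: drop the leaked level */
		bypassed_for_enable = false;
		bypass(false);
	}
}

int main(void)
{
	if (enable(true))
		disable();
	printf("bypass depth after failed enable: %d\n", bypass_depth);	/* 0 */
	return 0;
}
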
diff --git a/queue-6.18/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch b/queue-6.18/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
new file mode 100644 (file)
index 0000000..b5f188e
--- /dev/null
@@ -0,0 +1,60 @@
+From f5e1e5ec204da11fa87fdf006d451d80ce06e118 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 11 Dec 2025 15:45:04 -1000
+Subject: sched_ext: Fix missing post-enqueue handling in move_local_task_to_local_dsq()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit f5e1e5ec204da11fa87fdf006d451d80ce06e118 upstream.
+
+move_local_task_to_local_dsq() is used when moving a task from a non-local
+DSQ to a local DSQ on the same CPU. It directly manipulates the local DSQ
+without going through dispatch_enqueue() and was missing the post-enqueue
+handling that triggers preemption when SCX_ENQ_PREEMPT is set or the idle
+task is running.
+
+The function is used by move_task_between_dsqs() which backs
+scx_bpf_dsq_move() and may be called while the CPU is busy.
+
+Add local_dsq_post_enq() call to move_local_task_to_local_dsq(). As the
+dispatch path doesn't need post-enqueue handling, add SCX_RQ_IN_BALANCE
+early exit to keep consume_dispatch_q() behavior unchanged and avoid
+triggering unnecessary resched when scx_bpf_dsq_move() is used from the
+dispatch path.
+
+Fixes: 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()")
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -919,6 +919,14 @@ static void local_dsq_post_enq(struct sc
+       struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+       bool preempt = false;
++      /*
++       * If @rq is in balance, the CPU is already vacant and looking for the
++       * next task to run. No need to preempt or trigger resched after moving
++       * @p into its local DSQ.
++       */
++      if (rq->scx.flags & SCX_RQ_IN_BALANCE)
++              return;
++
+       if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+           rq->curr->sched_class == &ext_sched_class) {
+               rq->curr->scx.slice = 0;
+@@ -1524,6 +1532,8 @@ static void move_local_task_to_local_dsq
+       dsq_mod_nr(dst_dsq, 1);
+       p->scx.dsq = dst_dsq;
++
++      local_dsq_post_enq(dst_dsq, p, enq_flags);
+ }
+ /**
diff --git a/queue-6.18/sched_ext-fix-the-memleak-for-sch-helper-objects.patch b/queue-6.18/sched_ext-fix-the-memleak-for-sch-helper-objects.patch
new file mode 100644 (file)
index 0000000..c1ace5b
--- /dev/null
@@ -0,0 +1,59 @@
+From 517a44d18537ef8ab888f71197c80116c14cee0a Mon Sep 17 00:00:00 2001
+From: Zqiang <qiang.zhang@linux.dev>
+Date: Mon, 8 Dec 2025 19:23:19 +0800
+Subject: sched_ext: Fix the memleak for sch->helper objects
+
+From: Zqiang <qiang.zhang@linux.dev>
+
+commit 517a44d18537ef8ab888f71197c80116c14cee0a upstream.
+
+This commit use kthread_destroy_worker() to release sch->helper
+objects to fix the following kmemleak:
+
+unreferenced object 0xffff888121ec7b00 (size 128):
+  comm "scx_simple", pid 1197, jiffies 4295884415
+  hex dump (first 32 bytes):
+    00 00 00 00 00 00 00 00 00 00 00 00 ad 4e ad de  .............N..
+    ff ff ff ff 00 00 00 00 ff ff ff ff ff ff ff ff  ................
+  backtrace (crc 587b3352):
+    kmemleak_alloc+0x62/0xa0
+    __kmalloc_cache_noprof+0x28d/0x3e0
+    kthread_create_worker_on_node+0xd5/0x1f0
+    scx_enable.isra.210+0x6c2/0x25b0
+    bpf_scx_reg+0x12/0x20
+    bpf_struct_ops_link_create+0x2c3/0x3b0
+    __sys_bpf+0x3102/0x4b00
+    __x64_sys_bpf+0x79/0xc0
+    x64_sys_call+0x15d9/0x1dd0
+    do_syscall_64+0xf0/0x470
+    entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fixes: bff3b5aec1b7 ("sched_ext: Move disable machinery into scx_sched")
+Cc: stable@vger.kernel.org # v6.16+
+Signed-off-by: Zqiang <qiang.zhang@linux.dev>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3512,7 +3512,7 @@ static void scx_sched_free_rcu_work(stru
+       int node;
+       irq_work_sync(&sch->error_irq_work);
+-      kthread_stop(sch->helper->task);
++      kthread_destroy_worker(sch->helper);
+       free_percpu(sch->pcpu);
+@@ -4504,7 +4504,7 @@ static struct scx_sched *scx_alloc_and_a
+       return sch;
+ err_stop_helper:
+-      kthread_stop(sch->helper->task);
++      kthread_destroy_worker(sch->helper);
+ err_free_pcpu:
+       free_percpu(sch->pcpu);
+ err_free_gdsqs:
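
A loose userspace analogy for the leak fixed above, assuming only that kthread_create_worker() allocates a bookkeeping struct which kthread_stop() on its task does not free while kthread_destroy_worker() does (the code below is a model, not the kthread API):

/* Model of stopping a worker's thread vs. destroying the worker. */
#include <stdio.h>
#include <stdlib.h>

struct worker { int stopped; };

static struct worker *create_worker(void)
{
	struct worker *w = malloc(sizeof(*w));	/* the allocation kmemleak saw */
	if (w)
		w->stopped = 0;
	return w;
}

static void stop_task(struct worker *w)		/* like kthread_stop(w->task) */
{
	w->stopped = 1;				/* struct stays allocated */
}

static void destroy_worker(struct worker *w)	/* like kthread_destroy_worker() */
{
	w->stopped = 1;
	free(w);				/* releases the allocation too */
}

int main(void)
{
	struct worker *leaky = create_worker();
	struct worker *clean = create_worker();

	if (!leaky || !clean)
		return 1;
	stop_task(leaky);	/* old path: bookkeeping is never freed */
	destroy_worker(clean);	/* fixed path */
	printf("leaky worker still allocated at %p\n", (void *)leaky);
	free(leaky);		/* only to keep this demo itself leak-free */
	return 0;
}
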
index fd20c1c2f51c71c25b1306a7d59912802feb7a1b..b3f88ade41c94a6e59b94c615c116e7e20f60e8f 100644 (file)
@@ -217,3 +217,40 @@ ublk-clean-up-user-copy-references-on-ublk-server-ex.patch
 block-rnbd-clt-fix-signedness-bug-in-init_dev.patch
 vhost-vsock-improve-rcu-read-sections-around-vhost_v.patch
 cifs-fix-memory-and-information-leak-in-smb3_reconfi.patch
+rust-drm-gem-fix-missing-header-in-object-rustdoc.patch
+rust_binder-avoid-mem-take-on-delivered_deaths.patch
+rust-dma-add-helpers-for-architectures-without-config_has_dma.patch
+samples-rust-fix-endianness-issue-in-rust_driver_pci.patch
+rust-io-define-resourcesize-as-resource_size_t.patch
+rust-io-move-resourcesize-to-top-level-io-module.patch
+rust-io-add-typedef-for-phys_addr_t.patch
+keys-trusted-fix-a-memory-leak-in-tpm2_load_cmd.patch
+clk-keystone-syscon-clk-fix-regmap-leak-on-probe-failure.patch
+io_uring-poll-correctly-handle-io_poll_add-return-value-on-update.patch
+io_uring-fix-min_wait-wakeups-for-sqpoll.patch
+io_uring-fix-filename-leak-in-__io_openat_prep.patch
+printk-avoid-scheduling-irq_work-on-suspend.patch
+sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
+sched_ext-fix-the-memleak-for-sch-helper-objects.patch
+sched_ext-fix-bypass-depth-leak-on-scx_enable-failure.patch
+sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
+x86-mce-do-not-clear-bank-s-poll-bit-in-mce_poll_banks-on-amd-smca-systems.patch
+mmc-sdhci-msm-avoid-early-clock-doubling-during-hs400-transition.patch
+dt-bindings-clock-mmcc-sdm660-add-missing-mdss-reset.patch
+phy-exynos5-usbdrd-fix-clock-prepare-imbalance.patch
+efi-add-missing-static-initializer-for-efi_mm-cpus_allowed_lock.patch
+perf-arm_cspmu-fix-error-handling-in-arm_cspmu_impl_unregister.patch
+lib-crypto-x86-blake2s-fix-32-bit-arg-treated-as-64-bit.patch
+s390-dasd-fix-gendisk-parent-after-copy-pair-swap.patch
+wifi-mt76-fix-dts-power-limits-on-little-endian-systems.patch
+btrfs-don-t-rewrite-ret-from-inode_permission.patch
+gfs2-fix-freeze-error-handling.patch
+block-rate-limit-capacity-change-info-log.patch
+jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
+floppy-fix-for-page_size-4kb.patch
+crypto-scatterwalk-fix-memcpy_sglist-to-always-succeed.patch
+kallsyms-fix-wrong-big-kernel-symbol-type-read-from-procfs.patch
+fs-ntfs3-fix-mount-failure-for-sparse-runs-in-run_unpack.patch
+ktest.pl-fix-uninitialized-var-in-config-bisect.pl.patch
+tpm-cap-the-number-of-pcr-banks.patch
+fs-pm-fix-reverse-check-in-filesystems_freeze_callback.patch
diff --git a/queue-6.18/tpm-cap-the-number-of-pcr-banks.patch b/queue-6.18/tpm-cap-the-number-of-pcr-banks.patch
new file mode 100644 (file)
index 0000000..172c313
--- /dev/null
@@ -0,0 +1,99 @@
+From faf07e611dfa464b201223a7253e9dc5ee0f3c9e Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
+Date: Tue, 30 Sep 2025 15:58:02 +0300
+Subject: tpm: Cap the number of PCR banks
+
+From: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
+
+commit faf07e611dfa464b201223a7253e9dc5ee0f3c9e upstream.
+
+tpm2_get_pcr_allocation() does not enforce any upper limit on the number
+of banks. Cap it at eight banks so that out-of-bounds values coming
+from external I/O cause only limited harm.
+
+Cc: stable@vger.kernel.org # v5.10+
+Fixes: bcfff8384f6c ("tpm: dynamically allocate the allocated_banks array")
+Tested-by: Lai Yi <yi1.lai@linux.intel.com>
+Reviewed-by: Jonathan McDowell <noodles@meta.com>
+Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm-chip.c |    1 -
+ drivers/char/tpm/tpm1-cmd.c |    5 -----
+ drivers/char/tpm/tpm2-cmd.c |    8 +++-----
+ include/linux/tpm.h         |    8 +++++---
+ 4 files changed, 8 insertions(+), 14 deletions(-)
+
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -282,7 +282,6 @@ static void tpm_dev_release(struct devic
+       kfree(chip->work_space.context_buf);
+       kfree(chip->work_space.session_buf);
+-      kfree(chip->allocated_banks);
+ #ifdef CONFIG_TCG_TPM2_HMAC
+       kfree(chip->auth);
+ #endif
+--- a/drivers/char/tpm/tpm1-cmd.c
++++ b/drivers/char/tpm/tpm1-cmd.c
+@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chi
+  */
+ int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+ {
+-      chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+-                                      GFP_KERNEL);
+-      if (!chip->allocated_banks)
+-              return -ENOMEM;
+-
+       chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+       chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+       chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -538,11 +538,9 @@ ssize_t tpm2_get_pcr_allocation(struct t
+       nr_possible_banks = be32_to_cpup(
+               (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
+-
+-      chip->allocated_banks = kcalloc(nr_possible_banks,
+-                                      sizeof(*chip->allocated_banks),
+-                                      GFP_KERNEL);
+-      if (!chip->allocated_banks) {
++      if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
++              pr_err("tpm: out of bank capacity: %u > %u\n",
++                     nr_possible_banks, TPM2_MAX_PCR_BANKS);
+               rc = -ENOMEM;
+               goto out;
+       }
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -26,7 +26,9 @@
+ #include <crypto/aes.h>
+ #define TPM_DIGEST_SIZE 20    /* Max TPM v1.2 PCR size */
+-#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
++
++#define TPM2_MAX_DIGEST_SIZE  SHA512_DIGEST_SIZE
++#define TPM2_MAX_PCR_BANKS    8
+ struct tpm_chip;
+ struct trusted_key_payload;
+@@ -68,7 +70,7 @@ enum tpm2_curves {
+ struct tpm_digest {
+       u16 alg_id;
+-      u8 digest[TPM_MAX_DIGEST_SIZE];
++      u8 digest[TPM2_MAX_DIGEST_SIZE];
+ } __packed;
+ struct tpm_bank_info {
+@@ -189,7 +191,7 @@ struct tpm_chip {
+       unsigned int groups_cnt;
+       u32 nr_allocated_banks;
+-      struct tpm_bank_info *allocated_banks;
++      struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
+ #ifdef CONFIG_ACPI
+       acpi_handle acpi_dev_handle;
+       char ppi_version[TPM_PPI_VERSION_LEN + 1];
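
A standalone sketch of the hardening pattern used above (the constants, the struct, and the 0x000B value assumed for TPM_ALG_SHA256 are all illustrative): a count parsed from external I/O is validated against a fixed-size array before use instead of being trusted to size an allocation.

/* Illustrative clamp of an externally supplied bank count. */
#include <stdio.h>
#include <stdint.h>

#define MAX_PCR_BANKS 8u

struct bank_info { uint16_t alg_id; };

static int parse_banks(unsigned int nr_from_device,
		       struct bank_info banks[], unsigned int *nr_out)
{
	if (nr_from_device > MAX_PCR_BANKS) {
		fprintf(stderr, "out of bank capacity: %u > %u\n",
			nr_from_device, MAX_PCR_BANKS);
		return -1;			/* mirrors the -ENOMEM bail-out */
	}
	for (unsigned int i = 0; i < nr_from_device; i++)
		banks[i].alg_id = 0x000B;	/* assumed TPM_ALG_SHA256 value */
	*nr_out = nr_from_device;
	return 0;
}

int main(void)
{
	struct bank_info banks[MAX_PCR_BANKS];
	unsigned int nr = 0;

	printf("nr=3:   %s\n", parse_banks(3, banks, &nr) ? "rejected" : "accepted");
	printf("nr=512: %s\n", parse_banks(512, banks, &nr) ? "rejected" : "accepted");
	return 0;
}
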
diff --git a/queue-6.18/wifi-mt76-fix-dts-power-limits-on-little-endian-systems.patch b/queue-6.18/wifi-mt76-fix-dts-power-limits-on-little-endian-systems.patch
new file mode 100644 (file)
index 0000000..70bfce9
--- /dev/null
@@ -0,0 +1,134 @@
+From 38b845e1f9e810869b0a0b69f202b877b7b7fb12 Mon Sep 17 00:00:00 2001
+From: "Sven Eckelmann (Plasma Cloud)" <se@simonwunderlich.de>
+Date: Fri, 26 Sep 2025 11:32:54 +0200
+Subject: wifi: mt76: Fix DTS power-limits on little endian systems
+
+From: Sven Eckelmann (Plasma Cloud) <se@simonwunderlich.de>
+
+commit 38b845e1f9e810869b0a0b69f202b877b7b7fb12 upstream.
+
+The power-limits for ru and mcs are stored in the devicetree as bytewise
+arrays (often with sizes which are not a multiple of 4). These arrays have a
+prefix which defines for how many modes a line is applied. This prefix is
+also only a byte - but the code still tried to fix the endianness of this
+byte with a be32 operation. As a result, loading was mostly failing or was
+sending completely unexpected values to the firmware.
+
+Since the other rates are also stored in the devicetree as bytewise arrays,
+just drop the u32 access + be32_to_cpu conversion and directly access them
+as byte arrays.
+
+Cc: stable@vger.kernel.org
+Fixes: 22b980badc0f ("mt76: add functions for parsing rate power limits from DT")
+Fixes: a9627d992b5e ("mt76: extend DT rate power limits to support 11ax devices")
+Signed-off-by: Sven Eckelmann (Plasma Cloud) <se@simonwunderlich.de>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/eeprom.c |   37 ++++++++++++++++++----------
+ 1 file changed, 24 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
+@@ -253,6 +253,19 @@ mt76_get_of_array(struct device_node *np
+       return prop->value;
+ }
++static const s8 *
++mt76_get_of_array_s8(struct device_node *np, char *name, size_t *len, int min)
++{
++      struct property *prop = of_find_property(np, name, NULL);
++
++      if (!prop || !prop->value || prop->length < min)
++              return NULL;
++
++      *len = prop->length;
++
++      return prop->value;
++}
++
+ struct device_node *
+ mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan)
+ {
+@@ -294,7 +307,7 @@ mt76_get_txs_delta(struct device_node *n
+ }
+ static void
+-mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data,
++mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data,
+                      s8 target_power, s8 nss_delta, s8 *max_power)
+ {
+       int i;
+@@ -303,15 +316,14 @@ mt76_apply_array_limit(s8 *pwr, size_t p
+               return;
+       for (i = 0; i < pwr_len; i++) {
+-              pwr[i] = min_t(s8, target_power,
+-                             be32_to_cpu(data[i]) + nss_delta);
++              pwr[i] = min_t(s8, target_power, data[i] + nss_delta);
+               *max_power = max(*max_power, pwr[i]);
+       }
+ }
+ static void
+ mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
+-                           const __be32 *data, size_t len, s8 target_power,
++                           const s8 *data, size_t len, s8 target_power,
+                            s8 nss_delta, s8 *max_power)
+ {
+       int i, cur;
+@@ -319,8 +331,7 @@ mt76_apply_multi_array_limit(s8 *pwr, si
+       if (!data)
+               return;
+-      len /= 4;
+-      cur = be32_to_cpu(data[0]);
++      cur = data[0];
+       for (i = 0; i < pwr_num; i++) {
+               if (len < pwr_len + 1)
+                       break;
+@@ -335,7 +346,7 @@ mt76_apply_multi_array_limit(s8 *pwr, si
+               if (!len)
+                       break;
+-              cur = be32_to_cpu(data[0]);
++              cur = data[0];
+       }
+ }
+@@ -346,7 +357,7 @@ s8 mt76_get_rate_power_limits(struct mt7
+ {
+       struct mt76_dev *dev = phy->dev;
+       struct device_node *np;
+-      const __be32 *val;
++      const s8 *val;
+       char name[16];
+       u32 mcs_rates = dev->drv->mcs_rates;
+       u32 ru_rates = ARRAY_SIZE(dest->ru[0]);
+@@ -392,21 +403,21 @@ s8 mt76_get_rate_power_limits(struct mt7
+       txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask));
+-      val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
++      val = mt76_get_of_array_s8(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
+       mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
+                              target_power, txs_delta, &max_power);
+-      val = mt76_get_of_array(np, "rates-ofdm",
+-                              &len, ARRAY_SIZE(dest->ofdm));
++      val = mt76_get_of_array_s8(np, "rates-ofdm",
++                                 &len, ARRAY_SIZE(dest->ofdm));
+       mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
+                              target_power, txs_delta, &max_power);
+-      val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1);
++      val = mt76_get_of_array_s8(np, "rates-mcs", &len, mcs_rates + 1);
+       mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
+                                    ARRAY_SIZE(dest->mcs), val, len,
+                                    target_power, txs_delta, &max_power);
+-      val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1);
++      val = mt76_get_of_array_s8(np, "rates-ru", &len, ru_rates + 1);
+       mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]),
+                                    ARRAY_SIZE(dest->ru), val, len,
+                                    target_power, txs_delta, &max_power);
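
The commit message above boils down to a width mismatch: the per-line prefix in the devicetree arrays is a single byte, but the old code decoded it with a four-byte big-endian load, so the result mixed in the neighbouring power values. A small standalone C sketch of that failure mode follows; read_prefix_be32() and read_prefix_u8() are illustrative helpers, not mt76 functions.

#include <stdint.h>
#include <stdio.h>

/* Broken approach: treat the first byte of a packed s8 array as a __be32. */
static uint32_t read_prefix_be32(const uint8_t *data)
{
        /* Same result as be32_to_cpu() of a raw 4-byte load. */
        return ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
               ((uint32_t)data[2] << 8)  |  (uint32_t)data[3];
}

/* Fixed approach: the prefix really is just one byte. */
static uint8_t read_prefix_u8(const uint8_t *data)
{
        return data[0];
}

int main(void)
{
        /* DT "rates-mcs"-style data: <prefix> <power values...> */
        const uint8_t rates[] = { 2, 20, 18, 16, 14 };

        printf("be32 read of prefix: %u\n",
               (unsigned)read_prefix_be32(rates));    /* huge bogus value */
        printf("byte read of prefix: %u\n",
               (unsigned)read_prefix_u8(rates));      /* 2 */
        return 0;
}

Indexing the property directly as an s8 array is also why the patch can drop the old len /= 4 adjustment: len is already a byte count.
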
diff --git a/queue-6.18/x86-mce-do-not-clear-bank-s-poll-bit-in-mce_poll_banks-on-amd-smca-systems.patch b/queue-6.18/x86-mce-do-not-clear-bank-s-poll-bit-in-mce_poll_banks-on-amd-smca-systems.patch
new file mode 100644 (file)
index 0000000..87da038
--- /dev/null
@@ -0,0 +1,43 @@
+From d7ac083f095d894a0b8ac0573516bfd035e6b25a Mon Sep 17 00:00:00 2001
+From: Avadhut Naik <avadhut.naik@amd.com>
+Date: Fri, 21 Nov 2025 19:04:04 +0000
+Subject: x86/mce: Do not clear bank's poll bit in mce_poll_banks on AMD SMCA systems
+
+From: Avadhut Naik <avadhut.naik@amd.com>
+
+commit d7ac083f095d894a0b8ac0573516bfd035e6b25a upstream.
+
+Currently, when a CMCI storm detected on a Machine Check bank subsides, the
+bank's corresponding bit in the mce_poll_banks per-CPU variable is cleared
+unconditionally by cmci_storm_end().
+
+On AMD SMCA systems, this essentially disables polling on that particular bank
+on that CPU. Consequently, any subsequent correctable errors or storms will not
+be logged.
+
+Since AMD SMCA systems allow banks to be managed by both polling and
+interrupts, the polling banks bitmap for a CPU, i.e., mce_poll_banks, should
+not be modified when a storm subsides.
+
+Fixes: 7eae17c4add5 ("x86/mce: Add per-bank CMCI storm mitigation")
+Signed-off-by: Avadhut Naik <avadhut.naik@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251121190542.2447913-2-avadhut.naik@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mce/threshold.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mce/threshold.c
++++ b/arch/x86/kernel/cpu/mce/threshold.c
+@@ -85,7 +85,8 @@ void cmci_storm_end(unsigned int bank)
+ {
+       struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+-      __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
++      if (!mce_flags.amd_threshold)
++              __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
+       storm->banks[bank].history = 0;
+       storm->banks[bank].in_storm_mode = false;
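
To see why the unconditional clear is harmful, the sketch below models the per-CPU poll bitmap: once a bank's bit is dropped, the polling loop never looks at that bank again, which on AMD SMCA systems means later correctable errors go unlogged. This is a rough standalone C model; poll_banks, storm_end() and amd_threshold are simplified stand-ins here, not the kernel's symbols.

#include <stdbool.h>
#include <stdio.h>

#define NR_BANKS 8

static unsigned long poll_banks = 0xffUL;       /* all banks polled initially */
static bool amd_threshold = true;               /* set on AMD SMCA systems */

/* Model of cmci_storm_end(): with the fix, AMD keeps the poll bit set. */
static void storm_end(unsigned int bank)
{
        if (!amd_threshold)
                poll_banks &= ~(1UL << bank);
}

/* Model of the polling path: only banks still in the bitmap are checked. */
static void poll_all(void)
{
        for (unsigned int bank = 0; bank < NR_BANKS; bank++)
                if (poll_banks & (1UL << bank))
                        printf("polling bank %u\n", bank);
}

int main(void)
{
        storm_end(3);   /* storm on bank 3 subsides */
        poll_all();     /* bank 3 is still polled on AMD SMCA */
        return 0;
}
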