git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.16-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 13 Sep 2025 13:20:18 +0000 (15:20 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 13 Sep 2025 13:20:18 +0000 (15:20 +0200)
added patches:
arm64-kexec-initialize-kexec_buf-struct-in-load_other_segments.patch
btrfs-fix-squota-compressed-stats-leak.patch
btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch
compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch
doc-mptcp-net.mptcp.pm_type-is-deprecated.patch
drm-amd-display-correct-sequences-and-delays-for-dcn35-pg-rcg.patch
drm-amd-display-disable-dpcd-probe-quirk.patch
drm-amd-display-remove-oem-i2c-adapter-on-finish.patch
drm-amdgpu-fix-a-memory-leak-in-fence-cleanup-when-unloading.patch
drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch
drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch
drm-dp-add-an-edid-quirk-for-the-dpcd-register-access-probe.patch
drm-edid-add-support-for-quirks-visible-to-drm-core-and-drivers.patch
drm-edid-define-the-quirks-in-an-enum-list.patch
drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch
drm-mediatek-fix-potential-of-node-use-after-free.patch
drm-xe-allow-the-pm-notifier-to-continue-on-failure.patch
drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch
drm-xe-block-exec-and-rebind-worker-while-evicting-for-suspend-hibernate.patch
edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch
fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch
fuse-do-not-allow-mapping-a-non-regular-backing-file.patch
fuse-prevent-overflow-in-copy_file_range-return-value.patch
i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch
i2c-rtl9300-ensure-data-length-is-within-supported-range.patch
i2c-rtl9300-fix-channel-number-bound-check.patch
i2c-rtl9300-remove-broken-smbus-quick-operation-support.patch
mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch
mm-hugetlb-add-missing-hugetlb_lock-in-__unmap_hugepage_range.patch
mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch
mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch
mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
mm-vmalloc-mm-kasan-respect-gfp-mask-in-kasan_populate_vmalloc.patch
mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch
mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch
net-libwx-fix-to-enable-rss.patch
net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
netlink-specs-mptcp-fix-if-idx-attribute-type.patch
ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch
pm-em-add-function-for-registering-a-pd-without-capacity-update.patch
pm-hibernate-restrict-gfp-mask-in-hibernation_snapshot.patch
revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch
s390-kexec-initialize-kexec_buf-struct.patch
smb-client-fix-compound-alignment-with-encryption.patch
smb-client-fix-data-loss-due-to-broken-rename-2.patch
wifi-iwlwifi-fix-130-1030-configs.patch

50 files changed:
queue-6.16/arm64-kexec-initialize-kexec_buf-struct-in-load_other_segments.patch [new file with mode: 0644]
queue-6.16/btrfs-fix-squota-compressed-stats-leak.patch [new file with mode: 0644]
queue-6.16/btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch [new file with mode: 0644]
queue-6.16/compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch [new file with mode: 0644]
queue-6.16/doc-mptcp-net.mptcp.pm_type-is-deprecated.patch [new file with mode: 0644]
queue-6.16/drm-amd-display-correct-sequences-and-delays-for-dcn35-pg-rcg.patch [new file with mode: 0644]
queue-6.16/drm-amd-display-disable-dpcd-probe-quirk.patch [new file with mode: 0644]
queue-6.16/drm-amd-display-remove-oem-i2c-adapter-on-finish.patch [new file with mode: 0644]
queue-6.16/drm-amdgpu-fix-a-memory-leak-in-fence-cleanup-when-unloading.patch [new file with mode: 0644]
queue-6.16/drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch [new file with mode: 0644]
queue-6.16/drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch [new file with mode: 0644]
queue-6.16/drm-dp-add-an-edid-quirk-for-the-dpcd-register-access-probe.patch [new file with mode: 0644]
queue-6.16/drm-edid-add-support-for-quirks-visible-to-drm-core-and-drivers.patch [new file with mode: 0644]
queue-6.16/drm-edid-define-the-quirks-in-an-enum-list.patch [new file with mode: 0644]
queue-6.16/drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch [new file with mode: 0644]
queue-6.16/drm-mediatek-fix-potential-of-node-use-after-free.patch [new file with mode: 0644]
queue-6.16/drm-xe-allow-the-pm-notifier-to-continue-on-failure.patch [new file with mode: 0644]
queue-6.16/drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch [new file with mode: 0644]
queue-6.16/drm-xe-block-exec-and-rebind-worker-while-evicting-for-suspend-hibernate.patch [new file with mode: 0644]
queue-6.16/edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch [new file with mode: 0644]
queue-6.16/fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch [new file with mode: 0644]
queue-6.16/fuse-do-not-allow-mapping-a-non-regular-backing-file.patch [new file with mode: 0644]
queue-6.16/fuse-prevent-overflow-in-copy_file_range-return-value.patch [new file with mode: 0644]
queue-6.16/i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch [new file with mode: 0644]
queue-6.16/i2c-rtl9300-ensure-data-length-is-within-supported-range.patch [new file with mode: 0644]
queue-6.16/i2c-rtl9300-fix-channel-number-bound-check.patch [new file with mode: 0644]
queue-6.16/i2c-rtl9300-remove-broken-smbus-quick-operation-support.patch [new file with mode: 0644]
queue-6.16/mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch [new file with mode: 0644]
queue-6.16/mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch [new file with mode: 0644]
queue-6.16/mm-hugetlb-add-missing-hugetlb_lock-in-__unmap_hugepage_range.patch [new file with mode: 0644]
queue-6.16/mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch [new file with mode: 0644]
queue-6.16/mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch [new file with mode: 0644]
queue-6.16/mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch [new file with mode: 0644]
queue-6.16/mm-vmalloc-mm-kasan-respect-gfp-mask-in-kasan_populate_vmalloc.patch [new file with mode: 0644]
queue-6.16/mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch [new file with mode: 0644]
queue-6.16/mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch [new file with mode: 0644]
queue-6.16/mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch [new file with mode: 0644]
queue-6.16/mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch [new file with mode: 0644]
queue-6.16/net-libwx-fix-to-enable-rss.patch [new file with mode: 0644]
queue-6.16/net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch [new file with mode: 0644]
queue-6.16/netlink-specs-mptcp-fix-if-idx-attribute-type.patch [new file with mode: 0644]
queue-6.16/ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch [new file with mode: 0644]
queue-6.16/pm-em-add-function-for-registering-a-pd-without-capacity-update.patch [new file with mode: 0644]
queue-6.16/pm-hibernate-restrict-gfp-mask-in-hibernation_snapshot.patch [new file with mode: 0644]
queue-6.16/revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch [new file with mode: 0644]
queue-6.16/s390-kexec-initialize-kexec_buf-struct.patch [new file with mode: 0644]
queue-6.16/series
queue-6.16/smb-client-fix-compound-alignment-with-encryption.patch [new file with mode: 0644]
queue-6.16/smb-client-fix-data-loss-due-to-broken-rename-2.patch [new file with mode: 0644]
queue-6.16/wifi-iwlwifi-fix-130-1030-configs.patch [new file with mode: 0644]

diff --git a/queue-6.16/arm64-kexec-initialize-kexec_buf-struct-in-load_other_segments.patch b/queue-6.16/arm64-kexec-initialize-kexec_buf-struct-in-load_other_segments.patch
new file mode 100644 (file)
index 0000000..4c2a9fc
--- /dev/null
@@ -0,0 +1,93 @@
+From 04d3cd43700a2d0fe4bfb1012a8ec7f2e34a3507 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Wed, 27 Aug 2025 03:42:21 -0700
+Subject: arm64: kexec: initialize kexec_buf struct in load_other_segments()
+
+From: Breno Leitao <leitao@debian.org>
+
+commit 04d3cd43700a2d0fe4bfb1012a8ec7f2e34a3507 upstream.
+
+Patch series "kexec: Fix invalid field access".
+
+The kexec_buf structure was previously declared without initialization.
+commit bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
+added a field that is always read but not consistently populated by all
+architectures.  This un-initialized field will contain garbage.
+
+This also triggers a UBSAN warning when the uninitialized data is
+accessed:
+
+       ------------[ cut here ]------------
+       UBSAN: invalid-load in ./include/linux/kexec.h:210:10
+       load of value 252 is not a valid value for type '_Bool'
+
+Zero-initializing kexec_buf at declaration ensures all fields are cleanly
+set, preventing future instances of uninitialized memory being used.
+
+An initial fix was already landed for arm64[0], and this patchset fixes
+the problem on the remaining arm64 code and on riscv, as raised by Mark.
+
+Discussions about this problem could be found at[1][2].
+
+
+This patch (of 3):
+
+The kexec_buf structure was previously declared without initialization.
+commit bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
+added a field that is always read but not consistently populated by all
+architectures. This un-initialized field will contain garbage.
+
+This also triggers a UBSAN warning when the uninitialized data is
+accessed:
+
+       ------------[ cut here ]------------
+       UBSAN: invalid-load in ./include/linux/kexec.h:210:10
+       load of value 252 is not a valid value for type '_Bool'
+
+Zero-initializing kexec_buf at declaration ensures all fields are
+cleanly set, preventing future instances of uninitialized memory being
+used.
+
+Link: https://lkml.kernel.org/r/20250827-kbuf_all-v1-0-1df9882bb01a@debian.org
+Link: https://lkml.kernel.org/r/20250827-kbuf_all-v1-1-1df9882bb01a@debian.org
+Link: https://lore.kernel.org/all/20250826180742.f2471131255ec1c43683ea07@linux-foundation.org/ [0]
+Link: https://lore.kernel.org/all/oninomspajhxp4omtdapxnckxydbk2nzmrix7rggmpukpnzadw@c67o7njgdgm3/ [1]
+Link: https://lore.kernel.org/all/20250826-akpm-v1-1-3c831f0e3799@debian.org/ [2]
+Fixes: bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Acked-by: Baoquan He <bhe@redhat.com>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/machine_kexec_file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index af1ca875c52c..410060ebd86d 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -94,7 +94,7 @@ int load_other_segments(struct kimage *image,
+                       char *initrd, unsigned long initrd_len,
+                       char *cmdline)
+ {
+-      struct kexec_buf kbuf;
++      struct kexec_buf kbuf = {};
+       void *dtb = NULL;
+       unsigned long initrd_load_addr = 0, dtb_len,
+                     orig_segments = image->nr_segments;
+-- 
+2.51.0
+
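The fix above relies on plain C aggregate-initialization semantics: an empty
initializer zero-fills every member of the struct, including a later-added
flag that not every architecture's loader assigns. A minimal userspace sketch
of the same idea (the struct and field names are illustrative, not the real
struct kexec_buf layout):

    #include <stdio.h>
    #include <stdbool.h>

    struct buf_desc {
            unsigned long mem;
            bool random;            /* added later; many callers never set it */
    };

    int main(void)
    {
            /* Uninitialized: every member is indeterminate ("garbage"). */
            struct buf_desc uninit;
            /* Empty initializer: every member starts at 0 / false. */
            struct buf_desc zeroed = {};

            (void)uninit;
            zeroed.mem = 0x1000;                    /* callers set only what they use */
            printf("random=%d\n", zeroed.random);   /* always prints 0 */
            return 0;
    }
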
diff --git a/queue-6.16/btrfs-fix-squota-compressed-stats-leak.patch b/queue-6.16/btrfs-fix-squota-compressed-stats-leak.patch
new file mode 100644 (file)
index 0000000..92b9ab8
--- /dev/null
@@ -0,0 +1,70 @@
+From de134cb54c3a67644ff95b1c9bffe545e752c912 Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Wed, 20 Aug 2025 14:52:05 -0700
+Subject: btrfs: fix squota compressed stats leak
+
+From: Boris Burkov <boris@bur.io>
+
+commit de134cb54c3a67644ff95b1c9bffe545e752c912 upstream.
+
+The following workload on a squota enabled fs:
+
+  btrfs subvol create mnt/subvol
+
+  # ensure subvol extents get accounted
+  sync
+  btrfs qgroup create 1/1 mnt
+  btrfs qgroup assign mnt/subvol 1/1 mnt
+  btrfs qgroup delete mnt/subvol
+
+  # make the cleaner thread run
+  btrfs filesystem sync mnt
+  sleep 1
+  btrfs filesystem sync mnt
+  btrfs qgroup destroy 1/1 mnt
+
+will fail with EBUSY. The reason is that 1/1 does the quick accounting
+when we assign subvol to it, gaining its exclusive usage as excl and
+excl_cmpr. But then when we delete subvol, the decrement happens via
+record_squota_delta() which does not update excl_cmpr, as squotas does
+not make any distinction between compressed and normal extents. Thus,
+we increment excl_cmpr but never decrement it, and are unable to delete
+1/1. The two possible fixes are to make squota always mirror excl and
+excl_cmpr or to make the fast accounting separately track the plain and
+cmpr numbers. The latter felt cleaner to me so that is what I opted for.
+
+Fixes: 1e0e9d5771c3 ("btrfs: add helper for recording simple quota deltas")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/qgroup.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1483,6 +1483,7 @@ static int __qgroup_excl_accounting(stru
+       struct btrfs_qgroup *qgroup;
+       LIST_HEAD(qgroup_list);
+       u64 num_bytes = src->excl;
++      u64 num_bytes_cmpr = src->excl_cmpr;
+       int ret = 0;
+       qgroup = find_qgroup_rb(fs_info, ref_root);
+@@ -1494,11 +1495,12 @@ static int __qgroup_excl_accounting(stru
+               struct btrfs_qgroup_list *glist;
+               qgroup->rfer += sign * num_bytes;
+-              qgroup->rfer_cmpr += sign * num_bytes;
++              qgroup->rfer_cmpr += sign * num_bytes_cmpr;
+               WARN_ON(sign < 0 && qgroup->excl < num_bytes);
++              WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
+               qgroup->excl += sign * num_bytes;
+-              qgroup->excl_cmpr += sign * num_bytes;
++              qgroup->excl_cmpr += sign * num_bytes_cmpr;
+               if (sign > 0)
+                       qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
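A worked example with made-up numbers may help illustrate the leak the change
above closes. Suppose the subvolume owns 16K exclusively when it is assigned
to qgroup 1/1:

    assign subvol -> 1/1  (quick accounting, before the fix):
        1/1.excl      += src->excl   -> 16K
        1/1.excl_cmpr += src->excl   -> 16K   (bumped by the plain amount)

    delete subvol  (record_squota_delta):
        1/1.excl      -= 16K         ->  0
        1/1.excl_cmpr untouched      -> 16K   (squota ignores *_cmpr)

    btrfs qgroup destroy 1/1 -> EBUSY, because excl_cmpr is still non-zero

With the fix, the quick accounting only ever adjusts the *_cmpr counters by
src->excl_cmpr, so they can no longer be inflated by the plain-extent amount
and the destroy goes through.
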
diff --git a/queue-6.16/btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch b/queue-6.16/btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch
new file mode 100644 (file)
index 0000000..d03dd4e
--- /dev/null
@@ -0,0 +1,76 @@
+From f6a6c280059c4ddc23e12e3de1b01098e240036f Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Tue, 26 Aug 2025 11:24:38 -0700
+Subject: btrfs: fix subvolume deletion lockup caused by inodes xarray race
+
+From: Omar Sandoval <osandov@fb.com>
+
+commit f6a6c280059c4ddc23e12e3de1b01098e240036f upstream.
+
+There is a race condition between inode eviction and inode caching that
+can cause a live struct btrfs_inode to be missing from the root->inodes
+xarray. Specifically, there is a window during evict() between the inode
+being unhashed and deleted from the xarray. If btrfs_iget() is called
+for the same inode in that window, it will be recreated and inserted
+into the xarray, but then eviction will delete the new entry, leaving
+nothing in the xarray:
+
+Thread 1                          Thread 2
+---------------------------------------------------------------
+evict()
+  remove_inode_hash()
+                                  btrfs_iget_path()
+                                    btrfs_iget_locked()
+                                    btrfs_read_locked_inode()
+                                      btrfs_add_inode_to_root()
+  destroy_inode()
+    btrfs_destroy_inode()
+      btrfs_del_inode_from_root()
+        __xa_erase
+
+In turn, this can cause issues for subvolume deletion. Specifically, if
+an inode is in this lost state, and all other inodes are evicted, then
+btrfs_del_inode_from_root() will call btrfs_add_dead_root() prematurely.
+If the lost inode has a delayed_node attached to it, then when
+btrfs_clean_one_deleted_snapshot() calls btrfs_kill_all_delayed_nodes(),
+it will loop forever because the delayed_nodes xarray will never become
+empty (unless memory pressure forces the inode out). We saw this
+manifest as soft lockups in production.
+
+Fix it by only deleting the xarray entry if it matches the given inode
+(using __xa_cmpxchg()).
+
+Fixes: 310b2f5d5a94 ("btrfs: use an xarray to track open inodes in a root")
+Cc: stable@vger.kernel.org # 6.11+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Co-authored-by: Leo Martins <loemra.dev@gmail.com>
+Signed-off-by: Leo Martins <loemra.dev@gmail.com>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5685,7 +5685,17 @@ static void btrfs_del_inode_from_root(st
+       bool empty = false;
+       xa_lock(&root->inodes);
+-      entry = __xa_erase(&root->inodes, btrfs_ino(inode));
++      /*
++       * This btrfs_inode is being freed and has already been unhashed at this
++       * point. It's possible that another btrfs_inode has already been
++       * allocated for the same inode and inserted itself into the root, so
++       * don't delete it in that case.
++       *
++       * Note that this shouldn't need to allocate memory, so the gfp flags
++       * don't really matter.
++       */
++      entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
++                           GFP_ATOMIC);
+       if (entry == inode)
+               empty = xa_empty(&root->inodes);
+       xa_unlock(&root->inodes);
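The pattern used above is a general one for xarrays: erase a slot only if it
still holds the object being torn down, so a racing re-insert for the same
index is preserved. A hedged, generic sketch of that compare-and-erase idiom
(not the btrfs code itself; any object pointer works as the entry):

    #include <linux/xarray.h>

    /*
     * Erase xa[index] only if it still points at @obj.  If another thread
     * has already replaced the slot with a new object for the same index,
     * leave that newer entry in place.
     *
     * Returns true if our own entry was removed.
     */
    static bool erase_if_still_ours(struct xarray *xa, unsigned long index,
                                    void *obj)
    {
            void *old;

            xa_lock(xa);
            /* Storing NULL never allocates, so the gfp mask is a formality. */
            old = __xa_cmpxchg(xa, index, obj, NULL, GFP_ATOMIC);
            xa_unlock(xa);

            return old == obj;
    }
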
diff --git a/queue-6.16/compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch b/queue-6.16/compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch
new file mode 100644 (file)
index 0000000..cf288ee
--- /dev/null
@@ -0,0 +1,96 @@
+From 3fac212fe489aa0dbe8d80a42a7809840ca7b0f9 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Tue, 2 Sep 2025 15:49:26 -0700
+Subject: compiler-clang.h: define __SANITIZE_*__ macros only when undefined
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 3fac212fe489aa0dbe8d80a42a7809840ca7b0f9 upstream.
+
+Clang 22 recently added support for defining __SANITIZE_*__ macros similar
+to GCC [1], which causes warnings (or errors with CONFIG_WERROR=y or W=e)
+with the existing defines that the kernel creates to emulate this behavior
+with existing clang versions.
+
+  In file included from <built-in>:3:
+  In file included from include/linux/compiler_types.h:171:
+  include/linux/compiler-clang.h:37:9: error: '__SANITIZE_THREAD__' macro redefined [-Werror,-Wmacro-redefined]
+     37 | #define __SANITIZE_THREAD__
+        |         ^
+  <built-in>:352:9: note: previous definition is here
+    352 | #define __SANITIZE_THREAD__ 1
+        |         ^
+
+Refactor compiler-clang.h to only define the sanitizer macros when they
+are undefined and adjust the rest of the code to use these macros for
+checking if the sanitizers are enabled, clearing up the warnings and
+allowing the kernel to easily drop these defines when the minimum
+supported version of LLVM for building the kernel becomes 22.0.0 or newer.
+
+Link: https://lkml.kernel.org/r/20250902-clang-update-sanitize-defines-v1-1-cf3702ca3d92@kernel.org
+Link: https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Justin Stitt <justinstitt@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler-clang.h |   29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -18,23 +18,42 @@
+ #define KASAN_ABI_VERSION 5
+ /*
++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
++ * dropping __has_feature support for sanitizers:
++ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
++ * Create these macros for older versions of clang so that it is easy to clean
++ * up once the minimum supported version of LLVM for building the kernel always
++ * creates these macros.
++ *
+  * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+  * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+  * to avoid adding redundant attributes in other configurations.
+  */
++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
++#define __SANITIZE_ADDRESS__
++#endif
++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
++#define __SANITIZE_HWADDRESS__
++#endif
++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
++#define __SANITIZE_THREAD__
++#endif
+-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
++/*
++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
++ */
++#ifdef __SANITIZE_HWADDRESS__
+ #define __SANITIZE_ADDRESS__
++#endif
++
++#ifdef __SANITIZE_ADDRESS__
+ #define __no_sanitize_address \
+               __attribute__((no_sanitize("address", "hwaddress")))
+ #else
+ #define __no_sanitize_address
+ #endif
+-#if __has_feature(thread_sanitizer)
+-/* emulate gcc's __SANITIZE_THREAD__ flag */
+-#define __SANITIZE_THREAD__
++#ifdef __SANITIZE_THREAD__
+ #define __no_sanitize_thread \
+               __attribute__((no_sanitize("thread")))
+ #else
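The shape of the change above is a standard preprocessor pattern: define the
GCC-style macro only when the compiler has not already provided it, then have
all downstream code test the one canonical macro instead of __has_feature().
A stripped-down sketch of that pattern (the consumer function at the end is
illustrative, not kernel code):

    /* Define the macro ourselves only if the compiler did not (Clang < 22). */
    #if defined(__has_feature)
    # if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
    #  define __SANITIZE_ADDRESS__
    # endif
    #endif

    /* Downstream code keys off the single canonical macro. */
    #ifdef __SANITIZE_ADDRESS__
    # define __no_sanitize_address __attribute__((no_sanitize("address")))
    #else
    # define __no_sanitize_address
    #endif

    /* Illustrative consumer: this access is skipped by ASan instrumentation. */
    static __no_sanitize_address int read_unchecked(const int *p)
    {
            return *p;
    }
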
diff --git a/queue-6.16/doc-mptcp-net.mptcp.pm_type-is-deprecated.patch b/queue-6.16/doc-mptcp-net.mptcp.pm_type-is-deprecated.patch
new file mode 100644 (file)
index 0000000..913dcbd
--- /dev/null
@@ -0,0 +1,44 @@
+From 6f021e95d0828edc8ed104a294594c2f9569383a Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 8 Sep 2025 23:27:28 +0200
+Subject: doc: mptcp: net.mptcp.pm_type is deprecated
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 6f021e95d0828edc8ed104a294594c2f9569383a upstream.
+
+The net.mptcp.pm_type sysctl knob has been deprecated in v6.15,
+net.mptcp.path_manager should be used instead.
+
+Adapt the section about path managers to suggest using the new sysctl
+knob instead of the deprecated one.
+
+Fixes: 595c26d122d1 ("mptcp: sysctl: set path manager by name")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250908-net-mptcp-misc-fixes-6-17-rc5-v1-2-5f2168a66079@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/networking/mptcp.rst |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/Documentation/networking/mptcp.rst
++++ b/Documentation/networking/mptcp.rst
+@@ -60,10 +60,10 @@ address announcements. Typically, it is
+ and the server side that announces additional addresses via the ``ADD_ADDR`` and
+ ``REMOVE_ADDR`` options.
+-Path managers are controlled by the ``net.mptcp.pm_type`` sysctl knob -- see
+-mptcp-sysctl.rst. There are two types: the in-kernel one (type ``0``) where the
+-same rules are applied for all the connections (see: ``ip mptcp``) ; and the
+-userspace one (type ``1``), controlled by a userspace daemon (i.e. `mptcpd
++Path managers are controlled by the ``net.mptcp.path_manager`` sysctl knob --
++see mptcp-sysctl.rst. There are two types: the in-kernel one (``kernel``) where
++the same rules are applied for all the connections (see: ``ip mptcp``) ; and the
++userspace one (``userspace``), controlled by a userspace daemon (i.e. `mptcpd
+ <https://mptcpd.mptcp.dev/>`_) where different rules can be applied for each
+ connection. The path managers can be controlled via a Netlink API; see
+ netlink_spec/mptcp_pm.rst.
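For administrators, the documentation change above boils down to swapping the
numeric knob for the named one; a hedged example of selecting the userspace
path manager (values taken from the hunk above):

    # deprecated since v6.15
    sysctl -w net.mptcp.pm_type=1

    # preferred replacement ("kernel" selects the in-kernel path manager)
    sysctl -w net.mptcp.path_manager=userspace
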
diff --git a/queue-6.16/drm-amd-display-correct-sequences-and-delays-for-dcn35-pg-rcg.patch b/queue-6.16/drm-amd-display-correct-sequences-and-delays-for-dcn35-pg-rcg.patch
new file mode 100644 (file)
index 0000000..dbafad2
--- /dev/null
@@ -0,0 +1,655 @@
+From 70f0b051f82d0234ade2f6753f72a2610048db3b Mon Sep 17 00:00:00 2001
+From: Ovidiu Bunea <ovidiu.bunea@amd.com>
+Date: Mon, 25 Aug 2025 14:45:33 -0400
+Subject: drm/amd/display: Correct sequences and delays for DCN35 PG & RCG
+
+From: Ovidiu Bunea <ovidiu.bunea@amd.com>
+
+commit 70f0b051f82d0234ade2f6753f72a2610048db3b upstream.
+
+[why]
+The current PG & RCG programming in driver has some gaps and incorrect
+sequences.
+
+[how]
+Added delays after ungating clocks to allow ramp up, increased polling
+to allow more time for power up, and removed the incorrect sequences.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Charlene Liu <charlene.liu@amd.com>
+Signed-off-by: Ovidiu Bunea <ovidiu.bunea@amd.com>
+Signed-off-by: Wayne Lin <wayne.lin@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 1bde5584e297921f45911ae874b0175dce5ed4b5)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h                      |    1 
+ drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c   |   74 +++++----
+ drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c  |  115 ++-------------
+ drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c   |    3 
+ drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c |    3 
+ drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h          |    1 
+ drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c  |   78 ++++++----
+ 7 files changed, 111 insertions(+), 164 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1095,6 +1095,7 @@ struct dc_debug_options {
+       bool enable_hblank_borrow;
+       bool force_subvp_df_throttle;
+       uint32_t acpi_transition_bitmasks[MAX_PIPES];
++      bool enable_pg_cntl_debug_logs;
+ };
+--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+@@ -133,30 +133,34 @@ enum dsc_clk_source {
+ };
+-static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
++static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
+ {
+       struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+-      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
++      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && allow_rcg)
+               return;
+       switch (inst) {
+       case 0:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 1:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 2:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 3:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+               return;
+       }
++
++      /* Wait for clock to ramp */
++      if (!allow_rcg)
++              udelay(10);
+ }
+ static void dccg35_set_symclk32_se_rcg(
+@@ -385,35 +389,34 @@ static void dccg35_set_dtbclk_p_rcg(stru
+       }
+ }
+-static void dccg35_set_dppclk_rcg(struct dccg *dccg,
+-                                                                                              int inst, bool enable)
++static void dccg35_set_dppclk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
+ {
+-
+       struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+-
+-      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
++      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && allow_rcg)
+               return;
+       switch (inst) {
+       case 0:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 1:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 2:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       case 3:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
+               break;
+       default:
+       BREAK_TO_DEBUGGER();
+               break;
+       }
+-      //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
++      /* Wait for clock to ramp */
++      if (!allow_rcg)
++              udelay(10);
+ }
+ static void dccg35_set_dpstreamclk_rcg(
+@@ -1177,32 +1180,34 @@ static void dccg35_update_dpp_dto(struct
+ }
+ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
+-               uint32_t dpp_inst, uint32_t enable)
++               uint32_t dpp_inst, uint32_t disallow_rcg)
+ {
+       struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+-      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
++      if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && !disallow_rcg)
+               return;
+       switch (dpp_inst) {
+       case 0:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, disallow_rcg);
+               break;
+       case 1:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, disallow_rcg);
+               break;
+       case 2:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, disallow_rcg);
+               break;
+       case 3:
+-              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, disallow_rcg);
+               break;
+       default:
+               break;
+       }
+-      //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
++      /* Wait for clock to ramp */
++      if (disallow_rcg)
++              udelay(10);
+ }
+ static void dccg35_get_pixel_rate_div(
+@@ -1782,8 +1787,7 @@ static void dccg35_enable_dscclk(struct
+       //Disable DTO
+       switch (inst) {
+       case 0:
+-              if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+-                      REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+               REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+                               DSCCLK0_DTO_PHASE, 0,
+@@ -1791,8 +1795,7 @@ static void dccg35_enable_dscclk(struct
+               REG_UPDATE(DSCCLK_DTO_CTRL,     DSCCLK0_EN, 1);
+               break;
+       case 1:
+-              if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+-                      REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+               REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+                               DSCCLK1_DTO_PHASE, 0,
+@@ -1800,8 +1803,7 @@ static void dccg35_enable_dscclk(struct
+               REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
+               break;
+       case 2:
+-              if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+-                      REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+               REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+                               DSCCLK2_DTO_PHASE, 0,
+@@ -1809,8 +1811,7 @@ static void dccg35_enable_dscclk(struct
+               REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
+               break;
+       case 3:
+-              if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+-                      REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
++              REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+               REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+                               DSCCLK3_DTO_PHASE, 0,
+@@ -1821,6 +1822,9 @@ static void dccg35_enable_dscclk(struct
+               BREAK_TO_DEBUGGER();
+               return;
+       }
++
++      /* Wait for clock to ramp */
++      udelay(10);
+ }
+ static void dccg35_disable_dscclk(struct dccg *dccg,
+@@ -1864,6 +1868,9 @@ static void dccg35_disable_dscclk(struct
+       default:
+               return;
+       }
++
++      /* Wait for clock ramp */
++      udelay(10);
+ }
+ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+@@ -2349,10 +2356,7 @@ static void dccg35_disable_symclk_se_cb(
+ void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
+ {
+-
+-      if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+-              dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
+-      }
++      dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
+ }
+ static const struct dccg_funcs dccg35_funcs_new = {
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -113,6 +113,14 @@ static void enable_memory_low_power(stru
+ }
+ #endif
++static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
++{
++      if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
++              if (dc->res_pool->pg_cntl->funcs->print_pg_status)
++                      dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
++      }
++}
++
+ void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
+ {
+       REG_UPDATE_3(DMU_CLK_CNTL,
+@@ -137,6 +145,8 @@ void dcn35_init_hw(struct dc *dc)
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
+       int i;
++      print_pg_status(dc, __func__, ": start");
++
+       if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+               dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+@@ -200,10 +210,7 @@ void dcn35_init_hw(struct dc *dc)
+       /* we want to turn off all dp displays before doing detection */
+       dc->link_srv->blank_all_dp_displays(dc);
+-/*
+-      if (hws->funcs.enable_power_gating_plane)
+-              hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+-*/
++
+       if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
+               res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
+       /* If taking control over from VBIOS, we may want to optimize our first
+@@ -236,6 +243,8 @@ void dcn35_init_hw(struct dc *dc)
+               }
+               hws->funcs.init_pipes(dc, dc->current_state);
++              print_pg_status(dc, __func__, ": after init_pipes");
++
+               if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+                       !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
+                       dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+@@ -312,6 +321,7 @@ void dcn35_init_hw(struct dc *dc)
+               if (dc->res_pool->pg_cntl->funcs->init_pg_status)
+                       dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
+       }
++      print_pg_status(dc, __func__, ": after init_pg_status");
+ }
+ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+@@ -500,97 +510,6 @@ void dcn35_physymclk_root_clock_control(
+       }
+ }
+-void dcn35_dsc_pg_control(
+-              struct dce_hwseq *hws,
+-              unsigned int dsc_inst,
+-              bool power_on)
+-{
+-      uint32_t power_gate = power_on ? 0 : 1;
+-      uint32_t pwr_status = power_on ? 0 : 2;
+-      uint32_t org_ip_request_cntl = 0;
+-
+-      if (hws->ctx->dc->debug.disable_dsc_power_gate)
+-              return;
+-      if (hws->ctx->dc->debug.ignore_pg)
+-              return;
+-      REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+-      if (org_ip_request_cntl == 0)
+-              REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+-
+-      switch (dsc_inst) {
+-      case 0: /* DSC0 */
+-              REG_UPDATE(DOMAIN16_PG_CONFIG,
+-                              DOMAIN_POWER_GATE, power_gate);
+-
+-              REG_WAIT(DOMAIN16_PG_STATUS,
+-                              DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
+-              break;
+-      case 1: /* DSC1 */
+-              REG_UPDATE(DOMAIN17_PG_CONFIG,
+-                              DOMAIN_POWER_GATE, power_gate);
+-
+-              REG_WAIT(DOMAIN17_PG_STATUS,
+-                              DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
+-              break;
+-      case 2: /* DSC2 */
+-              REG_UPDATE(DOMAIN18_PG_CONFIG,
+-                              DOMAIN_POWER_GATE, power_gate);
+-
+-              REG_WAIT(DOMAIN18_PG_STATUS,
+-                              DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
+-              break;
+-      case 3: /* DSC3 */
+-              REG_UPDATE(DOMAIN19_PG_CONFIG,
+-                              DOMAIN_POWER_GATE, power_gate);
+-
+-              REG_WAIT(DOMAIN19_PG_STATUS,
+-                              DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
+-              break;
+-      default:
+-              BREAK_TO_DEBUGGER();
+-              break;
+-      }
+-
+-      if (org_ip_request_cntl == 0)
+-              REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+-}
+-
+-void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
+-{
+-      bool force_on = true; /* disable power gating */
+-      uint32_t org_ip_request_cntl = 0;
+-
+-      if (hws->ctx->dc->debug.disable_hubp_power_gate)
+-              return;
+-      if (hws->ctx->dc->debug.ignore_pg)
+-              return;
+-      REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+-      if (org_ip_request_cntl == 0)
+-              REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+-      /* DCHUBP0/1/2/3/4/5 */
+-      REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      /* DPP0/1/2/3/4/5 */
+-      REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-
+-      force_on = true; /* disable power gating */
+-      if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
+-              force_on = false;
+-
+-      /* DCS0/1/2/3/4 */
+-      REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-      REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+-
+-
+-}
+-
+ /* In headless boot cases, DIG may be turned
+  * on which causes HW/SW discrepancies.
+  * To avoid this, power down hardware on boot
+@@ -1453,6 +1372,8 @@ void dcn35_prepare_bandwidth(
+       }
+       dcn20_prepare_bandwidth(dc, context);
++
++      print_pg_status(dc, __func__, ": after rcg and power up");
+ }
+ void dcn35_optimize_bandwidth(
+@@ -1461,6 +1382,8 @@ void dcn35_optimize_bandwidth(
+ {
+       struct pg_block_update pg_update_state;
++      print_pg_status(dc, __func__, ": before rcg and power up");
++
+       dcn20_optimize_bandwidth(dc, context);
+       if (dc->hwss.calc_blocks_to_gate) {
+@@ -1472,6 +1395,8 @@ void dcn35_optimize_bandwidth(
+               if (dc->hwss.root_clock_control)
+                       dc->hwss.root_clock_control(dc, &pg_update_state, false);
+       }
++
++      print_pg_status(dc, __func__, ": after rcg and power up");
+ }
+ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+@@ -115,7 +115,6 @@ static const struct hw_sequencer_funcs d
+       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+       .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+       .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+-      .update_dsc_pg = dcn32_update_dsc_pg,
+       .calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
+       .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
+       .hw_block_power_up = dcn35_hw_block_power_up,
+@@ -150,7 +149,6 @@ static const struct hwseq_private_funcs
+       .plane_atomic_disable = dcn35_plane_atomic_disable,
+       //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+       //.hubp_pg_control = dcn35_hubp_pg_control,
+-      .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+       .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+       .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
+       .physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
+@@ -165,7 +163,6 @@ static const struct hwseq_private_funcs
+       .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+       .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
+       .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
+-      .dsc_pg_control = dcn35_dsc_pg_control,
+       .dsc_pg_status = dcn32_dsc_pg_status,
+       .enable_plane = dcn35_enable_plane,
+       .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+@@ -114,7 +114,6 @@ static const struct hw_sequencer_funcs d
+       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+       .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+       .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+-      .update_dsc_pg = dcn32_update_dsc_pg,
+       .calc_blocks_to_gate = dcn351_calc_blocks_to_gate,
+       .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate,
+       .hw_block_power_up = dcn351_hw_block_power_up,
+@@ -145,7 +144,6 @@ static const struct hwseq_private_funcs
+       .plane_atomic_disable = dcn35_plane_atomic_disable,
+       //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+       //.hubp_pg_control = dcn35_hubp_pg_control,
+-      .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+       .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+       .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
+       .physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
+@@ -159,7 +157,6 @@ static const struct hwseq_private_funcs
+       .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+       .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+       .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
+-      .dsc_pg_control = dcn35_dsc_pg_control,
+       .dsc_pg_status = dcn32_dsc_pg_status,
+       .enable_plane = dcn35_enable_plane,
+       .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+@@ -47,6 +47,7 @@ struct pg_cntl_funcs {
+       void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
+       void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+       void (*init_pg_status)(struct pg_cntl *pg_cntl);
++      void (*print_pg_status)(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log);
+ };
+ #endif //__DC_PG_CNTL_H__
+--- a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
++++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
+@@ -79,16 +79,12 @@ void pg_cntl35_dsc_pg_control(struct pg_
+       uint32_t power_gate = power_on ? 0 : 1;
+       uint32_t pwr_status = power_on ? 0 : 2;
+       uint32_t org_ip_request_cntl = 0;
+-      bool block_enabled;
+-
+-      /*need to enable dscclk regardless DSC_PG*/
+-      if (pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on)
+-              pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc(
+-                              pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
++      bool block_enabled = false;
++      bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
++                     pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
++                     pg_cntl->ctx->dc->idle_optimizations_allowed;
+-      if (pg_cntl->ctx->dc->debug.ignore_pg ||
+-              pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
+-              pg_cntl->ctx->dc->idle_optimizations_allowed)
++      if (skip_pg && !power_on)
+               return;
+       block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst);
+@@ -111,7 +107,7 @@ void pg_cntl35_dsc_pg_control(struct pg_
+               REG_WAIT(DOMAIN16_PG_STATUS,
+                               DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
++                              1, 10000);
+               break;
+       case 1: /* DSC1 */
+               REG_UPDATE(DOMAIN17_PG_CONFIG,
+@@ -119,7 +115,7 @@ void pg_cntl35_dsc_pg_control(struct pg_
+               REG_WAIT(DOMAIN17_PG_STATUS,
+                               DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
++                              1, 10000);
+               break;
+       case 2: /* DSC2 */
+               REG_UPDATE(DOMAIN18_PG_CONFIG,
+@@ -127,7 +123,7 @@ void pg_cntl35_dsc_pg_control(struct pg_
+               REG_WAIT(DOMAIN18_PG_STATUS,
+                               DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
++                              1, 10000);
+               break;
+       case 3: /* DSC3 */
+               REG_UPDATE(DOMAIN19_PG_CONFIG,
+@@ -135,7 +131,7 @@ void pg_cntl35_dsc_pg_control(struct pg_
+               REG_WAIT(DOMAIN19_PG_STATUS,
+                               DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+-                              1, 1000);
++                              1, 10000);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+@@ -144,12 +140,6 @@ void pg_cntl35_dsc_pg_control(struct pg_
+       if (dsc_inst < MAX_PIPES)
+               pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on;
+-
+-      if (pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) {
+-              /*this is to disable dscclk*/
+-              pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc(
+-                      pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
+-      }
+ }
+ static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst)
+@@ -189,11 +179,12 @@ void pg_cntl35_hubp_dpp_pg_control(struc
+       uint32_t pwr_status = power_on ? 0 : 2;
+       uint32_t org_ip_request_cntl;
+       bool block_enabled;
++      bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
++                     pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
++                     pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
++                     pg_cntl->ctx->dc->idle_optimizations_allowed;
+-      if (pg_cntl->ctx->dc->debug.ignore_pg ||
+-              pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
+-              pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
+-              pg_cntl->ctx->dc->idle_optimizations_allowed)
++      if (skip_pg && !power_on)
+               return;
+       block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
+@@ -213,22 +204,22 @@ void pg_cntl35_hubp_dpp_pg_control(struc
+       case 0:
+               /* DPP0 & HUBP0 */
+               REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
+-              REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
++              REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
+               break;
+       case 1:
+               /* DPP1 & HUBP1 */
+               REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
+-              REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
++              REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
+               break;
+       case 2:
+               /* DPP2 & HUBP2 */
+               REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
+-              REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
++              REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
+               break;
+       case 3:
+               /* DPP3 & HUBP3 */
+               REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
+-              REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
++              REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+@@ -501,6 +492,36 @@ void pg_cntl35_init_pg_status(struct pg_
+       pg_cntl->pg_res_enable[PG_DWB] = block_enabled;
+ }
++static void pg_cntl35_print_pg_status(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log)
++{
++      int i = 0;
++      bool block_enabled = false;
++
++      DC_LOG_DEBUG("%s: %s", debug_func, debug_log);
++
++      DC_LOG_DEBUG("PG_CNTL status:\n");
++
++      block_enabled = pg_cntl35_io_clk_status(pg_cntl);
++      DC_LOG_DEBUG("ONO0=%d (DCCG, DIO, DCIO)\n", block_enabled ? 1 : 0);
++
++      block_enabled = pg_cntl35_mem_status(pg_cntl);
++      DC_LOG_DEBUG("ONO1=%d (DCHUBBUB, DCHVM, DCHUBBUBMEM)\n", block_enabled ? 1 : 0);
++
++      block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
++      DC_LOG_DEBUG("ONO2=%d (MPC, OPP, OPTC, DWB)\n", block_enabled ? 1 : 0);
++
++      block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
++      DC_LOG_DEBUG("ONO3=%d (HPO)\n", block_enabled ? 1 : 0);
++
++      for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
++              block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
++              DC_LOG_DEBUG("ONO%d=%d (DCHUBP%d, DPP%d)\n", 4 + i * 2, block_enabled ? 1 : 0, i, i);
++
++              block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
++              DC_LOG_DEBUG("ONO%d=%d (DSC%d)\n", 5 + i * 2, block_enabled ? 1 : 0, i);
++      }
++}
++
+ static const struct pg_cntl_funcs pg_cntl35_funcs = {
+       .init_pg_status = pg_cntl35_init_pg_status,
+       .dsc_pg_control = pg_cntl35_dsc_pg_control,
+@@ -511,7 +532,8 @@ static const struct pg_cntl_funcs pg_cnt
+       .mpcc_pg_control = pg_cntl35_mpcc_pg_control,
+       .opp_pg_control = pg_cntl35_opp_pg_control,
+       .optc_pg_control = pg_cntl35_optc_pg_control,
+-      .dwb_pg_control = pg_cntl35_dwb_pg_control
++      .dwb_pg_control = pg_cntl35_dwb_pg_control,
++      .print_pg_status = pg_cntl35_print_pg_status
+ };
+ struct pg_cntl *pg_cntl35_create(
diff --git a/queue-6.16/drm-amd-display-disable-dpcd-probe-quirk.patch b/queue-6.16/drm-amd-display-disable-dpcd-probe-quirk.patch
new file mode 100644 (file)
index 0000000..ebc3797
--- /dev/null
@@ -0,0 +1,38 @@
+From f5c32370dba668c171c73684f489a3ea0b9503c5 Mon Sep 17 00:00:00 2001
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Date: Thu, 4 Sep 2025 15:13:51 -0400
+Subject: drm/amd/display: Disable DPCD Probe Quirk
+
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+
+commit f5c32370dba668c171c73684f489a3ea0b9503c5 upstream.
+
+Disable dpcd probe quirk to native aux.
+
+Signed-off-by: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4500
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Link: https://lore.kernel.org/r/20250904191351.746707-1-Jerry.Zuo@amd.com
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit c5f4fb40584ee591da9fa090c6f265d11cbb1acf)
+Cc: stable@vger.kernel.org # 6.16.y: 5281cbe0b55a
+Cc: stable@vger.kernel.org # 6.16.y: 0b4aa85e8981
+Cc: stable@vger.kernel.org # 6.16.y: b87ed522b364
+Cc: stable@vger.kernel.org # 6.16.y
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -809,6 +809,7 @@ void amdgpu_dm_initialize_dp_connector(s
+       drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
+       drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
+                                     &aconnector->base);
++      drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false);
+       if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+               return;
diff --git a/queue-6.16/drm-amd-display-remove-oem-i2c-adapter-on-finish.patch b/queue-6.16/drm-amd-display-remove-oem-i2c-adapter-on-finish.patch
new file mode 100644 (file)
index 0000000..b1afa10
--- /dev/null
@@ -0,0 +1,54 @@
+From 1dfd2864a1c4909147663e5a27c055f50f7c2796 Mon Sep 17 00:00:00 2001
+From: Geoffrey McRae <geoffrey.mcrae@amd.com>
+Date: Thu, 28 Aug 2025 22:26:22 +1000
+Subject: drm/amd/display: remove oem i2c adapter on finish
+
+From: Geoffrey McRae <geoffrey.mcrae@amd.com>
+
+commit 1dfd2864a1c4909147663e5a27c055f50f7c2796 upstream.
+
+Fixes a bug where unbinding of the GPU would leave the oem i2c adapter
+registered, resulting in a null pointer dereference when applications
+try to access the invalid device.
+
+Fixes: 3d5470c97314 ("drm/amd/display/dm: add support for OEM i2c bus")
+Cc: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Geoffrey McRae <geoffrey.mcrae@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 89923fb7ead4fdd37b78dd49962d9bb5892403e6)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2910,6 +2910,17 @@ static int dm_oem_i2c_hw_init(struct amd
+       return 0;
+ }
++static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev)
++{
++      struct amdgpu_display_manager *dm = &adev->dm;
++
++      if (dm->oem_i2c) {
++              i2c_del_adapter(&dm->oem_i2c->base);
++              kfree(dm->oem_i2c);
++              dm->oem_i2c = NULL;
++      }
++}
++
+ /**
+  * dm_hw_init() - Initialize DC device
+  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+@@ -2960,7 +2971,7 @@ static int dm_hw_fini(struct amdgpu_ip_b
+ {
+       struct amdgpu_device *adev = ip_block->adev;
+-      kfree(adev->dm.oem_i2c);
++      dm_oem_i2c_hw_fini(adev);
+       amdgpu_dm_hpd_fini(adev);
diff --git a/queue-6.16/drm-amdgpu-fix-a-memory-leak-in-fence-cleanup-when-unloading.patch b/queue-6.16/drm-amdgpu-fix-a-memory-leak-in-fence-cleanup-when-unloading.patch
new file mode 100644 (file)
index 0000000..8e41557
--- /dev/null
@@ -0,0 +1,43 @@
+From 7838fb5f119191403560eca2e23613380c0e425e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 4 Sep 2025 12:35:05 -0400
+Subject: drm/amdgpu: fix a memory leak in fence cleanup when unloading
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 7838fb5f119191403560eca2e23613380c0e425e upstream.
+
+Commit b61badd20b44 ("drm/amdgpu: fix usage slab after free")
+reordered when amdgpu_fence_driver_sw_fini() was called. After
+that patch, amdgpu_fence_driver_sw_fini() effectively became
+a no-op as the sched entities were never freed because the
+ring pointers were already set to NULL.  Remove the NULL
+setting.
+
+Reported-by: Lin.Cao <lincao12@amd.com>
+Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Fixes: b61badd20b44 ("drm/amdgpu: fix usage slab after free")
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit a525fa37aac36c4591cc8b07ae8957862415fbd5)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -389,8 +389,6 @@ void amdgpu_ring_fini(struct amdgpu_ring
+       dma_fence_put(ring->vmid_wait);
+       ring->vmid_wait = NULL;
+       ring->me = 0;
+-
+-      ring->adev->rings[ring->idx] = NULL;
+ }
+ /**
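The leak pattern described above can be summarized in a short sketch (simplified; amdgpu_fence_driver_sw_fini_one() is a hypothetical stand-in for the per-ring teardown done by amdgpu_fence_driver_sw_fini()):

    /* Sketch: if amdgpu_ring_fini() clears adev->rings[idx] before this
     * runs, every slot looks empty and the sched entities are never freed. */
    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
            struct amdgpu_ring *ring = adev->rings[i];

            if (!ring)      /* slot already NULLed -> entity leaked */
                    continue;
            amdgpu_fence_driver_sw_fini_one(ring);  /* hypothetical helper */
    }
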
diff --git a/queue-6.16/drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch b/queue-6.16/drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch
new file mode 100644 (file)
index 0000000..e799d62
--- /dev/null
@@ -0,0 +1,76 @@
+From 3318f2d20ce48849855df5e190813826d0bc3653 Mon Sep 17 00:00:00 2001
+From: David Rosca <david.rosca@amd.com>
+Date: Mon, 18 Aug 2025 09:18:37 +0200
+Subject: drm/amdgpu/vcn: Allow limiting ctx to instance 0 for AV1 at any time
+
+From: David Rosca <david.rosca@amd.com>
+
+commit 3318f2d20ce48849855df5e190813826d0bc3653 upstream.
+
+There is no reason to require this to happen on the first submitted IB only.
+We need to wait for the queue to be idle, but it can be done at any
+time (including when there are multiple video sessions active).
+
+Signed-off-by: David Rosca <david.rosca@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 8908fdce0634a623404e9923ed2f536101a39db5)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c |   12 ++++++++----
+ drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c |   12 ++++++++----
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1875,15 +1875,19 @@ static int vcn_v3_0_limit_sched(struct a
+                               struct amdgpu_job *job)
+ {
+       struct drm_gpu_scheduler **scheds;
+-
+-      /* The create msg must be in the first IB submitted */
+-      if (atomic_read(&job->base.entity->fence_seq))
+-              return -EINVAL;
++      struct dma_fence *fence;
+       /* if VCN0 is harvested, we can't support AV1 */
+       if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+               return -EINVAL;
++      /* wait for all jobs to finish before switching to instance 0 */
++      fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++      if (fence) {
++              dma_fence_wait(fence, false);
++              dma_fence_put(fence);
++      }
++
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+               [AMDGPU_RING_PRIO_DEFAULT].sched;
+       drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1807,15 +1807,19 @@ static int vcn_v4_0_limit_sched(struct a
+                               struct amdgpu_job *job)
+ {
+       struct drm_gpu_scheduler **scheds;
+-
+-      /* The create msg must be in the first IB submitted */
+-      if (atomic_read(&job->base.entity->fence_seq))
+-              return -EINVAL;
++      struct dma_fence *fence;
+       /* if VCN0 is harvested, we can't support AV1 */
+       if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+               return -EINVAL;
++      /* wait for all jobs to finish before switching to instance 0 */
++      fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++      if (fence) {
++              dma_fence_wait(fence, false);
++              dma_fence_put(fence);
++      }
++
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
+               [AMDGPU_RING_PRIO_0].sched;
+       drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
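A condensed sketch of the drain-then-switch pattern both hunks add (assuming the amdgpu fence helpers behave as in this series; passing ~0ull requests the fence of the most recently submitted job in the entity):

    fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
    if (fence) {
            dma_fence_wait(fence, false);   /* uninterruptible wait until idle */
            dma_fence_put(fence);
    }
    /* Only now is it safe to move the entity to instance 0. */
    drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
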
diff --git a/queue-6.16/drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch b/queue-6.16/drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch
new file mode 100644 (file)
index 0000000..438cda0
--- /dev/null
@@ -0,0 +1,104 @@
+From 2b10cb58d7a3fd621ec9b2ba765a092e562ef998 Mon Sep 17 00:00:00 2001
+From: David Rosca <david.rosca@amd.com>
+Date: Mon, 18 Aug 2025 09:06:58 +0200
+Subject: drm/amdgpu/vcn4: Fix IB parsing with multiple engine info packages
+
+From: David Rosca <david.rosca@amd.com>
+
+commit 2b10cb58d7a3fd621ec9b2ba765a092e562ef998 upstream.
+
+There can be multiple engine info packages in one IB and the first one
+may be a common engine, not decode/encode.
+We need to parse the entire IB instead of stopping after finding the
+first engine info.
+
+Signed-off-by: David Rosca <david.rosca@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit dc8f9f0f45166a6b37864e7a031c726981d6e5fc)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c |   52 +++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 31 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1910,22 +1910,16 @@ out:
+ #define RADEON_VCN_ENGINE_TYPE_ENCODE                 (0x00000002)
+ #define RADEON_VCN_ENGINE_TYPE_DECODE                 (0x00000003)
+-
+ #define RADEON_VCN_ENGINE_INFO                                (0x30000001)
+-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET             16
+-
+ #define RENCODE_ENCODE_STANDARD_AV1                   2
+ #define RENCODE_IB_PARAM_SESSION_INIT                 0x00000003
+-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET      64
+-/* return the offset in ib if id is found, -1 otherwise
+- * to speed up the searching we only search upto max_offset
+- */
+-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
++/* return the offset in ib if id is found, -1 otherwise */
++static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
+ {
+       int i;
+-      for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
++      for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
+               if (ib->ptr[i + 1] == id)
+                       return i;
+       }
+@@ -1940,33 +1934,29 @@ static int vcn_v4_0_ring_patch_cs_in_pla
+       struct amdgpu_vcn_decode_buffer *decode_buffer;
+       uint64_t addr;
+       uint32_t val;
+-      int idx;
++      int idx = 0, sidx;
+       /* The first instance can decode anything */
+       if (!ring->me)
+               return 0;
+-      /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+-      idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+-                      RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+-      if (idx < 0) /* engine info is missing */
+-              return 0;
+-
+-      val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+-      if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+-              decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+-
+-              if (!(decode_buffer->valid_buf_flag  & 0x1))
+-                      return 0;
+-
+-              addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+-                      decode_buffer->msg_buffer_address_lo;
+-              return vcn_v4_0_dec_msg(p, job, addr);
+-      } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+-              idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+-                      RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+-              if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+-                      return vcn_v4_0_limit_sched(p, job);
++      while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
++              val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
++              if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
++                      decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
++
++                      if (!(decode_buffer->valid_buf_flag & 0x1))
++                              return 0;
++
++                      addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
++                              decode_buffer->msg_buffer_address_lo;
++                      return vcn_v4_0_dec_msg(p, job, addr);
++              } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
++                      sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
++                      if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
++                              return vcn_v4_0_limit_sched(p, job);
++              }
++              idx += ib->ptr[idx] / 4;
+       }
+       return 0;
+ }
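A rough sketch of the package walk the new code performs, assuming each package stores its size in bytes at ib->ptr[idx] and its id at ib->ptr[idx + 1] (handle_engine_info() is a hypothetical handler):

    idx = 0;
    while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
            handle_engine_info(ib, idx);    /* decode or encode handling */
            idx += ib->ptr[idx] / 4;        /* advance past this package */
    }
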
diff --git a/queue-6.16/drm-dp-add-an-edid-quirk-for-the-dpcd-register-access-probe.patch b/queue-6.16/drm-dp-add-an-edid-quirk-for-the-dpcd-register-access-probe.patch
new file mode 100644 (file)
index 0000000..f9ea5a1
--- /dev/null
@@ -0,0 +1,146 @@
+From b87ed522b3643f096ef183ed0ccf2d2b90ddd513 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 9 Jun 2025 15:55:55 +0300
+Subject: drm/dp: Add an EDID quirk for the DPCD register access probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit b87ed522b3643f096ef183ed0ccf2d2b90ddd513 upstream.
+
+Reading DPCD registers has side-effects and some of these can cause a
+problem for instance during link training. Based on this it's better to
+avoid the probing quirk done before each DPCD register read, limiting
+this to the monitor which requires it. Add an EDID quirk for this. Leave
+the quirk enabled by default, allowing it to be disabled after the
+monitor is detected.
+
+v2: Fix lockdep wrt. drm_dp_aux::hw_mutex when calling
+    drm_dp_dpcd_set_probe_quirk() with a dependent lock already held.
+v3: Add a helper for determining if DPCD probing is needed. (Jani)
+v4:
+- s/drm_dp_dpcd_set_probe_quirk/drm_dp_dpcd_set_probe (Jani)
+- Fix documentation of drm_dp_dpcd_set_probe().
+- Add comment at the end of internal quirk entries.
+
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://lore.kernel.org/r/20250609125556.109538-1-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_helper.c |   42 ++++++++++++++++++++++----------
+ drivers/gpu/drm/drm_edid.c              |    8 ++++++
+ include/drm/display/drm_dp_helper.h     |    6 ++++
+ include/drm/drm_edid.h                  |    3 ++
+ 4 files changed, 46 insertions(+), 13 deletions(-)
+
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -692,6 +692,34 @@ void drm_dp_dpcd_set_powered(struct drm_
+ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
+ /**
++ * drm_dp_dpcd_set_probe() - Set whether a probing before DPCD access is done
++ * @aux: DisplayPort AUX channel
++ * @enable: Enable the probing if required
++ */
++void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable)
++{
++      WRITE_ONCE(aux->dpcd_probe_disabled, !enable);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_set_probe);
++
++static bool dpcd_access_needs_probe(struct drm_dp_aux *aux)
++{
++      /*
++       * HP ZR24w corrupts the first DPCD access after entering power save
++       * mode. Eg. on a read, the entire buffer will be filled with the same
++       * byte. Do a throw away read to avoid corrupting anything we care
++       * about. Afterwards things will work correctly until the monitor
++       * gets woken up and subsequently re-enters power save mode.
++       *
++       * The user pressing any button on the monitor is enough to wake it
++       * up, so there is no particularly good place to do the workaround.
++       * We just have to do it before any DPCD access and hope that the
++       * monitor doesn't power down exactly after the throw away read.
++       */
++      return !aux->is_remote && !READ_ONCE(aux->dpcd_probe_disabled);
++}
++
++/**
+  * drm_dp_dpcd_read() - read a series of bytes from the DPCD
+  * @aux: DisplayPort AUX channel (SST or MST)
+  * @offset: address of the (first) register to read
+@@ -712,19 +740,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_a
+ {
+       int ret;
+-      /*
+-       * HP ZR24w corrupts the first DPCD access after entering power save
+-       * mode. Eg. on a read, the entire buffer will be filled with the same
+-       * byte. Do a throw away read to avoid corrupting anything we care
+-       * about. Afterwards things will work correctly until the monitor
+-       * gets woken up and subsequently re-enters power save mode.
+-       *
+-       * The user pressing any button on the monitor is enough to wake it
+-       * up, so there is no particularly good place to do the workaround.
+-       * We just have to do it before any DPCD access and hope that the
+-       * monitor doesn't power down exactly after the throw away read.
+-       */
+-      if (!aux->is_remote) {
++      if (dpcd_access_needs_probe(aux)) {
+               ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
+               if (ret < 0)
+                       return ret;
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -248,6 +248,14 @@ static const struct edid_quirk {
+       /* OSVR HDK and HDK2 VR Headsets */
+       EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+       EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
++
++      /*
++       * @drm_edid_internal_quirk entries end here, following with the
++       * @drm_edid_quirk entries.
++       */
++
++      /* HP ZR24w DP AUX DPCD access requires probing to prevent corruption. */
++      EDID_QUIRK('H', 'W', 'P', 0x2869, BIT(DRM_EDID_QUIRK_DP_DPCD_PROBE)),
+ };
+ /*
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -523,10 +523,16 @@ struct drm_dp_aux {
+        * @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA)
+        */
+       bool no_zero_sized;
++
++      /**
++       * @dpcd_probe_disabled: If probing before a DPCD access is disabled.
++       */
++      bool dpcd_probe_disabled;
+ };
+ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
+ void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
++void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable);
+ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+                        void *buffer, size_t size);
+ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -110,6 +110,9 @@ struct detailed_data_string {
+ #define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING  (1 << 4)
+ enum drm_edid_quirk {
++      /* Do a dummy read before DPCD accesses, to prevent corruption. */
++      DRM_EDID_QUIRK_DP_DPCD_PROBE,
++
+       DRM_EDID_QUIRK_NUM,
+ };
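Together with the drm/edid patches below, the intended usage is that a driver keeps the probe enabled until the sink is identified and then turns it off unless the quirk asks for it. A minimal sketch (drm_edid_has_quirk() is added by the companion drm/edid patch in this series):

    /* Sketch: drop the throw-away DPCD read once the sink is known not to
     * need the HP ZR24w workaround. */
    if (!drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE))
            drm_dp_dpcd_set_probe(aux, false);
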
diff --git a/queue-6.16/drm-edid-add-support-for-quirks-visible-to-drm-core-and-drivers.patch b/queue-6.16/drm-edid-add-support-for-quirks-visible-to-drm-core-and-drivers.patch
new file mode 100644 (file)
index 0000000..168a81d
--- /dev/null
@@ -0,0 +1,95 @@
+From 0b4aa85e8981198e23a68d50ee3c490ccd7f8311 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 5 Jun 2025 11:28:48 +0300
+Subject: drm/edid: Add support for quirks visible to DRM core and drivers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 0b4aa85e8981198e23a68d50ee3c490ccd7f8311 upstream.
+
+Add support for EDID based quirks which can be queried outside of the
+EDID parser iteself by DRM core and drivers. There are at least two such
+quirks applicable to all drivers: the DPCD register access probe quirk
+and the 128b/132b DPRX Lane Count Conversion quirk (see 3.5.2.16.3 in
+the v2.1a DP Standard). The latter quirk applies to panels with specific
+EDID panel names; support for defining a quirk this way will be added as
+a follow-up.
+
+v2: Reset global_quirks in drm_reset_display_info().
+v3: (Jani)
+- Use one list for both the global and internal quirks.
+- Drop change for panel name specific quirks.
+- Add comment about the way quirks should be queried.
+
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://lore.kernel.org/r/20250605082850.65136-4-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_edid.c  |    8 +++++++-
+ include/drm/drm_connector.h |    4 +++-
+ include/drm/drm_edid.h      |    5 +++++
+ 3 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -68,7 +68,7 @@ static int oui(u8 first, u8 second, u8 t
+ enum drm_edid_internal_quirk {
+       /* First detailed mode wrong, use largest 60Hz mode */
+-      EDID_QUIRK_PREFER_LARGE_60,
++      EDID_QUIRK_PREFER_LARGE_60 = DRM_EDID_QUIRK_NUM,
+       /* Reported 135MHz pixel clock is too high, needs adjustment */
+       EDID_QUIRK_135_CLOCK_TOO_HIGH,
+       /* Prefer the largest mode at 75 Hz */
+@@ -2959,6 +2959,12 @@ static bool drm_edid_has_internal_quirk(
+       return connector->display_info.quirks & BIT(quirk);
+ }
++bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk)
++{
++      return connector->display_info.quirks & BIT(quirk);
++}
++EXPORT_SYMBOL(drm_edid_has_quirk);
++
+ #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+ #define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
+--- a/include/drm/drm_connector.h
++++ b/include/drm/drm_connector.h
+@@ -843,7 +843,9 @@ struct drm_display_info {
+       int vics_len;
+       /**
+-       * @quirks: EDID based quirks. Internal to EDID parsing.
++       * @quirks: EDID based quirks. DRM core and drivers can query the
++       * @drm_edid_quirk quirks using drm_edid_has_quirk(), the rest of
++       * the quirks also tracked here are internal to EDID parsing.
+        */
+       u32 quirks;
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -109,6 +109,10 @@ struct detailed_data_string {
+ #define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
+ #define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING  (1 << 4)
++enum drm_edid_quirk {
++      DRM_EDID_QUIRK_NUM,
++};
++
+ struct detailed_data_monitor_range {
+       u8 min_vfreq;
+       u8 max_vfreq;
+@@ -476,5 +480,6 @@ void drm_edid_print_product_id(struct dr
+ u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
+ bool drm_edid_match(const struct drm_edid *drm_edid,
+                   const struct drm_edid_ident *ident);
++bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk);
+ #endif /* __DRM_EDID_H__ */
diff --git a/queue-6.16/drm-edid-define-the-quirks-in-an-enum-list.patch b/queue-6.16/drm-edid-define-the-quirks-in-an-enum-list.patch
new file mode 100644 (file)
index 0000000..321c7d7
--- /dev/null
@@ -0,0 +1,410 @@
+From 5281cbe0b55a1ff9c6c29361540016873bdc506e Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 5 Jun 2025 11:28:47 +0300
+Subject: drm/edid: Define the quirks in an enum list
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 5281cbe0b55a1ff9c6c29361540016873bdc506e upstream.
+
+An enum list is better suited to define a quirk list, do that. This
+makes looking up a quirk more robust and also allows for separating
+quirks internal to the EDID parser and global quirks which can be
+queried outside of the EDID parser (added as a follow-up).
+
+Suggested-by: Jani Nikula <jani.nikula@linux.intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://lore.kernel.org/r/20250605082850.65136-3-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_edid.c |  218 +++++++++++++++++++++++----------------------
+ 1 file changed, 112 insertions(+), 106 deletions(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -66,34 +66,36 @@ static int oui(u8 first, u8 second, u8 t
+  * on as many displays as possible).
+  */
+-/* First detailed mode wrong, use largest 60Hz mode */
+-#define EDID_QUIRK_PREFER_LARGE_60            (1 << 0)
+-/* Reported 135MHz pixel clock is too high, needs adjustment */
+-#define EDID_QUIRK_135_CLOCK_TOO_HIGH         (1 << 1)
+-/* Prefer the largest mode at 75 Hz */
+-#define EDID_QUIRK_PREFER_LARGE_75            (1 << 2)
+-/* Detail timing is in cm not mm */
+-#define EDID_QUIRK_DETAILED_IN_CM             (1 << 3)
+-/* Detailed timing descriptors have bogus size values, so just take the
+- * maximum size and use that.
+- */
+-#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE  (1 << 4)
+-/* use +hsync +vsync for detailed mode */
+-#define EDID_QUIRK_DETAILED_SYNC_PP           (1 << 6)
+-/* Force reduced-blanking timings for detailed modes */
+-#define EDID_QUIRK_FORCE_REDUCED_BLANKING     (1 << 7)
+-/* Force 8bpc */
+-#define EDID_QUIRK_FORCE_8BPC                 (1 << 8)
+-/* Force 12bpc */
+-#define EDID_QUIRK_FORCE_12BPC                        (1 << 9)
+-/* Force 6bpc */
+-#define EDID_QUIRK_FORCE_6BPC                 (1 << 10)
+-/* Force 10bpc */
+-#define EDID_QUIRK_FORCE_10BPC                        (1 << 11)
+-/* Non desktop display (i.e. HMD) */
+-#define EDID_QUIRK_NON_DESKTOP                        (1 << 12)
+-/* Cap the DSC target bitrate to 15bpp */
+-#define EDID_QUIRK_CAP_DSC_15BPP              (1 << 13)
++enum drm_edid_internal_quirk {
++      /* First detailed mode wrong, use largest 60Hz mode */
++      EDID_QUIRK_PREFER_LARGE_60,
++      /* Reported 135MHz pixel clock is too high, needs adjustment */
++      EDID_QUIRK_135_CLOCK_TOO_HIGH,
++      /* Prefer the largest mode at 75 Hz */
++      EDID_QUIRK_PREFER_LARGE_75,
++      /* Detail timing is in cm not mm */
++      EDID_QUIRK_DETAILED_IN_CM,
++      /* Detailed timing descriptors have bogus size values, so just take the
++       * maximum size and use that.
++       */
++      EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE,
++      /* use +hsync +vsync for detailed mode */
++      EDID_QUIRK_DETAILED_SYNC_PP,
++      /* Force reduced-blanking timings for detailed modes */
++      EDID_QUIRK_FORCE_REDUCED_BLANKING,
++      /* Force 8bpc */
++      EDID_QUIRK_FORCE_8BPC,
++      /* Force 12bpc */
++      EDID_QUIRK_FORCE_12BPC,
++      /* Force 6bpc */
++      EDID_QUIRK_FORCE_6BPC,
++      /* Force 10bpc */
++      EDID_QUIRK_FORCE_10BPC,
++      /* Non desktop display (i.e. HMD) */
++      EDID_QUIRK_NON_DESKTOP,
++      /* Cap the DSC target bitrate to 15bpp */
++      EDID_QUIRK_CAP_DSC_15BPP,
++};
+ #define MICROSOFT_IEEE_OUI    0xca125c
+@@ -128,124 +130,124 @@ static const struct edid_quirk {
+       u32 quirks;
+ } edid_quirk_list[] = {
+       /* Acer AL1706 */
+-      EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
++      EDID_QUIRK('A', 'C', 'R', 44358, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+       /* Acer F51 */
+-      EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
++      EDID_QUIRK('A', 'P', 'I', 0x7602, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+       /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+-      EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('A', 'E', 'O', 0, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* BenQ GW2765 */
+-      EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
++      EDID_QUIRK('B', 'N', 'Q', 0x78d6, BIT(EDID_QUIRK_FORCE_8BPC)),
+       /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
+-      EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('B', 'O', 'E', 0x78b, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+-      EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('C', 'P', 'T', 0x17df, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
+-      EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('S', 'D', 'C', 0x3652, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
+-      EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('B', 'O', 'E', 0x0771, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* Belinea 10 15 55 */
+-      EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
+-      EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
++      EDID_QUIRK('M', 'A', 'X', 1516, BIT(EDID_QUIRK_PREFER_LARGE_60)),
++      EDID_QUIRK('M', 'A', 'X', 0x77e, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+       /* Envision Peripherals, Inc. EN-7100e */
+-      EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
++      EDID_QUIRK('E', 'P', 'I', 59264, BIT(EDID_QUIRK_135_CLOCK_TOO_HIGH)),
+       /* Envision EN2028 */
+-      EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
++      EDID_QUIRK('E', 'P', 'I', 8232, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+       /* Funai Electronics PM36B */
+-      EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
+-                                     EDID_QUIRK_DETAILED_IN_CM),
++      EDID_QUIRK('F', 'C', 'M', 13600, BIT(EDID_QUIRK_PREFER_LARGE_75) |
++                                       BIT(EDID_QUIRK_DETAILED_IN_CM)),
+       /* LG 27GP950 */
+-      EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
++      EDID_QUIRK('G', 'S', 'M', 0x5bbf, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
+       /* LG 27GN950 */
+-      EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
++      EDID_QUIRK('G', 'S', 'M', 0x5b9a, BIT(EDID_QUIRK_CAP_DSC_15BPP)),
+       /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+-      EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
++      EDID_QUIRK('L', 'G', 'D', 764, BIT(EDID_QUIRK_FORCE_10BPC)),
+       /* LG Philips LCD LP154W01-A5 */
+-      EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+-      EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
++      EDID_QUIRK('L', 'P', 'L', 0, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
++      EDID_QUIRK('L', 'P', 'L', 0x2a00, BIT(EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)),
+       /* Samsung SyncMaster 205BW.  Note: irony */
+-      EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
++      EDID_QUIRK('S', 'A', 'M', 541, BIT(EDID_QUIRK_DETAILED_SYNC_PP)),
+       /* Samsung SyncMaster 22[5-6]BW */
+-      EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
+-      EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
++      EDID_QUIRK('S', 'A', 'M', 596, BIT(EDID_QUIRK_PREFER_LARGE_60)),
++      EDID_QUIRK('S', 'A', 'M', 638, BIT(EDID_QUIRK_PREFER_LARGE_60)),
+       /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
+-      EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
++      EDID_QUIRK('S', 'N', 'Y', 0x2541, BIT(EDID_QUIRK_FORCE_12BPC)),
+       /* ViewSonic VA2026w */
+-      EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
++      EDID_QUIRK('V', 'S', 'C', 5020, BIT(EDID_QUIRK_FORCE_REDUCED_BLANKING)),
+       /* Medion MD 30217 PG */
+-      EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
++      EDID_QUIRK('M', 'E', 'D', 0x7b8, BIT(EDID_QUIRK_PREFER_LARGE_75)),
+       /* Lenovo G50 */
+-      EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
++      EDID_QUIRK('S', 'D', 'C', 18514, BIT(EDID_QUIRK_FORCE_6BPC)),
+       /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+-      EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
++      EDID_QUIRK('S', 'E', 'C', 0xd033, BIT(EDID_QUIRK_FORCE_8BPC)),
+       /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
+-      EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
++      EDID_QUIRK('E', 'T', 'R', 13896, BIT(EDID_QUIRK_FORCE_8BPC)),
+       /* Valve Index Headset */
+-      EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('V', 'L', 'V', 0x91a8, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b0, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b1, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b2, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b3, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b4, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b5, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b6, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b7, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b8, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91b9, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91ba, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91bb, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91bc, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91bd, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91be, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('V', 'L', 'V', 0x91bf, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* HTC Vive and Vive Pro VR Headsets */
+-      EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('H', 'V', 'R', 0xaa01, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('H', 'V', 'R', 0xaa02, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
+-      EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('O', 'V', 'R', 0x0001, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('O', 'V', 'R', 0x0003, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('O', 'V', 'R', 0x0004, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('O', 'V', 'R', 0x0012, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* Windows Mixed Reality Headsets */
+-      EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('A', 'C', 'R', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('L', 'E', 'N', 0x0408, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('F', 'U', 'J', 0x1970, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('D', 'E', 'L', 0x7fce, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('S', 'E', 'C', 0x144a, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('A', 'U', 'S', 0xc102, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* Sony PlayStation VR Headset */
+-      EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('S', 'N', 'Y', 0x0704, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* Sensics VR Headsets */
+-      EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('S', 'E', 'N', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
+       /* OSVR HDK and HDK2 VR Headsets */
+-      EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
+-      EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
++      EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
++      EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+ };
+ /*
+@@ -2951,6 +2953,12 @@ static u32 edid_get_quirks(const struct
+       return 0;
+ }
++static bool drm_edid_has_internal_quirk(struct drm_connector *connector,
++                                      enum drm_edid_internal_quirk quirk)
++{
++      return connector->display_info.quirks & BIT(quirk);
++}
++
+ #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+ #define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
+@@ -2960,7 +2968,6 @@ static u32 edid_get_quirks(const struct
+  */
+ static void edid_fixup_preferred(struct drm_connector *connector)
+ {
+-      const struct drm_display_info *info = &connector->display_info;
+       struct drm_display_mode *t, *cur_mode, *preferred_mode;
+       int target_refresh = 0;
+       int cur_vrefresh, preferred_vrefresh;
+@@ -2968,9 +2975,9 @@ static void edid_fixup_preferred(struct
+       if (list_empty(&connector->probed_modes))
+               return;
+-      if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60))
+               target_refresh = 60;
+-      if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
+               target_refresh = 75;
+       preferred_mode = list_first_entry(&connector->probed_modes,
+@@ -3474,7 +3481,6 @@ static struct drm_display_mode *drm_mode
+                                                 const struct drm_edid *drm_edid,
+                                                 const struct detailed_timing *timing)
+ {
+-      const struct drm_display_info *info = &connector->display_info;
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode;
+       const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+@@ -3508,7 +3514,7 @@ static struct drm_display_mode *drm_mode
+               return NULL;
+       }
+-      if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_REDUCED_BLANKING)) {
+               mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+               if (!mode)
+                       return NULL;
+@@ -3520,7 +3526,7 @@ static struct drm_display_mode *drm_mode
+       if (!mode)
+               return NULL;
+-      if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_135_CLOCK_TOO_HIGH))
+               mode->clock = 1088 * 10;
+       else
+               mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+@@ -3551,7 +3557,7 @@ static struct drm_display_mode *drm_mode
+       drm_mode_do_interlace_quirk(mode, pt);
+-      if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_SYNC_PP)) {
+               mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
+       } else {
+               mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+@@ -3564,12 +3570,12 @@ set_size:
+       mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+       mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+-      if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_IN_CM)) {
+               mode->width_mm *= 10;
+               mode->height_mm *= 10;
+       }
+-      if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE)) {
+               mode->width_mm = drm_edid->edid->width_cm * 10;
+               mode->height_mm = drm_edid->edid->height_cm * 10;
+       }
+@@ -6734,26 +6740,26 @@ static void update_display_info(struct d
+       drm_update_mso(connector, drm_edid);
+ out:
+-      if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_NON_DESKTOP)) {
+               drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
+                           connector->base.id, connector->name,
+                           info->non_desktop ? " (redundant quirk)" : "");
+               info->non_desktop = true;
+       }
+-      if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_CAP_DSC_15BPP))
+               info->max_dsc_bpp = 15;
+-      if (info->quirks & EDID_QUIRK_FORCE_6BPC)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_6BPC))
+               info->bpc = 6;
+-      if (info->quirks & EDID_QUIRK_FORCE_8BPC)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_8BPC))
+               info->bpc = 8;
+-      if (info->quirks & EDID_QUIRK_FORCE_10BPC)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_10BPC))
+               info->bpc = 10;
+-      if (info->quirks & EDID_QUIRK_FORCE_12BPC)
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_FORCE_12BPC))
+               info->bpc = 12;
+       /* Depends on info->cea_rev set by drm_parse_cea_ext() above */
+@@ -6918,7 +6924,6 @@ static int add_displayid_detailed_modes(
+ static int _drm_edid_connector_add_modes(struct drm_connector *connector,
+                                        const struct drm_edid *drm_edid)
+ {
+-      const struct drm_display_info *info = &connector->display_info;
+       int num_modes = 0;
+       if (!drm_edid)
+@@ -6948,7 +6953,8 @@ static int _drm_edid_connector_add_modes
+       if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
+               num_modes += add_inferred_modes(connector, drm_edid);
+-      if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
++      if (drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_60) ||
++          drm_edid_has_internal_quirk(connector, EDID_QUIRK_PREFER_LARGE_75))
+               edid_fixup_preferred(connector);
+       return num_modes;
diff --git a/queue-6.16/drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch b/queue-6.16/drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch
new file mode 100644 (file)
index 0000000..13f46bb
--- /dev/null
@@ -0,0 +1,57 @@
+From cfa7b7659757f8d0fc4914429efa90d0d2577dd7 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Fri, 5 Sep 2025 13:41:49 +0300
+Subject: drm/i915/power: fix size for for_each_set_bit() in abox iteration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit cfa7b7659757f8d0fc4914429efa90d0d2577dd7 upstream.
+
+for_each_set_bit() expects size to be in bits, not bytes. The abox mask
+iteration uses bytes, but it works by coincidence, because the local
+variable holding the mask is unsigned long, and the mask only ever has
+bit 2 as the highest bit. Using a smaller type could lead to subtle and
+very hard to track bugs.
+
+Fixes: 62afef2811e4 ("drm/i915/rkl: RKL uses ABOX0 for pixel transfers")
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: stable@vger.kernel.org # v5.9+
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://lore.kernel.org/r/20250905104149.1144751-1-jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+(cherry picked from commit 7ea3baa6efe4bb93d11e1c0e6528b1468d7debf6)
+Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display_power.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -1169,7 +1169,7 @@ static void icl_mbus_init(struct intel_d
+       if (DISPLAY_VER(display) == 12)
+               abox_regs |= BIT(0);
+-      for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
++      for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
+               intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
+ }
+@@ -1630,11 +1630,11 @@ static void tgl_bw_buddy_init(struct int
+       if (table[config].page_mask == 0) {
+               drm_dbg_kms(display->drm,
+                           "Unknown memory configuration; disabling address buddy logic.\n");
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
+                       intel_de_write(display, BW_BUDDY_CTL(i),
+                                      BW_BUDDY_DISABLE);
+       } else {
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
+                       intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
+                                      table[config].page_mask);
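The bits-versus-bytes distinction is easy to miss because sizeof() happens to be large enough for small masks. A minimal sketch of the corrected pattern:

    unsigned long abox_regs = BIT(0) | BIT(2);
    int i;

    /* sizeof(abox_regs) is 8 (bytes); BITS_PER_TYPE(abox_regs) is 64 (bits).
     * for_each_set_bit() expects the size in bits. */
    for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
            pr_info("programming ABOX%d\n", i);
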
diff --git a/queue-6.16/drm-mediatek-fix-potential-of-node-use-after-free.patch b/queue-6.16/drm-mediatek-fix-potential-of-node-use-after-free.patch
new file mode 100644 (file)
index 0000000..fb90123
--- /dev/null
@@ -0,0 +1,60 @@
+From 4de37a48b6b58faaded9eb765047cf0d8785ea18 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 29 Aug 2025 11:03:44 +0200
+Subject: drm/mediatek: fix potential OF node use-after-free
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 4de37a48b6b58faaded9eb765047cf0d8785ea18 upstream.
+
+The for_each_child_of_node() helper drops the reference it takes to each
+node as it iterates over children and an explicit of_node_put() is only
+needed when exiting the loop early.
+
+Drop the recently introduced bogus additional reference count decrement
+at each iteration that could potentially lead to a use-after-free.
+
+Fixes: 1f403699c40f ("drm/mediatek: Fix device/node reference count leaks in mtk_drm_get_all_drm_priv")
+Cc: Ma Ke <make24@iscas.ac.cn>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: CK Hu <ck.hu@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250829090345.21075-2-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_drm_drv.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -388,11 +388,11 @@ static bool mtk_drm_get_all_drm_priv(str
+               of_id = of_match_node(mtk_drm_of_ids, node);
+               if (!of_id)
+-                      goto next_put_node;
++                      continue;
+               pdev = of_find_device_by_node(node);
+               if (!pdev)
+-                      goto next_put_node;
++                      continue;
+               drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+               if (!drm_dev)
+@@ -418,11 +418,10 @@ next_put_device_drm_dev:
+ next_put_device_pdev_dev:
+               put_device(&pdev->dev);
+-next_put_node:
+-              of_node_put(node);
+-
+-              if (cnt == MAX_CRTC)
++              if (cnt == MAX_CRTC) {
++                      of_node_put(node);
+                       break;
++              }
+       }
+       if (drm_priv->data->mmsys_dev_num == cnt) {
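The reference-counting rule the fix restores: for_each_child_of_node() drops the reference it took when it advances to the next child, so an explicit of_node_put() is only needed on an early exit. A minimal sketch (match() is a hypothetical predicate):

    struct device_node *child;

    for_each_child_of_node(parent, child) {
            if (!match(child))
                    continue;       /* no of_node_put(): the iterator drops it */

            of_node_put(child);     /* leaving the loop early: drop it here */
            break;
    }
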
diff --git a/queue-6.16/drm-xe-allow-the-pm-notifier-to-continue-on-failure.patch b/queue-6.16/drm-xe-allow-the-pm-notifier-to-continue-on-failure.patch
new file mode 100644 (file)
index 0000000..e63e591
--- /dev/null
@@ -0,0 +1,73 @@
+From d84820309ed34cc412ce76ecfa9471dae7d7d144 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Thu, 4 Sep 2025 18:07:14 +0200
+Subject: drm/xe: Allow the pm notifier to continue on failure
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit d84820309ed34cc412ce76ecfa9471dae7d7d144 upstream.
+
+Its actions are opportunistic anyway and will be completed
+on device suspend.
+
+Marking as a fix to simplify backporting of the fix
+that follows in the series.
+
+v2:
+- Keep the runtime pm reference over suspend / hibernate and
+  document why. (Matt Auld, Rodrigo Vivi):
+
+Fixes: c6a4d46ec1d7 ("drm/xe: evict user memory in PM notifier")
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: <stable@vger.kernel.org> # v6.16+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250904160715.2613-3-thomas.hellstrom@linux.intel.com
+(cherry picked from commit ebd546fdffddfcaeab08afdd68ec93052c8fa740)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_pm.c |   17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -296,17 +296,17 @@ static int xe_pm_notifier_callback(struc
+       case PM_SUSPEND_PREPARE:
+               xe_pm_runtime_get(xe);
+               err = xe_bo_evict_all_user(xe);
+-              if (err) {
++              if (err)
+                       drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+-                      xe_pm_runtime_put(xe);
+-                      break;
+-              }
+               err = xe_bo_notifier_prepare_all_pinned(xe);
+-              if (err) {
++              if (err)
+                       drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+-                      xe_pm_runtime_put(xe);
+-              }
++              /*
++               * Keep the runtime pm reference until post hibernation / post suspend to
++               * avoid a runtime suspend interfering with evicted objects or backup
++               * allocations.
++               */
+               break;
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+@@ -315,9 +315,6 @@ static int xe_pm_notifier_callback(struc
+               break;
+       }
+-      if (err)
+-              return NOTIFY_BAD;
+-
+       return NOTIFY_DONE;
+ }
diff --git a/queue-6.16/drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch b/queue-6.16/drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch
new file mode 100644 (file)
index 0000000..992143a
--- /dev/null
@@ -0,0 +1,158 @@
+From 5c87fee3c96ce898ad681552404a66c7605193c0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Thu, 4 Sep 2025 18:07:13 +0200
+Subject: drm/xe: Attempt to bring bos back to VRAM after eviction
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 5c87fee3c96ce898ad681552404a66c7605193c0 upstream.
+
+VRAM+TT bos that are evicted from VRAM to TT may remain in
+TT also after a revalidation following eviction or suspend.
+
+This manifests itself as applications becoming sluggish
+after buffer objects get evicted or after a resume from
+suspend or hibernation.
+
+If the bo supports placement in both VRAM and TT, and
+we are on DGFX, mark the TT placement as fallback. This means
+that it is tried only after VRAM + eviction.
+
+This flaw has probably been present since the xe module was
+upstreamed but use a Fixes: commit below where backporting is
+likely to be simple. For earlier versions we need to open-
+code the fallback algorithm in the driver.
+
+v2:
+- Remove check for dgfx. (Matthew Auld)
+- Update the xe_dma_buf kunit test for the new strategy (CI)
+- Allow dma-buf to pin in current placement (CI)
+- Make xe_bo_validate() for pinned bos a NOP.
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/5995
+Fixes: a78a8da51b36 ("drm/ttm: replace busy placement with flags v6")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v6.9+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250904160715.2613-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit cb3d7b3b46b799c96b54f8e8fe36794a55a77f0b)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/tests/xe_bo.c      |    2 +-
+ drivers/gpu/drm/xe/tests/xe_dma_buf.c |   10 +---------
+ drivers/gpu/drm/xe/xe_bo.c            |   16 ++++++++++++----
+ drivers/gpu/drm/xe/xe_bo.h            |    2 +-
+ drivers/gpu/drm/xe/xe_dma_buf.c       |    2 +-
+ 5 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/xe/tests/xe_bo.c
++++ b/drivers/gpu/drm/xe/tests/xe_bo.c
+@@ -236,7 +236,7 @@ static int evict_test_run_tile(struct xe
+               }
+               xe_bo_lock(external, false);
+-              err = xe_bo_pin_external(external);
++              err = xe_bo_pin_external(external, false);
+               xe_bo_unlock(external);
+               if (err) {
+                       KUNIT_FAIL(test, "external bo pin err=%pe\n",
+--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+@@ -89,15 +89,7 @@ static void check_residency(struct kunit
+               return;
+       }
+-      /*
+-       * If on different devices, the exporter is kept in system  if
+-       * possible, saving a migration step as the transfer is just
+-       * likely as fast from system memory.
+-       */
+-      if (params->mem_mask & XE_BO_FLAG_SYSTEM)
+-              KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
+-      else
+-              KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
++      KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+       if (params->force_different_devices)
+               KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -184,6 +184,8 @@ static void try_add_system(struct xe_dev
+               bo->placements[*c] = (struct ttm_place) {
+                       .mem_type = XE_PL_TT,
++                      .flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
++                      TTM_PL_FLAG_FALLBACK : 0,
+               };
+               *c += 1;
+       }
+@@ -2266,6 +2268,7 @@ uint64_t vram_region_gpu_offset(struct t
+ /**
+  * xe_bo_pin_external - pin an external BO
+  * @bo: buffer object to be pinned
++ * @in_place: Pin in current placement, don't attempt to migrate.
+  *
+  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
+  * BO. Unique call compared to xe_bo_pin as this function has it own set of
+@@ -2273,7 +2276,7 @@ uint64_t vram_region_gpu_offset(struct t
+  *
+  * Returns 0 for success, negative error code otherwise.
+  */
+-int xe_bo_pin_external(struct xe_bo *bo)
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+ {
+       struct xe_device *xe = xe_bo_device(bo);
+       int err;
+@@ -2282,9 +2285,11 @@ int xe_bo_pin_external(struct xe_bo *bo)
+       xe_assert(xe, xe_bo_is_user(bo));
+       if (!xe_bo_is_pinned(bo)) {
+-              err = xe_bo_validate(bo, NULL, false);
+-              if (err)
+-                      return err;
++              if (!in_place) {
++                      err = xe_bo_validate(bo, NULL, false);
++                      if (err)
++                              return err;
++              }
+               spin_lock(&xe->pinned.lock);
+               list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+@@ -2437,6 +2442,9 @@ int xe_bo_validate(struct xe_bo *bo, str
+       };
+       int ret;
++      if (xe_bo_is_pinned(bo))
++              return 0;
++
+       if (vm) {
+               lockdep_assert_held(&vm->lock);
+               xe_vm_assert_held(vm);
+--- a/drivers/gpu/drm/xe/xe_bo.h
++++ b/drivers/gpu/drm/xe/xe_bo.h
+@@ -201,7 +201,7 @@ static inline void xe_bo_unlock_vm_held(
+       }
+ }
+-int xe_bo_pin_external(struct xe_bo *bo);
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
+ int xe_bo_pin(struct xe_bo *bo);
+ void xe_bo_unpin_external(struct xe_bo *bo);
+ void xe_bo_unpin(struct xe_bo *bo);
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -72,7 +72,7 @@ static int xe_dma_buf_pin(struct dma_buf
+               return ret;
+       }
+-      ret = xe_bo_pin_external(bo);
++      ret = xe_bo_pin_external(bo, true);
+       xe_assert(xe, !ret);
+       return 0;
diff --git a/queue-6.16/drm-xe-block-exec-and-rebind-worker-while-evicting-for-suspend-hibernate.patch b/queue-6.16/drm-xe-block-exec-and-rebind-worker-while-evicting-for-suspend-hibernate.patch
new file mode 100644 (file)
index 0000000..cff6112
--- /dev/null
@@ -0,0 +1,256 @@
+From eb5723a75104605b7d2207a7d598e314166fbef4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Thu, 4 Sep 2025 18:07:15 +0200
+Subject: drm/xe: Block exec and rebind worker while evicting for suspend / hibernate
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit eb5723a75104605b7d2207a7d598e314166fbef4 upstream.
+
+When the xe pm_notifier evicts for suspend / hibernate, there might be
+racing tasks trying to re-validate again. This can lead to suspend taking
+excessive time or getting stuck in a live-lock. This behaviour becomes
+much worse with the fix that actually makes re-validation bring back
+bos to VRAM rather than letting them remain in TT.
+
+Prevent that by having exec and the rebind worker wait for a completion
+that is set to block by the pm_notifier before suspend and is signaled
+by the pm_notifier after resume / wakeup.
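+
+In outline, the gating added below works like this (simplified sketch of
+the diff, not literal code):
+
+  /* pm_notifier: PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE */
+  reinit_completion(&xe->pm_block);     /* start blocking validators */
+
+  /* exec IOCTL path */
+  err = wait_for_completion_interruptible(&xe->pm_block);
+
+  /* rebind worker: try_wait_for_completion() and, if blocked, park   */
+  /* itself on xe->rebind_resume_list instead of waiting              */
+
+  /* pm_notifier: PM_POST_SUSPEND / PM_POST_HIBERNATION */
+  complete_all(&xe->pm_block);          /* unblock and kick the workers */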
+
+It's probably still possible to craft malicious applications that block
+suspending. More work is pending to fix that.
+
+v3:
+- Avoid wait_for_completion() in the kernel worker since it could
+  potentially cause work item flushes from freezable processes to
+  wait forever. Instead terminate the rebind workers if needed and
+  re-launch at resume. (Matt Auld)
+v4:
+- Fix some bad naming and leftover debug printouts.
+- Fix kerneldoc.
+- Use drmm_mutex_init() for the xe->rebind_resume_lock (Matt Auld).
+- Rework the interface of xe_vm_rebind_resume_worker (Matt Auld).
+
+Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4288
+Fixes: c6a4d46ec1d7 ("drm/xe: evict user memory in PM notifier")
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: <stable@vger.kernel.org> # v6.16+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250904160715.2613-4-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 599334572a5a99111015fbbd5152ce4dedc2f8b7)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_device_types.h |    6 +++++
+ drivers/gpu/drm/xe/xe_exec.c         |    9 +++++++
+ drivers/gpu/drm/xe/xe_pm.c           |   25 ++++++++++++++++++++
+ drivers/gpu/drm/xe/xe_vm.c           |   42 ++++++++++++++++++++++++++++++++++-
+ drivers/gpu/drm/xe/xe_vm.h           |    2 +
+ drivers/gpu/drm/xe/xe_vm_types.h     |    5 ++++
+ 6 files changed, 88 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_device_types.h
++++ b/drivers/gpu/drm/xe/xe_device_types.h
+@@ -529,6 +529,12 @@ struct xe_device {
+       /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+       struct notifier_block pm_notifier;
++      /** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
++      struct completion pm_block;
++      /** @rebind_resume_list: List of wq items to kick on resume. */
++      struct list_head rebind_resume_list;
++      /** @rebind_resume_lock: Lock to protect the rebind_resume_list */
++      struct mutex rebind_resume_lock;
+       /** @pmt: Support the PMT driver callback interface */
+       struct {
+--- a/drivers/gpu/drm/xe/xe_exec.c
++++ b/drivers/gpu/drm/xe/xe_exec.c
+@@ -237,6 +237,15 @@ retry:
+               goto err_unlock_list;
+       }
++      /*
++       * It's OK to block interruptible here with the vm lock held, since
++       * on task freezing during suspend / hibernate, the call will
++       * return -ERESTARTSYS and the IOCTL will be rerun.
++       */
++      err = wait_for_completion_interruptible(&xe->pm_block);
++      if (err)
++              goto err_unlock_list;
++
+       vm_exec.vm = &vm->gpuvm;
+       vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+       if (xe_vm_in_lr_mode(vm)) {
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -23,6 +23,7 @@
+ #include "xe_pcode.h"
+ #include "xe_pxp.h"
+ #include "xe_trace.h"
++#include "xe_vm.h"
+ #include "xe_wa.h"
+ /**
+@@ -285,6 +286,19 @@ static u32 vram_threshold_value(struct x
+       return DEFAULT_VRAM_THRESHOLD;
+ }
++static void xe_pm_wake_rebind_workers(struct xe_device *xe)
++{
++      struct xe_vm *vm, *next;
++
++      mutex_lock(&xe->rebind_resume_lock);
++      list_for_each_entry_safe(vm, next, &xe->rebind_resume_list,
++                               preempt.pm_activate_link) {
++              list_del_init(&vm->preempt.pm_activate_link);
++              xe_vm_resume_rebind_worker(vm);
++      }
++      mutex_unlock(&xe->rebind_resume_lock);
++}
++
+ static int xe_pm_notifier_callback(struct notifier_block *nb,
+                                  unsigned long action, void *data)
+ {
+@@ -294,6 +308,7 @@ static int xe_pm_notifier_callback(struc
+       switch (action) {
+       case PM_HIBERNATION_PREPARE:
+       case PM_SUSPEND_PREPARE:
++              reinit_completion(&xe->pm_block);
+               xe_pm_runtime_get(xe);
+               err = xe_bo_evict_all_user(xe);
+               if (err)
+@@ -310,6 +325,8 @@ static int xe_pm_notifier_callback(struc
+               break;
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
++              complete_all(&xe->pm_block);
++              xe_pm_wake_rebind_workers(xe);
+               xe_bo_notifier_unprepare_all_pinned(xe);
+               xe_pm_runtime_put(xe);
+               break;
+@@ -336,6 +353,14 @@ int xe_pm_init(struct xe_device *xe)
+       if (err)
+               return err;
++      err = drmm_mutex_init(&xe->drm, &xe->rebind_resume_lock);
++      if (err)
++              goto err_unregister;
++
++      init_completion(&xe->pm_block);
++      complete_all(&xe->pm_block);
++      INIT_LIST_HEAD(&xe->rebind_resume_list);
++
+       /* For now suspend/resume is only allowed with GuC */
+       if (!xe_device_uc_enabled(xe))
+               return 0;
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -393,6 +393,9 @@ static int xe_gpuvm_validate(struct drm_
+               list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
+                              &vm->rebind_list);
++      if (!try_wait_for_completion(&vm->xe->pm_block))
++              return -EAGAIN;
++
+       ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
+       if (ret)
+               return ret;
+@@ -479,6 +482,33 @@ static int xe_preempt_work_begin(struct
+       return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
+ }
++static bool vm_suspend_rebind_worker(struct xe_vm *vm)
++{
++      struct xe_device *xe = vm->xe;
++      bool ret = false;
++
++      mutex_lock(&xe->rebind_resume_lock);
++      if (!try_wait_for_completion(&vm->xe->pm_block)) {
++              ret = true;
++              list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list);
++      }
++      mutex_unlock(&xe->rebind_resume_lock);
++
++      return ret;
++}
++
++/**
++ * xe_vm_resume_rebind_worker() - Resume the rebind worker.
++ * @vm: The vm whose preempt worker to resume.
++ *
++ * Resume a preempt worker that was previously suspended by
++ * vm_suspend_rebind_worker().
++ */
++void xe_vm_resume_rebind_worker(struct xe_vm *vm)
++{
++      queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
++}
++
+ static void preempt_rebind_work_func(struct work_struct *w)
+ {
+       struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+@@ -502,6 +532,11 @@ static void preempt_rebind_work_func(str
+       }
+ retry:
++      if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
++              up_write(&vm->lock);
++              return;
++      }
++
+       if (xe_vm_userptr_check_repin(vm)) {
+               err = xe_vm_userptr_pin(vm);
+               if (err)
+@@ -1686,6 +1721,7 @@ struct xe_vm *xe_vm_create(struct xe_dev
+       if (flags & XE_VM_FLAG_LR_MODE) {
+               INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+               xe_pm_runtime_get_noresume(xe);
++              INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
+       }
+       if (flags & XE_VM_FLAG_FAULT_MODE) {
+@@ -1867,8 +1903,12 @@ void xe_vm_close_and_put(struct xe_vm *v
+       xe_assert(xe, !vm->preempt.num_exec_queues);
+       xe_vm_close(vm);
+-      if (xe_vm_in_preempt_fence_mode(vm))
++      if (xe_vm_in_preempt_fence_mode(vm)) {
++              mutex_lock(&xe->rebind_resume_lock);
++              list_del_init(&vm->preempt.pm_activate_link);
++              mutex_unlock(&xe->rebind_resume_lock);
+               flush_work(&vm->preempt.rebind_work);
++      }
+       if (xe_vm_in_fault_mode(vm))
+               xe_svm_close(vm);
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -268,6 +268,8 @@ struct dma_fence *xe_vm_bind_kernel_bo(s
+                                      struct xe_exec_queue *q, u64 addr,
+                                      enum xe_cache_level cache_lvl);
++void xe_vm_resume_rebind_worker(struct xe_vm *vm);
++
+ /**
+  * xe_vm_resv() - Return's the vm's reservation object
+  * @vm: The vm
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -286,6 +286,11 @@ struct xe_vm {
+                * BOs
+                */
+               struct work_struct rebind_work;
++              /**
++               * @preempt.pm_activate_link: Link to list of rebind workers to be
++               * kicked on resume.
++               */
++              struct list_head pm_activate_link;
+       } preempt;
+       /** @um: unified memory state */
diff --git a/queue-6.16/edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch b/queue-6.16/edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch
new file mode 100644 (file)
index 0000000..3f521f4
--- /dev/null
@@ -0,0 +1,38 @@
+From ff2a66d21fd2364ed9396d151115eec59612b200 Mon Sep 17 00:00:00 2001
+From: Salah Triki <salah.triki@gmail.com>
+Date: Thu, 31 Jul 2025 04:15:27 +0100
+Subject: EDAC/altera: Delete an inappropriate dma_free_coherent() call
+
+From: Salah Triki <salah.triki@gmail.com>
+
+commit ff2a66d21fd2364ed9396d151115eec59612b200 upstream.
+
+dma_free_coherent() must only be called if the corresponding
+dma_alloc_coherent() call has succeeded. Calling it when the allocation fails
+leads to undefined behavior.
+
+Delete the wrong call.
+
+  [ bp: Massage commit message. ]
+
+Fixes: 71bcada88b0f3 ("edac: altera: Add Altera SDRAM EDAC support")
+Signed-off-by: Salah Triki <salah.triki@gmail.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Dinh Nguyen <dinguyen@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/aIrfzzqh4IzYtDVC@pc
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/altera_edac.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_wr
+       ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+       if (!ptemp) {
+-              dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "Inject: Buffer Allocation error\n");
+               return -ENOMEM;
diff --git a/queue-6.16/fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch b/queue-6.16/fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch
new file mode 100644 (file)
index 0000000..378678f
--- /dev/null
@@ -0,0 +1,34 @@
+From e5203209b3935041dac541bc5b37efb44220cc0b Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 12 Aug 2025 14:07:54 +0200
+Subject: fuse: check if copy_file_range() returns larger than requested size
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit e5203209b3935041dac541bc5b37efb44220cc0b upstream.
+
+Just like write(), copy_file_range() should check if the return value is
+less than or equal to the requested number of bytes.
+
+Reported-by: Chunsheng Luo <luochunsheng@ustc.edu>
+Closes: https://lore.kernel.org/all/20250807062425.694-1-luochunsheng@ustc.edu/
+Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()")
+Cc: <stable@vger.kernel.org> # v4.20
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3079,6 +3079,9 @@ static ssize_t __fuse_copy_file_range(st
+               fc->no_copy_file_range = 1;
+               err = -EOPNOTSUPP;
+       }
++      if (!err && outarg.size > len)
++              err = -EIO;
++
+       if (err)
+               goto out;
diff --git a/queue-6.16/fuse-do-not-allow-mapping-a-non-regular-backing-file.patch b/queue-6.16/fuse-do-not-allow-mapping-a-non-regular-backing-file.patch
new file mode 100644 (file)
index 0000000..8898ebe
--- /dev/null
@@ -0,0 +1,36 @@
+From e9c8da670e749f7dedc53e3af54a87b041918092 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Thu, 10 Jul 2025 12:08:30 +0200
+Subject: fuse: do not allow mapping a non-regular backing file
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit e9c8da670e749f7dedc53e3af54a87b041918092 upstream.
+
+We do not support passthrough operations other than read/write on
+regular files, so allowing non-regular backing files makes no sense.
+
+Fixes: efad7153bf93 ("fuse: allow O_PATH fd for FUSE_DEV_IOC_BACKING_OPEN")
+Cc: stable@vger.kernel.org
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Bernd Schubert <bschubert@ddn.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/passthrough.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/fuse/passthrough.c
++++ b/fs/fuse/passthrough.c
+@@ -237,6 +237,11 @@ int fuse_backing_open(struct fuse_conn *
+       if (!file)
+               goto out;
++      /* read/write/splice/mmap passthrough only relevant for regular files */
++      res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
++      if (!d_is_reg(file->f_path.dentry))
++              goto out_fput;
++
+       backing_sb = file_inode(file)->i_sb;
+       res = -ELOOP;
+       if (backing_sb->s_stack_depth >= fc->max_stack_depth)
diff --git a/queue-6.16/fuse-prevent-overflow-in-copy_file_range-return-value.patch b/queue-6.16/fuse-prevent-overflow-in-copy_file_range-return-value.patch
new file mode 100644 (file)
index 0000000..ee175a1
--- /dev/null
@@ -0,0 +1,38 @@
+From 1e08938c3694f707bb165535df352ac97a8c75c9 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 12 Aug 2025 14:46:34 +0200
+Subject: fuse: prevent overflow in copy_file_range return value
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 1e08938c3694f707bb165535df352ac97a8c75c9 upstream.
+
+The FUSE protocol uses struct fuse_write_out to convey the return value of
+copy_file_range, which is restricted to uint32_t.  But the COPY_FILE_RANGE
+interface supports a 64-bit copy size.
+
+Currently the number of bytes copied is silently truncated to 32-bit, which
+may result in poor performance or even failure to copy in case of
+truncation to zero.
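+
+A rough illustration of the truncation to zero (hypothetical values, not
+part of this patch):
+
+  size_t len = 4ULL << 30;    /* 4 GiB requested */
+  uint32_t wire_len = len;    /* 32-bit fuse_write_out field: wraps to 0 */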
+
+Reported-by: Florian Weimer <fweimer@redhat.com>
+Closes: https://lore.kernel.org/all/lhuh5ynl8z5.fsf@oldenburg.str.redhat.com/
+Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()")
+Cc: <stable@vger.kernel.org> # v4.20
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3013,7 +3013,7 @@ static ssize_t __fuse_copy_file_range(st
+               .nodeid_out = ff_out->nodeid,
+               .fh_out = ff_out->fh,
+               .off_out = pos_out,
+-              .len = len,
++              .len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
+               .flags = flags
+       };
+       struct fuse_write_out outarg;
diff --git a/queue-6.16/i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch b/queue-6.16/i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch
new file mode 100644 (file)
index 0000000..065eab9
--- /dev/null
@@ -0,0 +1,40 @@
+From 664596bd98bb251dd417dfd3f9b615b661e1e44a Mon Sep 17 00:00:00 2001
+From: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+Date: Mon, 1 Sep 2025 20:59:43 +0800
+Subject: i2c: i801: Hide Intel Birch Stream SoC TCO WDT
+
+From: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+
+commit 664596bd98bb251dd417dfd3f9b615b661e1e44a upstream.
+
+Hide the Intel Birch Stream SoC TCO WDT feature since it was removed.
+
+On platforms with a PCH TCO WDT, this redundant device might produce
+errors like this:
+
+[   28.144542] sysfs: cannot create duplicate filename '/bus/platform/devices/iTCO_wdt'
+
+Fixes: 8c56f9ef25a3 ("i2c: i801: Add support for Intel Birch Stream SoC")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=220320
+Signed-off-by: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.7+
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250901125943.916522-1-chiasheng.lee@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-i801.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1052,7 +1052,7 @@ static const struct pci_device_id i801_i
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS,           FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
+-      { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,            FEATURES_ICH5 | FEATURE_TCO_CNL) },
++      { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,            FEATURES_ICH5)                   },
+       { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS,            FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS,          FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS,          FEATURES_ICH5 | FEATURE_TCO_CNL) },
diff --git a/queue-6.16/i2c-rtl9300-ensure-data-length-is-within-supported-range.patch b/queue-6.16/i2c-rtl9300-ensure-data-length-is-within-supported-range.patch
new file mode 100644 (file)
index 0000000..66aa983
--- /dev/null
@@ -0,0 +1,69 @@
+From 06418cb5a1a542a003fdb4ad8e76ea542d57cfba Mon Sep 17 00:00:00 2001
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+Date: Sun, 31 Aug 2025 10:04:47 +0000
+Subject: i2c: rtl9300: ensure data length is within supported range
+
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+
+commit 06418cb5a1a542a003fdb4ad8e76ea542d57cfba upstream.
+
+Add an explicit check for the xfer length to 'rtl9300_i2c_config_xfer'
+to ensure the data length is within the supported range. In
+particular a data length of 0 is not supported by the hardware and
+causes unintended or destructive behaviour.
+
+This limitation becomes obvious when looking at the register
+documentation [1]. 4 bits are reserved for DATA_WIDTH and the value
+of these 4 bits is used as N + 1, allowing a data length range of
+1 <= len <= 16.
+
+Affected by this is the SMBus Quick Operation which works with a data
+length of 0. Passing 0 as the length causes an underflow of the value
+due to:
+
+(len - 1) & 0xf
+
+and effectively specifying a transfer length of 16 via the registers.
+This causes a 16-byte write operation instead of a Quick Write. For
+example, on SFP modules without write-protected EEPROM this soft-bricks
+them by overwriting some initial bytes.
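+
+For illustration only (not part of this patch), the wraparound can be seen
+with a trivial computation:
+
+  unsigned int len = 0;
+  unsigned int width = (len - 1) & 0xf;  /* wraps to 0xf, i.e. 15 */
+  /* the controller then transfers width + 1 == 16 bytes */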
+
+For completeness, also add a quirk for the zero length.
+
+[1] https://svanheule.net/realtek/longan/register/i2c_mst1_ctrl2
+
+Fixes: c366be720235 ("i2c: Add driver for the RTL9300 I2C controller")
+Cc: stable@vger.kernel.org # v6.13+
+Signed-off-by: Jonas Jelonek <jelonek.jonas@gmail.com>
+Tested-by: Sven Eckelmann <sven@narfation.org>
+Reviewed-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Tested-by: Chris Packham <chris.packham@alliedtelesis.co.nz> # On RTL9302C based board
+Tested-by: Markus Stockhausen <markus.stockhausen@gmx.de>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250831100457.3114-3-jelonek.jonas@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-rtl9300.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-rtl9300.c
++++ b/drivers/i2c/busses/i2c-rtl9300.c
+@@ -99,6 +99,9 @@ static int rtl9300_i2c_config_xfer(struc
+ {
+       u32 val, mask;
++      if (len < 1 || len > 16)
++              return -EINVAL;
++
+       val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS;
+       mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK;
+@@ -323,7 +326,7 @@ static const struct i2c_algorithm rtl930
+ };
+ static struct i2c_adapter_quirks rtl9300_i2c_quirks = {
+-      .flags          = I2C_AQ_NO_CLK_STRETCH,
++      .flags          = I2C_AQ_NO_CLK_STRETCH | I2C_AQ_NO_ZERO_LEN,
+       .max_read_len   = 16,
+       .max_write_len  = 16,
+ };
diff --git a/queue-6.16/i2c-rtl9300-fix-channel-number-bound-check.patch b/queue-6.16/i2c-rtl9300-fix-channel-number-bound-check.patch
new file mode 100644 (file)
index 0000000..953a650
--- /dev/null
@@ -0,0 +1,48 @@
+From cd6c956fbc13156bcbcca084b46a8380caebc2a8 Mon Sep 17 00:00:00 2001
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+Date: Sun, 31 Aug 2025 10:04:46 +0000
+Subject: i2c: rtl9300: fix channel number bound check
+
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+
+commit cd6c956fbc13156bcbcca084b46a8380caebc2a8 upstream.
+
+Fix the current check for the number of channels (child nodes in the device
+tree). Before, this was:
+
+if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN)
+
+RTL9300_I2C_MUX_NCHAN gives the maximum number of channels so checking
+with '>=' isn't correct because it doesn't allow the last channel
+number. Thus, fix it to:
+
+if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN)
+
+The issue occurred on a TP-Link TL-ST1008F v2.0 device (8 SFP+ ports) and
+the fix is tested there.
+
+Fixes: c366be720235 ("i2c: Add driver for the RTL9300 I2C controller")
+Cc: stable@vger.kernel.org # v6.13+
+Signed-off-by: Jonas Jelonek <jelonek.jonas@gmail.com>
+Tested-by: Sven Eckelmann <sven@narfation.org>
+Reviewed-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Tested-by: Chris Packham <chris.packham@alliedtelesis.co.nz> # On RTL9302C based board
+Tested-by: Markus Stockhausen <markus.stockhausen@gmx.de>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250831100457.3114-2-jelonek.jonas@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-rtl9300.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-rtl9300.c
++++ b/drivers/i2c/busses/i2c-rtl9300.c
+@@ -353,7 +353,7 @@ static int rtl9300_i2c_probe(struct plat
+       platform_set_drvdata(pdev, i2c);
+-      if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN)
++      if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN)
+               return dev_err_probe(dev, -EINVAL, "Too many channels\n");
+       device_for_each_child_node(dev, child) {
diff --git a/queue-6.16/i2c-rtl9300-remove-broken-smbus-quick-operation-support.patch b/queue-6.16/i2c-rtl9300-remove-broken-smbus-quick-operation-support.patch
new file mode 100644 (file)
index 0000000..0c297bc
--- /dev/null
@@ -0,0 +1,90 @@
+From ede965fd555ac2536cf651893a998dbfd8e57b86 Mon Sep 17 00:00:00 2001
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+Date: Sun, 31 Aug 2025 10:04:48 +0000
+Subject: i2c: rtl9300: remove broken SMBus Quick operation support
+
+From: Jonas Jelonek <jelonek.jonas@gmail.com>
+
+commit ede965fd555ac2536cf651893a998dbfd8e57b86 upstream.
+
+Remove the SMBus Quick operation from this driver because it is not
+natively supported by the hardware and is wrongly implemented in the
+driver.
+
+The I2C controllers in Realtek RTL9300 and RTL9310 are SMBus-compliant
+but there doesn't seem to be native support for the SMBus Quick
+operation. It is not explicitly mentioned in the documentation but
+looking at the registers which configure an SMBus transaction, one can
+see that the data length cannot be set to 0. This suggests that the
+hardware doesn't allow any SMBus message without data bytes (except for
+those it does on its own, see SMBus Block Read).
+
+The current implementation of SMBus Quick operation passes a length of
+0 (which is actually invalid). Before the fix of a bug in a previous
+commit, this led to a read operation of 16 bytes from any register (the
+one of a former transaction or any other value).
+
+This caused issues like soft-bricked SFP modules after a simple probe
+with i2cdetect, which uses Quick by default. When this is run against SFP
+modules whose EEPROM isn't write-protected, some of the initial bytes
+are overwritten because a 16-byte write operation is executed instead of
+a Quick Write. (This temporarily soft-bricked one of my DAC cables.)
+
+Because SMBus Quick operation is obviously not supported on these
+controllers (a length of 0 cannot be set, even when no register
+address is set), remove it instead of claiming there is support. There
+also shouldn't be any kind of emulated 'Quick' which just does another
+kind of operation in the background. Otherwise, specific issues occur
+in case of a 'Quick' Write which actually writes unknown data to an
+unknown register.
+
+Fixes: c366be720235 ("i2c: Add driver for the RTL9300 I2C controller")
+Cc: stable@vger.kernel.org # v6.13+
+Signed-off-by: Jonas Jelonek <jelonek.jonas@gmail.com>
+Tested-by: Sven Eckelmann <sven@narfation.org>
+Reviewed-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Tested-by: Chris Packham <chris.packham@alliedtelesis.co.nz> # On RTL9302C based board
+Tested-by: Markus Stockhausen <markus.stockhausen@gmx.de>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250831100457.3114-4-jelonek.jonas@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-rtl9300.c | 15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
+index 2b3e80aa1bdf..9e1f71fed0fe 100644
+--- a/drivers/i2c/busses/i2c-rtl9300.c
++++ b/drivers/i2c/busses/i2c-rtl9300.c
+@@ -225,15 +225,6 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
+       }
+       switch (size) {
+-      case I2C_SMBUS_QUICK:
+-              ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
+-              if (ret)
+-                      goto out_unlock;
+-              ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
+-              if (ret)
+-                      goto out_unlock;
+-              break;
+-
+       case I2C_SMBUS_BYTE:
+               if (read_write == I2C_SMBUS_WRITE) {
+                       ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
+@@ -315,9 +306,9 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
+ static u32 rtl9300_i2c_func(struct i2c_adapter *a)
+ {
+-      return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+-             I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+-             I2C_FUNC_SMBUS_BLOCK_DATA;
++      return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
++             I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
++             I2C_FUNC_SMBUS_I2C_BLOCK;
+ }
+ static const struct i2c_algorithm rtl9300_i2c_algo = {
+-- 
+2.51.0
+
diff --git a/queue-6.16/mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch b/queue-6.16/mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
new file mode 100644 (file)
index 0000000..f810aa2
--- /dev/null
@@ -0,0 +1,76 @@
+From ce652aac9c90a96c6536681d17518efb1f660fb8 Mon Sep 17 00:00:00 2001
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+Date: Fri, 22 Aug 2025 11:50:57 +0900
+Subject: mm/damon/core: set quota->charged_from to jiffies at first charge window
+
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+
+commit ce652aac9c90a96c6536681d17518efb1f660fb8 upstream.
+
+Kernel initializes the "jiffies" timer as 5 minutes below zero, as shown
+in include/linux/jiffies.h
+
+ /*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+ #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+And the jiffies comparison helper functions cast the unsigned values to
+signed to cover wraparound:
+
+ #define time_after_eq(a,b) \
+  (typecheck(unsigned long, a) && \
+  typecheck(unsigned long, b) && \
+  ((long)((a) - (b)) >= 0))
+
+When quota->charged_from is initialized to 0, time_after_eq() can
+incorrectly return FALSE even after reset_interval has elapsed.  This
+occurs when (jiffies - reset_interval) produces a value with MSB=1, which
+is interpreted as negative in signed arithmetic.
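+
+As a rough illustration (hypothetical 32-bit values, HZ=1000, not part of
+this patch):
+
+  unsigned long charged_from = 0;               /* never initialized */
+  unsigned long window = msecs_to_jiffies(1000);
+  unsigned long now = (unsigned long)(unsigned int)(-300 * HZ); /* INITIAL_JIFFIES */
+
+  /* (long)(now - (charged_from + window)) has MSB=1, i.e. is negative, */
+  /* so time_after_eq(now, charged_from + window) returns FALSE         */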
+
+This issue primarily affects 32-bit systems:
+
+On 64-bit systems: MSB=1 values occur only after ~292 million years from
+boot (assuming HZ=1000), which is practically impossible.
+
+On 32-bit systems: MSB=1 values occur during the first 5 minutes after
+boot, and during the second half of every jiffies wraparound cycle,
+starting from day 25 (assuming HZ=1000).
+
+When the above unexpected FALSE return from time_after_eq() occurs, the
+charging window will not reset.  The user impact depends on the esz value at
+that time.
+
+If esz is 0, the scheme ignores the configured quotas and runs without limits.
+
+If esz is not 0, the scheme stops working once the quota is exhausted, and
+it remains stopped until the charging window finally resets.
+
+So, change quota->charged_from to jiffies at damos_adjust_quota() when it
+is considered the first charge window.  By this change, we can avoid the
+unexpected FALSE return from time_after_eq().
+
+Link: https://lkml.kernel.org/r/20250822025057.1740854-1-ekffu200098@gmail.com
+Fixes: 2b8a248d5873 ("mm/damon/schemes: implement size quota for schemes application speed control") # 5.16
+Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -2050,6 +2050,10 @@ static void damos_adjust_quota(struct da
+       if (!quota->ms && !quota->sz && list_empty(&quota->goals))
+               return;
++      /* First charge window */
++      if (!quota->total_charged_sz && !quota->charged_from)
++              quota->charged_from = jiffies;
++
+       /* New charge window starts */
+       if (time_after_eq(jiffies, quota->charged_from +
+                               msecs_to_jiffies(quota->reset_interval))) {
diff --git a/queue-6.16/mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch b/queue-6.16/mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch
new file mode 100644 (file)
index 0000000..b31da2a
--- /dev/null
@@ -0,0 +1,56 @@
+From 711f19dfd783ffb37ca4324388b9c4cb87e71363 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:57 +0800
+Subject: mm/damon/lru_sort: avoid divide-by-zero in damon_lru_sort_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit 711f19dfd783ffb37ca4324388b9c4cb87e71363 upstream.
+
+Patch series "mm/damon: avoid divide-by-zero in DAMON module's parameters
+application".
+
+DAMON's RECLAIM and LRU_SORT modules perform no validation on
+user-configured parameters during application, which may lead to
+division-by-zero errors.
+
+Avoid the divide-by-zero by adding validation checks when DAMON modules
+attempt to apply the parameters.
+
+
+This patch (of 2):
+
+During the calculation of 'hot_thres' and 'cold_thres', either
+'sample_interval' or 'aggr_interval' is used as the divisor, which may
+lead to division-by-zero errors.  Fix it by directly returning -EINVAL
+when such a case occurs.  Additionally, since 'aggr_interval' is already
+required to be set no smaller than 'sample_interval' in damon_set_attrs(),
+only the case where 'sample_interval' is zero needs to be checked.
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-2-yanquanmin1@huawei.com
+Fixes: 40e983cca927 ("mm/damon: introduce DAMON-based LRU-lists Sorting")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org>   [6.0+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/lru_sort.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_paramete
+       if (err)
+               return err;
++      if (!damon_lru_sort_mon_attrs.sample_interval) {
++              err = -EINVAL;
++              goto out;
++      }
++
+       err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+       if (err)
+               goto out;
diff --git a/queue-6.16/mm-hugetlb-add-missing-hugetlb_lock-in-__unmap_hugepage_range.patch b/queue-6.16/mm-hugetlb-add-missing-hugetlb_lock-in-__unmap_hugepage_range.patch
new file mode 100644 (file)
index 0000000..1f48c26
--- /dev/null
@@ -0,0 +1,84 @@
+From 21cc2b5c5062a256ae9064442d37ebbc23f5aef7 Mon Sep 17 00:00:00 2001
+From: Jeongjun Park <aha310510@gmail.com>
+Date: Sun, 24 Aug 2025 03:21:15 +0900
+Subject: mm/hugetlb: add missing hugetlb_lock in __unmap_hugepage_range()
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+commit 21cc2b5c5062a256ae9064442d37ebbc23f5aef7 upstream.
+
+When restoring a reservation for an anonymous page, we need to check whether
+we are freeing a surplus.  However, __unmap_hugepage_range() causes a data race
+because it reads h->surplus_huge_pages without the protection of
+hugetlb_lock.
+
+And adjust_reservation is a boolean variable that indicates whether
+reservations for anonymous pages in each folio should be restored.
+Therefore, it should be initialized to false for each round of the loop.
+However, the variable is currently only initialized to false once, at its
+definition outside the loop.
+
+This means that once adjust_reservation is set to true even once within
+the loop, reservations for anonymous pages will be restored
+unconditionally in all subsequent rounds, regardless of the folio's state.
+
+To fix this, we need to add the missing hugetlb_lock, unlock the
+page_table_lock earlier so that we don't lock the hugetlb_lock inside the
+page_table_lock, and initialize adjust_reservation to false on each
+round within the loop.
+
+Link: https://lkml.kernel.org/r/20250823182115.1193563-1-aha310510@gmail.com
+Fixes: df7a6d1f6405 ("mm/hugetlb: restore the reservation if needed")
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Reported-by: syzbot+417aeb05fd190f3a6da9@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=417aeb05fd190f3a6da9
+Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Cc: Breno Leitao <leitao@debian.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5855,7 +5855,7 @@ void __unmap_hugepage_range(struct mmu_g
+       spinlock_t *ptl;
+       struct hstate *h = hstate_vma(vma);
+       unsigned long sz = huge_page_size(h);
+-      bool adjust_reservation = false;
++      bool adjust_reservation;
+       unsigned long last_addr_mask;
+       bool force_flush = false;
+@@ -5948,6 +5948,7 @@ void __unmap_hugepage_range(struct mmu_g
+                                       sz);
+               hugetlb_count_sub(pages_per_huge_page(h), mm);
+               hugetlb_remove_rmap(folio);
++              spin_unlock(ptl);
+               /*
+                * Restore the reservation for anonymous page, otherwise the
+@@ -5955,14 +5956,16 @@ void __unmap_hugepage_range(struct mmu_g
+                * If there we are freeing a surplus, do not set the restore
+                * reservation bit.
+                */
++              adjust_reservation = false;
++
++              spin_lock_irq(&hugetlb_lock);
+               if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
+                   folio_test_anon(folio)) {
+                       folio_set_hugetlb_restore_reserve(folio);
+                       /* Reservation to be adjusted after the spin lock */
+                       adjust_reservation = true;
+               }
+-
+-              spin_unlock(ptl);
++              spin_unlock_irq(&hugetlb_lock);
+               /*
+                * Adjust the reservation for the region that will have the
diff --git a/queue-6.16/mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch b/queue-6.16/mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch
new file mode 100644 (file)
index 0000000..389d1ab
--- /dev/null
@@ -0,0 +1,49 @@
+From 394bfac1c7f7b701c2c93834c5761b9c9ceeebcf Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Fri, 22 Aug 2025 06:33:18 +0000
+Subject: mm/khugepaged: fix the address passed to notifier on testing young
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit 394bfac1c7f7b701c2c93834c5761b9c9ceeebcf upstream.
+
+Commit 8ee53820edfd ("thp: mmu_notifier_test_young") introduced
+mmu_notifier_test_young(), but we are passing the wrong address.
+In xxx_scan_pmd(), the actual iteration address is "_address" not
+"address".  We seem to misuse the variable on the very beginning.
+
+Change it to the right one.
+
+[akpm@linux-foundation.org fix whitespace, per everyone]
+Link: https://lkml.kernel.org/r/20250822063318.11644-1-richard.weiyang@gmail.com
+Fixes: 8ee53820edfd ("thp: mmu_notifier_test_young")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1400,8 +1400,8 @@ static int hpage_collapse_scan_pmd(struc
+                */
+               if (cc->is_khugepaged &&
+                   (pte_young(pteval) || folio_test_young(folio) ||
+-                   folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
+-                                                                   address)))
++                   folio_test_referenced(folio) ||
++                   mmu_notifier_test_young(vma->vm_mm, _address)))
+                       referenced++;
+       }
+       if (!writable) {
diff --git a/queue-6.16/mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch b/queue-6.16/mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch
new file mode 100644 (file)
index 0000000..91512a5
--- /dev/null
@@ -0,0 +1,100 @@
+From 3be306cccdccede13e3cefd0c14e430cc2b7c9c7 Mon Sep 17 00:00:00 2001
+From: Kyle Meyer <kyle.meyer@hpe.com>
+Date: Thu, 28 Aug 2025 13:38:20 -0500
+Subject: mm/memory-failure: fix redundant updates for already poisoned pages
+
+From: Kyle Meyer <kyle.meyer@hpe.com>
+
+commit 3be306cccdccede13e3cefd0c14e430cc2b7c9c7 upstream.
+
+Duplicate memory errors can be reported by multiple sources.
+
+Passing an already poisoned page to action_result() causes issues:
+
+* The amount of hardware corrupted memory is incorrectly updated.
+* Per NUMA node MF stats are incorrectly updated.
+* Redundant "already poisoned" messages are printed.
+
+Avoid those issues by:
+
+* Skipping hardware corrupted memory updates for already poisoned pages.
+* Skipping per NUMA node MF stats updates for already poisoned pages.
+* Dropping redundant "already poisoned" messages.
+
+Make MF_MSG_ALREADY_POISONED consistent with other action_page_types and
+make calls to action_result() consistent for already poisoned normal pages
+and huge pages.
+
+Link: https://lkml.kernel.org/r/aLCiHMy12Ck3ouwC@hpe.com
+Fixes: b8b9488d50b7 ("mm/memory-failure: improve memory failure action_result messages")
+Signed-off-by: Kyle Meyer <kyle.meyer@hpe.com>
+Reviewed-by: Jiaqi Yan <jiaqiyan@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Borislav Betkov <bp@alien8.de>
+Cc: Kyle Meyer <kyle.meyer@hpe.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: "Luck, Tony" <tony.luck@intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Russ Anderson <russ.anderson@hpe.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c |   13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -950,7 +950,7 @@ static const char * const action_page_ty
+       [MF_MSG_BUDDY]                  = "free buddy page",
+       [MF_MSG_DAX]                    = "dax page",
+       [MF_MSG_UNSPLIT_THP]            = "unsplit thp",
+-      [MF_MSG_ALREADY_POISONED]       = "already poisoned",
++      [MF_MSG_ALREADY_POISONED]       = "already poisoned page",
+       [MF_MSG_UNKNOWN]                = "unknown page",
+ };
+@@ -1343,9 +1343,10 @@ static int action_result(unsigned long p
+ {
+       trace_memory_failure_event(pfn, type, result);
+-      num_poisoned_pages_inc(pfn);
+-
+-      update_per_node_mf_stats(pfn, result);
++      if (type != MF_MSG_ALREADY_POISONED) {
++              num_poisoned_pages_inc(pfn);
++              update_per_node_mf_stats(pfn, result);
++      }
+       pr_err("%#lx: recovery action for %s: %s\n",
+               pfn, action_page_types[type], action_name[result]);
+@@ -2088,12 +2089,11 @@ retry:
+               *hugetlb = 0;
+               return 0;
+       } else if (res == -EHWPOISON) {
+-              pr_err("%#lx: already hardware poisoned\n", pfn);
+               if (flags & MF_ACTION_REQUIRED) {
+                       folio = page_folio(p);
+                       res = kill_accessing_process(current, folio_pfn(folio), flags);
+-                      action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+               }
++              action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+               return res;
+       } else if (res == -EBUSY) {
+               if (!(flags & MF_NO_RETRY)) {
+@@ -2279,7 +2279,6 @@ try_again:
+               goto unlock_mutex;
+       if (TestSetPageHWPoison(p)) {
+-              pr_err("%#lx: already hardware poisoned\n", pfn);
+               res = -EHWPOISON;
+               if (flags & MF_ACTION_REQUIRED)
+                       res = kill_accessing_process(current, pfn, flags);
diff --git a/queue-6.16/mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch b/queue-6.16/mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
new file mode 100644 (file)
index 0000000..e111cfa
--- /dev/null
@@ -0,0 +1,108 @@
+From d613f53c83ec47089c4e25859d5e8e0359f6f8da Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 28 Aug 2025 10:46:18 +0800
+Subject: mm/memory-failure: fix VM_BUG_ON_PAGE(PagePoisoned(page)) when unpoison memory
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit d613f53c83ec47089c4e25859d5e8e0359f6f8da upstream.
+
+When I did memory failure tests, below panic occurs:
+
+page dumped because: VM_BUG_ON_PAGE(PagePoisoned(page))
+kernel BUG at include/linux/page-flags.h:616!
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+CPU: 3 PID: 720 Comm: bash Not tainted 6.10.0-rc1-00195-g148743902568 #40
+RIP: 0010:unpoison_memory+0x2f3/0x590
+RSP: 0018:ffffa57fc8787d60 EFLAGS: 00000246
+RAX: 0000000000000037 RBX: 0000000000000009 RCX: ffff9be25fcdc9c8
+RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff9be25fcdc9c0
+RBP: 0000000000300000 R08: ffffffffb4956f88 R09: 0000000000009ffb
+R10: 0000000000000284 R11: ffffffffb4926fa0 R12: ffffe6b00c000000
+R13: ffff9bdb453dfd00 R14: 0000000000000000 R15: fffffffffffffffe
+FS:  00007f08f04e4740(0000) GS:ffff9be25fcc0000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000564787a30410 CR3: 000000010d4e2000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ unpoison_memory+0x2f3/0x590
+ simple_attr_write_xsigned.constprop.0.isra.0+0xb3/0x110
+ debugfs_attr_write+0x42/0x60
+ full_proxy_write+0x5b/0x80
+ vfs_write+0xd5/0x540
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xb9/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f08f0314887
+RSP: 002b:00007ffece710078 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000009 RCX: 00007f08f0314887
+RDX: 0000000000000009 RSI: 0000564787a30410 RDI: 0000000000000001
+RBP: 0000564787a30410 R08: 000000000000fefe R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000009
+R13: 00007f08f041b780 R14: 00007f08f0417600 R15: 00007f08f0416a00
+ </TASK>
+Modules linked in: hwpoison_inject
+---[ end trace 0000000000000000 ]---
+RIP: 0010:unpoison_memory+0x2f3/0x590
+RSP: 0018:ffffa57fc8787d60 EFLAGS: 00000246
+RAX: 0000000000000037 RBX: 0000000000000009 RCX: ffff9be25fcdc9c8
+RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff9be25fcdc9c0
+RBP: 0000000000300000 R08: ffffffffb4956f88 R09: 0000000000009ffb
+R10: 0000000000000284 R11: ffffffffb4926fa0 R12: ffffe6b00c000000
+R13: ffff9bdb453dfd00 R14: 0000000000000000 R15: fffffffffffffffe
+FS:  00007f08f04e4740(0000) GS:ffff9be25fcc0000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000564787a30410 CR3: 000000010d4e2000 CR4: 00000000000006f0
+Kernel panic - not syncing: Fatal exception
+Kernel Offset: 0x31c00000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
+---[ end Kernel panic - not syncing: Fatal exception ]---
+
+The root cause is that unpoison_memory() tries to check the PG_HWPoison
+flags of an uninitialized page.  So VM_BUG_ON_PAGE(PagePoisoned(page)) is
+triggered.  This can be reproduced by below steps:
+
+1.Offline memory block:
+
+ echo offline > /sys/devices/system/memory/memory12/state
+
+2.Get offlined memory pfn:
+
+ page-types -b n -rlN
+
+3.Write pfn to unpoison-pfn
+
+ echo <pfn> > /sys/kernel/debug/hwpoison/unpoison-pfn
+
+This scenario can be identified by pfn_to_online_page() returning NULL.
+And ZONE_DEVICE pages are never expected, so we can simply fail if
+pfn_to_online_page() == NULL to fix the bug.
+
+Link: https://lkml.kernel.org/r/20250828024618.1744895-1-linmiaohe@huawei.com
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2576,10 +2576,9 @@ int unpoison_memory(unsigned long pfn)
+       static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                       DEFAULT_RATELIMIT_BURST);
+-      if (!pfn_valid(pfn))
+-              return -ENXIO;
+-
+-      p = pfn_to_page(pfn);
++      p = pfn_to_online_page(pfn);
++      if (!p)
++              return -EIO;
+       folio = page_folio(p);
+       mutex_lock(&mf_mutex);
diff --git a/queue-6.16/mm-vmalloc-mm-kasan-respect-gfp-mask-in-kasan_populate_vmalloc.patch b/queue-6.16/mm-vmalloc-mm-kasan-respect-gfp-mask-in-kasan_populate_vmalloc.patch
new file mode 100644 (file)
index 0000000..43b8052
--- /dev/null
@@ -0,0 +1,200 @@
+From 79357cd06d41d0f5a11b17d7c86176e395d10ef2 Mon Sep 17 00:00:00 2001
+From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Date: Sun, 31 Aug 2025 14:10:58 +0200
+Subject: mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
+
+From: Uladzislau Rezki (Sony) <urezki@gmail.com>
+
+commit 79357cd06d41d0f5a11b17d7c86176e395d10ef2 upstream.
+
+kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and
+always allocate memory using the hardcoded GFP_KERNEL flag.  This makes
+them inconsistent with vmalloc(), which was recently extended to support
+GFP_NOFS and GFP_NOIO allocations.
+
+Page table allocations performed during shadow population also ignore the
+external gfp_mask.  To preserve the intended semantics of GFP_NOFS and
+GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate
+memalloc scope.
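+
+The scope API pattern used below, in outline (simplified sketch of the
+diff, not literal code):
+
+  unsigned int flags;
+
+  if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)   /* GFP_NOFS */
+          flags = memalloc_nofs_save();
+  else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)     /* GFP_NOIO */
+          flags = memalloc_noio_save();
+
+  apply_to_page_range(...);
+
+  /* then the matching memalloc_nofs_restore(flags) or  */
+  /* memalloc_noio_restore(flags)                       */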
+
+xfs calls vmalloc with GFP_NOFS, so this bug could lead to deadlock.
+
+There was a report here
+https://lkml.kernel.org/r/686ea951.050a0220.385921.0016.GAE@google.com
+
+This patch:
+ - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
+ - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
+ - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
+   around apply_to_page_range();
+ - Updates vmalloc.c and percpu allocator call sites accordingly.
+
+Link: https://lkml.kernel.org/r/20250831121058.92971-1-urezki@gmail.com
+Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Reported-by: syzbot+3470c9ffee63e4abafeb@syzkaller.appspotmail.com
+Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kasan.h |    6 +++---
+ mm/kasan/shadow.c     |   31 ++++++++++++++++++++++++-------
+ mm/vmalloc.c          |    8 ++++----
+ 3 files changed, 31 insertions(+), 14 deletions(-)
+
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(vo
+ #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
++int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+                          unsigned long free_region_start,
+                          unsigned long free_region_end,
+@@ -574,7 +574,7 @@ static inline void kasan_populate_early_
+                                                      unsigned long size)
+ { }
+ static inline int kasan_populate_vmalloc(unsigned long start,
+-                                      unsigned long size)
++                                      unsigned long size, gfp_t gfp_mask)
+ {
+       return 0;
+ }
+@@ -610,7 +610,7 @@ static __always_inline void kasan_poison
+ static inline void kasan_populate_early_vm_area_shadow(void *start,
+                                                      unsigned long size) { }
+ static inline int kasan_populate_vmalloc(unsigned long start,
+-                                      unsigned long size)
++                                      unsigned long size, gfp_t gfp_mask)
+ {
+       return 0;
+ }
+--- a/mm/kasan/shadow.c
++++ b/mm/kasan/shadow.c
+@@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct pa
+       }
+ }
+-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
++static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
+ {
+       unsigned long nr_populated, nr_total = nr_pages;
+       struct page **page_array = pages;
+       while (nr_pages) {
+-              nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
++              nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
+               if (!nr_populated) {
+                       ___free_pages_bulk(page_array, nr_total - nr_pages);
+                       return -ENOMEM;
+@@ -353,25 +353,42 @@ static int ___alloc_pages_bulk(struct pa
+       return 0;
+ }
+-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
++static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+ {
+       unsigned long nr_pages, nr_total = PFN_UP(end - start);
+       struct vmalloc_populate_data data;
++      unsigned int flags;
+       int ret = 0;
+-      data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
++      data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
+       if (!data.pages)
+               return -ENOMEM;
+       while (nr_total) {
+               nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+-              ret = ___alloc_pages_bulk(data.pages, nr_pages);
++              ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
+               if (ret)
+                       break;
+               data.start = start;
++
++              /*
++               * page tables allocations ignore external gfp mask, enforce it
++               * by the scope API
++               */
++              if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
++                      flags = memalloc_nofs_save();
++              else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
++                      flags = memalloc_noio_save();
++
+               ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+                                         kasan_populate_vmalloc_pte, &data);
++
++              if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
++                      memalloc_nofs_restore(flags);
++              else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
++                      memalloc_noio_restore(flags);
++
+               ___free_pages_bulk(data.pages, nr_pages);
+               if (ret)
+                       break;
+@@ -385,7 +402,7 @@ static int __kasan_populate_vmalloc(unsi
+       return ret;
+ }
+-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
++int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+ {
+       unsigned long shadow_start, shadow_end;
+       int ret;
+@@ -414,7 +431,7 @@ int kasan_populate_vmalloc(unsigned long
+       shadow_start = PAGE_ALIGN_DOWN(shadow_start);
+       shadow_end = PAGE_ALIGN(shadow_end);
+-      ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
++      ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+       if (ret)
+               return ret;
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area
+       if (unlikely(!vmap_initialized))
+               return ERR_PTR(-EBUSY);
++      /* Only reclaim behaviour flags are relevant. */
++      gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
+       might_sleep();
+       /*
+@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area
+        */
+       va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
+       if (!va) {
+-              gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
+-
+               va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
+               if (unlikely(!va))
+                       return ERR_PTR(-ENOMEM);
+@@ -2089,7 +2089,7 @@ retry:
+       BUG_ON(va->va_start < vstart);
+       BUG_ON(va->va_end > vend);
+-      ret = kasan_populate_vmalloc(addr, size);
++      ret = kasan_populate_vmalloc(addr, size, gfp_mask);
+       if (ret) {
+               free_vmap_area(va);
+               return ERR_PTR(ret);
+@@ -4826,7 +4826,7 @@ retry:
+       /* populate the kasan shadow space */
+       for (area = 0; area < nr_vms; area++) {
+-              if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
++              if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
+                       goto err_free_shadow;
+       }
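
The shadow.c hunk above relies on the memory-allocation scope API to make page-table
allocations (which ignore the caller's gfp mask) honour GFP_NOFS/GFP_NOIO semantics.
A minimal sketch of that pattern, outside the context of this patch and with a
hypothetical do_populate() helper:

        #include <linux/sched/mm.h>

        /* Sketch only: enforce a caller-supplied gfp mask on nested allocations
         * that would otherwise default to GFP_KERNEL (e.g. page-table population). */
        static int populate_with_gfp(unsigned long start, unsigned long size,
                                     gfp_t gfp_mask)
        {
                unsigned int flags = 0;
                int ret;

                if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
                        flags = memalloc_nofs_save();   /* caller asked for GFP_NOFS */
                else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
                        flags = memalloc_noio_save();   /* caller asked for GFP_NOIO */

                ret = do_populate(start, size);         /* hypothetical helper */

                if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
                        memalloc_nofs_restore(flags);
                else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
                        memalloc_noio_restore(flags);

                return ret;
        }

The save/restore pair only needs to bracket the region where the nested allocations
happen; the previous flags are returned by the save call, so scopes can nest safely.
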
diff --git a/queue-6.16/mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch b/queue-6.16/mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch
new file mode 100644 (file)
index 0000000..07bd467
--- /dev/null
@@ -0,0 +1,65 @@
+From 648de37416b301f046f62f1b65715c7fa8ebaa67 Mon Sep 17 00:00:00 2001
+From: Krister Johansen <kjlx@templeofstupid.com>
+Date: Mon, 8 Sep 2025 11:16:01 -0700
+Subject: mptcp: sockopt: make sync_socket_options propagate SOCK_KEEPOPEN
+
+From: Krister Johansen <kjlx@templeofstupid.com>
+
+commit 648de37416b301f046f62f1b65715c7fa8ebaa67 upstream.
+
+Users reported a scenario where MPTCP connections that were configured
+with SO_KEEPALIVE prior to connect would fail to enable their keepalives
+if MPTCP fell back to TCP mode.
+
+After investigating, this affects keepalives for any connection where
+sync_socket_options is called on a socket that is in the closed or
+listening state.  Joins are handled properly. For connects,
+sync_socket_options is called when the socket is still in the closed
+state.  The tcp_set_keepalive() function does not act on sockets that
+are closed or listening, hence keepalive is not immediately enabled.
+Since the SO_KEEPOPEN flag is absent, it is not enabled later in the
+connect sequence via tcp_finish_connect.  Setting the keepalive via
+sockopt after connect does work, but would not address any subsequently
+created flows.
+
+Fortunately, the fix here is straightforward: set SOCK_KEEPOPEN on the
+subflow when calling sync_socket_options.
+
+The fix was validated both by using tcpdump to observe keepalive
+packets not being sent before the fix, and being sent after the fix.  It
+was also possible to observe via ss that the keepalive timer was not
+enabled on these sockets before the fix, but was enabled afterwards.
+
+Fixes: 1b3e7ede1365 ("mptcp: setsockopt: handle SO_KEEPALIVE and SO_PRIORITY")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krister Johansen <kjlx@templeofstupid.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/aL8dYfPZrwedCIh9@templeofstupid.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/sockopt.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1508,13 +1508,12 @@ static void sync_socket_options(struct m
+ {
+       static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
+       struct sock *sk = (struct sock *)msk;
++      bool keep_open;
+-      if (ssk->sk_prot->keepalive) {
+-              if (sock_flag(sk, SOCK_KEEPOPEN))
+-                      ssk->sk_prot->keepalive(ssk, 1);
+-              else
+-                      ssk->sk_prot->keepalive(ssk, 0);
+-      }
++      keep_open = sock_flag(sk, SOCK_KEEPOPEN);
++      if (ssk->sk_prot->keepalive)
++              ssk->sk_prot->keepalive(ssk, keep_open);
++      sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
+       ssk->sk_priority = sk->sk_priority;
+       ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/queue-6.16/mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch b/queue-6.16/mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
new file mode 100644 (file)
index 0000000..f43f719
--- /dev/null
@@ -0,0 +1,58 @@
+From fd779eac2d659668be4d3dbdac0710afd5d6db12 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Date: Thu, 21 Aug 2025 14:00:57 +0200
+Subject: mtd: nand: raw: atmel: Respect tAR, tCLR in read setup timing
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+commit fd779eac2d659668be4d3dbdac0710afd5d6db12 upstream.
+
+Having setup time 0 violates tAR, tCLR of some chips, for instance
+TOSHIBA TC58NVG2S3ETAI0 cannot be detected successfully (first ID byte
+being read duplicated, i.e. 98 98 dc 90 15 76 14 03 instead of
+98 dc 90 15 76 ...).
+
+Atmel Application Notes postulated 1 cycle NRD_SETUP without explanation
+[1], but it looks more appropriate to just calculate setup time properly.
+
+[1] Link: https://ww1.microchip.com/downloads/aemDocuments/documents/MPU32/ApplicationNotes/ApplicationNotes/doc6255.pdf
+
+Cc: stable@vger.kernel.org
+Fixes: f9ce2eddf176 ("mtd: nand: atmel: Add ->setup_data_interface() hooks")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Tested-by: Alexander Dahl <ada@thorsis.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/atmel/nand-controller.c |   16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smccon
+               return ret;
+       /*
++       * Read setup timing depends on the operation done on the NAND:
++       *
++       * NRD_SETUP = max(tAR, tCLR)
++       */
++      timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++      ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++      totalcycles += ncycles;
++      ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++      if (ret)
++              return ret;
++
++      /*
+        * The read cycle timing is directly matching tRC, but is also
+        * dependent on the setup and hold timings we calculated earlier,
+        * which gives:
+        *
+-       * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+-       *
+-       * NRD_SETUP is always 0.
++       * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+        */
+       ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+       ncycles = max(totalcycles, ncycles);
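
As a worked example of the new setup computation (illustrative figures only, not taken
from the patch or any datasheet): with tAR_min = tCLR_min = 10 ns and a ~133 MHz master
clock (period of roughly 7519 ps), the driver now programs a non-zero read setup:

        timeps      = max(10000, 10000);          /* tAR_min = tCLR_min = 10 ns = 10000 ps */
        mckperiodps = 7519;                       /* ~133 MHz MCK                          */
        ncycles     = DIV_ROUND_UP(10000, 7519);  /* = 2 cycles of NRD_SETUP instead of 0  */
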
diff --git a/queue-6.16/mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch b/queue-6.16/mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
new file mode 100644 (file)
index 0000000..a925eec
--- /dev/null
@@ -0,0 +1,134 @@
+From 513c40e59d5a414ab763a9c84797534b5e8c208d Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+Date: Tue, 12 Aug 2025 09:26:58 +0200
+Subject: mtd: rawnand: stm32_fmc2: avoid overlapping mappings on ECC buffer
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+commit 513c40e59d5a414ab763a9c84797534b5e8c208d upstream.
+
+Avoid the overlapping mappings below by using a contiguous
+non-cacheable buffer.
+
+[    4.077708] DMA-API: stm32_fmc2_nfc 48810000.nand-controller: cacheline tracking EEXIST,
+overlapping mappings aren't supported
+[    4.089103] WARNING: CPU: 1 PID: 44 at kernel/dma/debug.c:568 add_dma_entry+0x23c/0x300
+[    4.097071] Modules linked in:
+[    4.100101] CPU: 1 PID: 44 Comm: kworker/u4:2 Not tainted 6.1.82 #1
+[    4.106346] Hardware name: STMicroelectronics STM32MP257F VALID1 SNOR / MB1704 (LPDDR4 Power discrete) + MB1703 + MB1708 (SNOR MB1730) (DT)
+[    4.118824] Workqueue: events_unbound deferred_probe_work_func
+[    4.124674] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[    4.131624] pc : add_dma_entry+0x23c/0x300
+[    4.135658] lr : add_dma_entry+0x23c/0x300
+[    4.139792] sp : ffff800009dbb490
+[    4.143016] x29: ffff800009dbb4a0 x28: 0000000004008022 x27: ffff8000098a6000
+[    4.150174] x26: 0000000000000000 x25: ffff8000099e7000 x24: ffff8000099e7de8
+[    4.157231] x23: 00000000ffffffff x22: 0000000000000000 x21: ffff8000098a6a20
+[    4.164388] x20: ffff000080964180 x19: ffff800009819ba0 x18: 0000000000000006
+[    4.171545] x17: 6361727420656e69 x16: 6c6568636163203a x15: 72656c6c6f72746e
+[    4.178602] x14: 6f632d646e616e2e x13: ffff800009832f58 x12: 00000000000004ec
+[    4.185759] x11: 00000000000001a4 x10: ffff80000988af58 x9 : ffff800009832f58
+[    4.192916] x8 : 00000000ffffefff x7 : ffff80000988af58 x6 : 80000000fffff000
+[    4.199972] x5 : 000000000000bff4 x4 : 0000000000000000 x3 : 0000000000000000
+[    4.207128] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000812d2c40
+[    4.214185] Call trace:
+[    4.216605]  add_dma_entry+0x23c/0x300
+[    4.220338]  debug_dma_map_sg+0x198/0x350
+[    4.224373]  __dma_map_sg_attrs+0xa0/0x110
+[    4.228411]  dma_map_sg_attrs+0x10/0x2c
+[    4.232247]  stm32_fmc2_nfc_xfer.isra.0+0x1c8/0x3fc
+[    4.237088]  stm32_fmc2_nfc_seq_read_page+0xc8/0x174
+[    4.242127]  nand_read_oob+0x1d4/0x8e0
+[    4.245861]  mtd_read_oob_std+0x58/0x84
+[    4.249596]  mtd_read_oob+0x90/0x150
+[    4.253231]  mtd_read+0x68/0xac
+
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Cc: stable@vger.kernel.org
+Fixes: 2cd457f328c1 ("mtd: rawnand: stm32_fmc2: add STM32 FMC2 NAND flash controller driver")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/stm32_fmc2_nand.c |   28 +++++++++-------------------
+ 1 file changed, 9 insertions(+), 19 deletions(-)
+
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -272,6 +272,7 @@ struct stm32_fmc2_nfc {
+       struct sg_table dma_data_sg;
+       struct sg_table dma_ecc_sg;
+       u8 *ecc_buf;
++      dma_addr_t dma_ecc_addr;
+       int dma_ecc_len;
+       u32 tx_dma_max_burst;
+       u32 rx_dma_max_burst;
+@@ -902,17 +903,10 @@ static int stm32_fmc2_nfc_xfer(struct na
+       if (!write_data && !raw) {
+               /* Configure DMA ECC status */
+-              p = nfc->ecc_buf;
+               for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+-                      sg_set_buf(sg, p, nfc->dma_ecc_len);
+-                      p += nfc->dma_ecc_len;
+-              }
+-
+-              ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                               eccsteps, dma_data_dir);
+-              if (!ret) {
+-                      ret = -EIO;
+-                      goto err_unmap_data;
++                      sg_dma_address(sg) = nfc->dma_ecc_addr +
++                                           s * nfc->dma_ecc_len;
++                      sg_dma_len(sg) = nfc->dma_ecc_len;
+               }
+               desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+@@ -921,7 +915,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+                                                  DMA_PREP_INTERRUPT);
+               if (!desc_ecc) {
+                       ret = -ENOMEM;
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+               }
+               reinit_completion(&nfc->dma_ecc_complete);
+@@ -929,7 +923,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+               desc_ecc->callback_param = &nfc->dma_ecc_complete;
+               ret = dma_submit_error(dmaengine_submit(desc_ecc));
+               if (ret)
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+               dma_async_issue_pending(nfc->dma_ecc_ch);
+       }
+@@ -949,7 +943,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+               if (!write_data && !raw)
+                       dmaengine_terminate_all(nfc->dma_ecc_ch);
+               ret = -ETIMEDOUT;
+-              goto err_unmap_ecc;
++              goto err_unmap_data;
+       }
+       /* Wait DMA data transfer completion */
+@@ -969,11 +963,6 @@ static int stm32_fmc2_nfc_xfer(struct na
+               }
+       }
+-err_unmap_ecc:
+-      if (!write_data && !raw)
+-              dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                           eccsteps, dma_data_dir);
+-
+ err_unmap_data:
+       dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+@@ -1610,7 +1599,8 @@ static int stm32_fmc2_nfc_dma_setup(stru
+               return ret;
+       /* Allocate a buffer to store ECC status registers */
+-      nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
++      nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
++                                         &nfc->dma_ecc_addr, GFP_KERNEL);
+       if (!nfc->ecc_buf)
+               return -ENOMEM;
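
The switch from devm_kzalloc() plus dma_map_sg() to dmam_alloc_coherent() gives the ECC
status buffer matching CPU and DMA views up front, so no streaming mapping (and no
overlapping cacheline tracking) is needed per transfer. A minimal sketch of that
allocation pattern, with a hypothetical BUF_LEN and outside the driver's probe path:

        #include <linux/dma-mapping.h>

        /* Sketch only: device-managed coherent buffer shared between CPU and DMA. */
        dma_addr_t dma_addr;
        void *buf = dmam_alloc_coherent(dev, BUF_LEN, &dma_addr, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* The CPU accesses the buffer through 'buf' and the DMA engine is programmed
         * with 'dma_addr'; no dma_map_sg()/dma_unmap_sg() pair is required. */
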
diff --git a/queue-6.16/mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch b/queue-6.16/mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch
new file mode 100644 (file)
index 0000000..db1dc20
--- /dev/null
@@ -0,0 +1,49 @@
+From 811c0da4542df3c065f6cb843ced68780e27bb44 Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+Date: Tue, 12 Aug 2025 09:30:08 +0200
+Subject: mtd: rawnand: stm32_fmc2: fix ECC overwrite
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+commit 811c0da4542df3c065f6cb843ced68780e27bb44 upstream.
+
+In case an OOB write is requested during a data write, the ECC is currently
+lost. Avoid this issue by writing only in the free spare area.
+This issue has been seen with a YAFFS2 file system.
+
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Cc: stable@vger.kernel.org
+Fixes: 2cd457f328c1 ("mtd: rawnand: stm32_fmc2: add STM32 FMC2 NAND flash controller driver")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/stm32_fmc2_nand.c |   18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -985,9 +985,21 @@ static int stm32_fmc2_nfc_seq_write(stru
+       /* Write oob */
+       if (oob_required) {
+-              ret = nand_change_write_column_op(chip, mtd->writesize,
+-                                                chip->oob_poi, mtd->oobsize,
+-                                                false);
++              unsigned int offset_in_page = mtd->writesize;
++              const void *buf = chip->oob_poi;
++              unsigned int len = mtd->oobsize;
++
++              if (!raw) {
++                      struct mtd_oob_region oob_free;
++
++                      mtd_ooblayout_free(mtd, 0, &oob_free);
++                      offset_in_page += oob_free.offset;
++                      buf += oob_free.offset;
++                      len = oob_free.length;
++              }
++
++              ret = nand_change_write_column_op(chip, offset_in_page,
++                                                buf, len, false);
+               if (ret)
+                       return ret;
+       }
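
For reference, mtd_ooblayout_free() fills a struct mtd_oob_region with the offset and
length of the requested free (non-ECC) OOB section. A sketch of confining a write to
that region (section 0, return value checked; the surrounding variables mirror the
hunk above and are otherwise hypothetical):

        struct mtd_oob_region oob_free;
        int ret;

        ret = mtd_ooblayout_free(mtd, 0, &oob_free);         /* first free OOB section */
        if (ret)
                return ret;

        offset_in_page = mtd->writesize + oob_free.offset;   /* skip the ECC bytes */
        buf            = chip->oob_poi + oob_free.offset;
        len            = oob_free.length;                    /* free bytes only    */
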
diff --git a/queue-6.16/net-libwx-fix-to-enable-rss.patch b/queue-6.16/net-libwx-fix-to-enable-rss.patch
new file mode 100644 (file)
index 0000000..86cc041
--- /dev/null
@@ -0,0 +1,40 @@
+From 157cf360c4a8751f7f511a71cc3a283b5d27f889 Mon Sep 17 00:00:00 2001
+From: Jiawen Wu <jiawenwu@trustnetic.com>
+Date: Thu, 4 Sep 2025 10:43:22 +0800
+Subject: net: libwx: fix to enable RSS
+
+From: Jiawen Wu <jiawenwu@trustnetic.com>
+
+commit 157cf360c4a8751f7f511a71cc3a283b5d27f889 upstream.
+
+Currently, when SRIOV is enabled, a PF with multiple queues receives
+all packets on queue 0 only. This is caused by an incorrect flag check,
+which prevents RSS from being enabled.
+
+In fact, RSS is supported for the functions when SRIOV is enabled.
+Remove the flag check to fix it.
+
+Fixes: c52d4b898901 ("net: libwx: Redesign flow when sriov is enabled")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/A3B7449A08A044D0+20250904024322.87145-1-jiawenwu@trustnetic.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/wangxun/libwx/wx_hw.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -2071,10 +2071,6 @@ static void wx_setup_mrqc(struct wx *wx)
+ {
+       u32 rss_field = 0;
+-      /* VT, and RSS do not coexist at the same time */
+-      if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+-              return;
+-
+       /* Disable indicating checksum in descriptor, enables RSS hash */
+       wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
diff --git a/queue-6.16/net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch b/queue-6.16/net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
new file mode 100644 (file)
index 0000000..40189b0
--- /dev/null
@@ -0,0 +1,68 @@
+From 5537a4679403423e0b49c95b619983a4583d69c5 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Mon, 8 Sep 2025 13:26:19 +0200
+Subject: net: usb: asix: ax88772: drop phylink use in PM to avoid MDIO runtime PM wakeups
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 5537a4679403423e0b49c95b619983a4583d69c5 upstream.
+
+Drop phylink_{suspend,resume}() from ax88772 PM callbacks.
+
+MDIO bus accesses have their own runtime-PM handling and will try to
+wake the device if it is suspended. Such wake attempts must not happen
+from PM callbacks while the device PM lock is held. Since phylink
+suspend/resume may trigger MDIO, it must not be called in PM context.
+
+No extra phylink PM handling is required for this driver:
+- .ndo_open/.ndo_stop control the phylink start/stop lifecycle.
+- ethtool/phylib entry points run in process context, not PM.
+- phylink MAC ops program the MAC on link changes after resume.
+
+Fixes: e0bffe3e6894 ("net: asix: ax88772: migrate to phylink")
+Reported-by: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Tested-by: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+Tested-by: Xu Yang <xu.yang_2@nxp.com>
+Link: https://patch.msgid.link/20250908112619.2900723-1-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/asix_devices.c |   13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -607,15 +607,8 @@ static const struct net_device_ops ax887
+ static void ax88772_suspend(struct usbnet *dev)
+ {
+-      struct asix_common_private *priv = dev->driver_priv;
+       u16 medium;
+-      if (netif_running(dev->net)) {
+-              rtnl_lock();
+-              phylink_suspend(priv->phylink, false);
+-              rtnl_unlock();
+-      }
+-
+       /* Stop MAC operation */
+       medium = asix_read_medium_status(dev, 1);
+       medium &= ~AX_MEDIUM_RE;
+@@ -644,12 +637,6 @@ static void ax88772_resume(struct usbnet
+       for (i = 0; i < 3; i++)
+               if (!priv->reset(dev, 1))
+                       break;
+-
+-      if (netif_running(dev->net)) {
+-              rtnl_lock();
+-              phylink_resume(priv->phylink);
+-              rtnl_unlock();
+-      }
+ }
+ static int asix_resume(struct usb_interface *intf)
diff --git a/queue-6.16/netlink-specs-mptcp-fix-if-idx-attribute-type.patch b/queue-6.16/netlink-specs-mptcp-fix-if-idx-attribute-type.patch
new file mode 100644 (file)
index 0000000..931fff3
--- /dev/null
@@ -0,0 +1,38 @@
+From 7094b84863e5832cb1cd9c4b9d648904775b6bd9 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 8 Sep 2025 23:27:27 +0200
+Subject: netlink: specs: mptcp: fix if-idx attribute type
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 7094b84863e5832cb1cd9c4b9d648904775b6bd9 upstream.
+
+This attribute is used as a signed number in the code in pm_netlink.c:
+
+  nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
+
+The specs should then reflect that. Note that other 'if-idx' attributes
+from the same .yaml file use a signed number as well.
+
+Fixes: bc8aeb2045e2 ("Documentation: netlink: add a YAML spec for mptcp")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250908-net-mptcp-misc-fixes-6-17-rc5-v1-1-5f2168a66079@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/netlink/specs/mptcp_pm.yaml |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/netlink/specs/mptcp_pm.yaml
++++ b/Documentation/netlink/specs/mptcp_pm.yaml
+@@ -256,7 +256,7 @@ attribute-sets:
+         type: u32
+       -
+         name: if-idx
+-        type: u32
++        type: s32
+       -
+         name: reset-reason
+         type: u32
diff --git a/queue-6.16/ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch b/queue-6.16/ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch
new file mode 100644 (file)
index 0000000..90c73d8
--- /dev/null
@@ -0,0 +1,138 @@
+From 04100f775c2ea501927f508f17ad824ad1f23c8d Mon Sep 17 00:00:00 2001
+From: Mark Tinguely <mark.tinguely@oracle.com>
+Date: Fri, 29 Aug 2025 10:18:15 -0500
+Subject: ocfs2: fix recursive semaphore deadlock in fiemap call
+
+From: Mark Tinguely <mark.tinguely@oracle.com>
+
+commit 04100f775c2ea501927f508f17ad824ad1f23c8d upstream.
+
+syzbot detected an OCFS2 hang due to a recursive semaphore on a
+FS_IOC_FIEMAP of the extent list on a specially crafted mmap file.
+
+context_switch kernel/sched/core.c:5357 [inline]
+   __schedule+0x1798/0x4cc0 kernel/sched/core.c:6961
+   __schedule_loop kernel/sched/core.c:7043 [inline]
+   schedule+0x165/0x360 kernel/sched/core.c:7058
+   schedule_preempt_disabled+0x13/0x30 kernel/sched/core.c:7115
+   rwsem_down_write_slowpath+0x872/0xfe0 kernel/locking/rwsem.c:1185
+   __down_write_common kernel/locking/rwsem.c:1317 [inline]
+   __down_write kernel/locking/rwsem.c:1326 [inline]
+   down_write+0x1ab/0x1f0 kernel/locking/rwsem.c:1591
+   ocfs2_page_mkwrite+0x2ff/0xc40 fs/ocfs2/mmap.c:142
+   do_page_mkwrite+0x14d/0x310 mm/memory.c:3361
+   wp_page_shared mm/memory.c:3762 [inline]
+   do_wp_page+0x268d/0x5800 mm/memory.c:3981
+   handle_pte_fault mm/memory.c:6068 [inline]
+   __handle_mm_fault+0x1033/0x5440 mm/memory.c:6195
+   handle_mm_fault+0x40a/0x8e0 mm/memory.c:6364
+   do_user_addr_fault+0x764/0x1390 arch/x86/mm/fault.c:1387
+   handle_page_fault arch/x86/mm/fault.c:1476 [inline]
+   exc_page_fault+0x76/0xf0 arch/x86/mm/fault.c:1532
+   asm_exc_page_fault+0x26/0x30 arch/x86/include/asm/idtentry.h:623
+RIP: 0010:copy_user_generic arch/x86/include/asm/uaccess_64.h:126 [inline]
+RIP: 0010:raw_copy_to_user arch/x86/include/asm/uaccess_64.h:147 [inline]
+RIP: 0010:_inline_copy_to_user include/linux/uaccess.h:197 [inline]
+RIP: 0010:_copy_to_user+0x85/0xb0 lib/usercopy.c:26
+Code: e8 00 bc f7 fc 4d 39 fc 72 3d 4d 39 ec 77 38 e8 91 b9 f7 fc 4c 89
+f7 89 de e8 47 25 5b fd 0f 01 cb 4c 89 ff 48 89 d9 4c 89 f6 <f3> a4 0f
+1f 00 48 89 cb 0f 01 ca 48 89 d8 5b 41 5c 41 5d 41 5e 41
+RSP: 0018:ffffc9000403f950 EFLAGS: 00050256
+RAX: ffffffff84c7f101 RBX: 0000000000000038 RCX: 0000000000000038
+RDX: 0000000000000000 RSI: ffffc9000403f9e0 RDI: 0000200000000060
+RBP: ffffc9000403fa90 R08: ffffc9000403fa17 R09: 1ffff92000807f42
+R10: dffffc0000000000 R11: fffff52000807f43 R12: 0000200000000098
+R13: 00007ffffffff000 R14: ffffc9000403f9e0 R15: 0000200000000060
+   copy_to_user include/linux/uaccess.h:225 [inline]
+   fiemap_fill_next_extent+0x1c0/0x390 fs/ioctl.c:145
+   ocfs2_fiemap+0x888/0xc90 fs/ocfs2/extent_map.c:806
+   ioctl_fiemap fs/ioctl.c:220 [inline]
+   do_vfs_ioctl+0x1173/0x1430 fs/ioctl.c:532
+   __do_sys_ioctl fs/ioctl.c:596 [inline]
+   __se_sys_ioctl+0x82/0x170 fs/ioctl.c:584
+   do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+   do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+   entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f5f13850fd9
+RSP: 002b:00007ffe3b3518b8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 0000200000000000 RCX: 00007f5f13850fd9
+RDX: 0000200000000040 RSI: 00000000c020660b RDI: 0000000000000004
+RBP: 6165627472616568 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffe3b3518f0
+R13: 00007ffe3b351b18 R14: 431bde82d7b634db R15: 00007f5f1389a03b
+
+ocfs2_fiemap() takes a read lock of the ip_alloc_sem semaphore (since
+v2.6.22-527-g7307de80510a) and calls fiemap_fill_next_extent() to read the
+extent list of this running mmap executable.  The user-supplied buffer to
+hold the fiemap information page faults, calling ocfs2_page_mkwrite(), which
+will take a write lock (since v2.6.27-38-g00dc417fa3e7) of the same
+semaphore.  This recursive semaphore acquisition holds filesystem locks and
+causes a hang of the filesystem.
+
+The ip_alloc_sem protects the inode extent list and size.  Release the
+read semaphore before calling fiemap_fill_next_extent() in ocfs2_fiemap()
+and ocfs2_fiemap_inline().  This does an unnecessary semaphore lock/unlock
+on the last extent but simplifies the error path.
+
+Link: https://lkml.kernel.org/r/61d1a62b-2631-4f12-81e2-cd689914360b@oracle.com
+Fixes: 00dc417fa3e7 ("ocfs2: fiemap support")
+Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com>
+Reported-by: syzbot+541dcc6ee768f77103e7@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=541dcc6ee768f77103e7
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/extent_map.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -706,6 +706,8 @@ out:
+  * it not only handles the fiemap for inlined files, but also deals
+  * with the fast symlink, cause they have no difference for extent
+  * mapping per se.
++ *
++ * Must be called with ip_alloc_sem semaphore held.
+  */
+ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+                              struct fiemap_extent_info *fieinfo,
+@@ -717,6 +719,7 @@ static int ocfs2_fiemap_inline(struct in
+       u64 phys;
+       u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
+       struct ocfs2_inode_info *oi = OCFS2_I(inode);
++      lockdep_assert_held_read(&oi->ip_alloc_sem);
+       di = (struct ocfs2_dinode *)di_bh->b_data;
+       if (ocfs2_inode_is_fast_symlink(inode))
+@@ -732,8 +735,11 @@ static int ocfs2_fiemap_inline(struct in
+                       phys += offsetof(struct ocfs2_dinode,
+                                        id2.i_data.id_data);
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
+                                             flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret < 0)
+                       return ret;
+       }
+@@ -802,9 +808,11 @@ int ocfs2_fiemap(struct inode *inode, st
+               len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
+               phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
+               virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
+-
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
+                                             len_bytes, fe_flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret)
+                       break;
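
The fix follows a common pattern: drop the semaphore across any call that copies to
user space (and can therefore fault and re-enter the filesystem), then retake it before
touching the extent list again. A condensed sketch of that pattern, not the literal
ocfs2 code path (have_more_extents() and the extent variables are placeholders):

        down_read(&oi->ip_alloc_sem);
        while (have_more_extents(inode)) {      /* placeholder iterator */
                /* read the next extent record while the lock is held ... */

                up_read(&oi->ip_alloc_sem);     /* copy_to_user() below may fault and
                                                   take ip_alloc_sem for write */
                ret = fiemap_fill_next_extent(fieinfo, virt, phys, len, flags);
                down_read(&oi->ip_alloc_sem);
                if (ret)
                        break;
        }
        up_read(&oi->ip_alloc_sem);
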
diff --git a/queue-6.16/pm-em-add-function-for-registering-a-pd-without-capacity-update.patch b/queue-6.16/pm-em-add-function-for-registering-a-pd-without-capacity-update.patch
new file mode 100644 (file)
index 0000000..66573e8
--- /dev/null
@@ -0,0 +1,132 @@
+From e0423541477dfb684fbc6e6b5386054bc650f264 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 5 Sep 2025 15:44:45 +0200
+Subject: PM: EM: Add function for registering a PD without capacity update
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit e0423541477dfb684fbc6e6b5386054bc650f264 upstream.
+
+The intel_pstate driver manages CPU capacity changes itself and it does
+not need an update of the capacity of all CPUs in the system to be
+carried out after registering a PD.
+
+Moreover, in some configurations (for instance, an SMT-capable
+hybrid x86 system booted with nosmt in the kernel command line) the
+em_check_capacity_update() call at the end of em_dev_register_perf_domain()
+always fails and reschedules itself to run once again in 1 s, so
+effectively it runs in vain every 1 s forever.
+
+To address this, introduce a new variant of em_dev_register_perf_domain(),
+called em_dev_register_pd_no_update(), that does not invoke
+em_check_capacity_update(), and make intel_pstate use it instead of the
+original.
+
+Fixes: 7b010f9b9061 ("cpufreq: intel_pstate: EAS support for hybrid platforms")
+Closes: https://lore.kernel.org/linux-pm/40212796-734c-4140-8a85-854f72b8144d@panix.com/
+Reported-by: Kenneth R. Crudup <kenny@panix.com>
+Tested-by: Kenneth R. Crudup <kenny@panix.com>
+Cc: 6.16+ <stable@vger.kernel.org> # 6.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |  4 ++--
+ include/linux/energy_model.h   | 10 ++++++++++
+ kernel/power/energy_model.c    | 29 +++++++++++++++++++++++++----
+ 3 files changed, 37 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index f366d35c5840..0d5d283a5429 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1034,8 +1034,8 @@ static bool hybrid_register_perf_domain(unsigned int cpu)
+       if (!cpu_dev)
+               return false;
+-      if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+-                                      cpumask_of(cpu), false))
++      if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
++                                       cpumask_of(cpu), false))
+               return false;
+       cpudata->pd_registered = true;
+diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
+index 7fa1eb3cc823..61d50571ad88 100644
+--- a/include/linux/energy_model.h
++++ b/include/linux/energy_model.h
+@@ -171,6 +171,9 @@ int em_dev_update_perf_domain(struct device *dev,
+ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                               const struct em_data_callback *cb,
+                               const cpumask_t *cpus, bool microwatts);
++int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
++                               const struct em_data_callback *cb,
++                               const cpumask_t *cpus, bool microwatts);
+ void em_dev_unregister_perf_domain(struct device *dev);
+ struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
+ void em_table_free(struct em_perf_table *table);
+@@ -350,6 +353,13 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ {
+       return -EINVAL;
+ }
++static inline
++int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
++                               const struct em_data_callback *cb,
++                               const cpumask_t *cpus, bool microwatts)
++{
++      return -EINVAL;
++}
+ static inline void em_dev_unregister_perf_domain(struct device *dev)
+ {
+ }
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index ea7995a25780..8df55397414a 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -552,6 +552,30 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
+ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                               const struct em_data_callback *cb,
+                               const cpumask_t *cpus, bool microwatts)
++{
++      int ret = em_dev_register_pd_no_update(dev, nr_states, cb, cpus, microwatts);
++
++      if (_is_cpu_device(dev))
++              em_check_capacity_update();
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
++
++/**
++ * em_dev_register_pd_no_update() - Register a perf domain for a device
++ * @dev : Device to register the PD for
++ * @nr_states : Number of performance states in the new PD
++ * @cb : Callback functions for populating the energy model
++ * @cpus : CPUs to include in the new PD (mandatory if @dev is a CPU device)
++ * @microwatts : Whether or not the power values in the EM will be in uW
++ *
++ * Like em_dev_register_perf_domain(), but does not trigger a CPU capacity
++ * update after registering the PD, even if @dev is a CPU device.
++ */
++int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
++                               const struct em_data_callback *cb,
++                               const cpumask_t *cpus, bool microwatts)
+ {
+       struct em_perf_table *em_table;
+       unsigned long cap, prev_cap = 0;
+@@ -636,12 +660,9 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ unlock:
+       mutex_unlock(&em_pd_mutex);
+-      if (_is_cpu_device(dev))
+-              em_check_capacity_update();
+-
+       return ret;
+ }
+-EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
++EXPORT_SYMBOL_GPL(em_dev_register_pd_no_update);
+ /**
+  * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
+-- 
+2.51.0
+
diff --git a/queue-6.16/pm-hibernate-restrict-gfp-mask-in-hibernation_snapshot.patch b/queue-6.16/pm-hibernate-restrict-gfp-mask-in-hibernation_snapshot.patch
new file mode 100644 (file)
index 0000000..775c981
--- /dev/null
@@ -0,0 +1,49 @@
+From 449c9c02537a146ac97ef962327a221e21c9cab3 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 10 Sep 2025 11:41:59 +0200
+Subject: PM: hibernate: Restrict GFP mask in hibernation_snapshot()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 449c9c02537a146ac97ef962327a221e21c9cab3 upstream.
+
+Commit 12ffc3b1513e ("PM: Restrict swap use to later in the suspend
+sequence") incorrectly removed a pm_restrict_gfp_mask() call from
+hibernation_snapshot(), so memory allocations involving swap are not
+prevented from being carried out in this code path any more which may
+lead to serious breakage.
+
+The symptoms of such breakage have become visible after adding a
+shrink_shmem_memory() call to hibernation_snapshot() in commit
+2640e819474f ("PM: hibernate: shrink shmem pages after dev_pm_ops.prepare()")
+which caused this problem to be much more likely to manifest itself.
+
+However, since commit 2640e819474f was initially present in the DRM
+tree that did not include commit 12ffc3b1513e, the symptoms of this
+issue were not visible until merge commit 260f6f4fda93 ("Merge tag
+'drm-next-2025-07-30' of https://gitlab.freedesktop.org/drm/kernel")
+that exposed it through an entirely reasonable merge conflict
+resolution.
+
+Fixes: 12ffc3b1513e ("PM: Restrict swap use to later in the suspend sequence")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220555
+Reported-by: Todd Brandt <todd.e.brandt@linux.intel.com>
+Tested-by: Todd Brandt <todd.e.brandt@linux.intel.com>
+Cc: 6.16+ <stable@vger.kernel.org> # 6.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/hibernate.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -423,6 +423,7 @@ int hibernation_snapshot(int platform_mo
+       }
+       console_suspend_all();
++      pm_restrict_gfp_mask();
+       error = dpm_suspend(PMSG_FREEZE);
diff --git a/queue-6.16/revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch b/queue-6.16/revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch
new file mode 100644 (file)
index 0000000..4467387
--- /dev/null
@@ -0,0 +1,35 @@
+From 199cd9e8d14bc14bdbd1fa3031ce26dac9781507 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 3 Sep 2025 09:49:33 -0400
+Subject: Revert "SUNRPC: Don't allow waiting for exiting tasks"
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 199cd9e8d14bc14bdbd1fa3031ce26dac9781507 upstream.
+
+This reverts commit 14e41b16e8cb677bb440dca2edba8b041646c742.
+
+This patch breaks the LTP acct02 test, so let's revert and look for a
+better solution.
+
+Reported-by: Mark Brown <broonie@kernel.org>
+Reported-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Link: https://lore.kernel.org/linux-nfs/7d4d57b0-39a3-49f1-8ada-60364743e3b4@sirena.org.uk/
+Cc: stable@vger.kernel.org # 6.15.x
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/sched.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
+-      if (unlikely(current->flags & PF_EXITING))
+-              return -EINTR;
+       schedule();
+       if (signal_pending_state(mode, current))
+               return -ERESTARTSYS;
diff --git a/queue-6.16/s390-kexec-initialize-kexec_buf-struct.patch b/queue-6.16/s390-kexec-initialize-kexec_buf-struct.patch
new file mode 100644 (file)
index 0000000..9ef20e1
--- /dev/null
@@ -0,0 +1,110 @@
+From e67f0bd05519012eaabaae68618ffc4ed30ab680 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Wed, 27 Aug 2025 03:42:23 -0700
+Subject: s390: kexec: initialize kexec_buf struct
+
+From: Breno Leitao <leitao@debian.org>
+
+commit e67f0bd05519012eaabaae68618ffc4ed30ab680 upstream.
+
+The kexec_buf structure was previously declared without initialization.
+commit bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
+added a field that is always read but not consistently populated by all
+architectures. This uninitialized field will contain garbage.
+
+This also triggers a UBSAN warning when the uninitialized data is
+accessed:
+
+       ------------[ cut here ]------------
+       UBSAN: invalid-load in ./include/linux/kexec.h:210:10
+       load of value 252 is not a valid value for type '_Bool'
+
+Zero-initializing kexec_buf at declaration ensures all fields are
+cleanly set, preventing future instances of uninitialized memory being
+used.
+
+Link: https://lkml.kernel.org/r/20250827-kbuf_all-v1-3-1df9882bb01a@debian.org
+Fixes: bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/kexec_elf.c          | 2 +-
+ arch/s390/kernel/kexec_image.c        | 2 +-
+ arch/s390/kernel/machine_kexec_file.c | 6 +++---
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 4d364de43799..143e34a4eca5 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -16,7 +16,7 @@
+ static int kexec_file_add_kernel_elf(struct kimage *image,
+                                    struct s390_load_data *data)
+ {
+-      struct kexec_buf buf;
++      struct kexec_buf buf = {};
+       const Elf_Ehdr *ehdr;
+       const Elf_Phdr *phdr;
+       Elf_Addr entry;
+diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c
+index a32ce8bea745..9a439175723c 100644
+--- a/arch/s390/kernel/kexec_image.c
++++ b/arch/s390/kernel/kexec_image.c
+@@ -16,7 +16,7 @@
+ static int kexec_file_add_kernel_image(struct kimage *image,
+                                      struct s390_load_data *data)
+ {
+-      struct kexec_buf buf;
++      struct kexec_buf buf = {};
+       buf.image = image;
+diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
+index c2bac14dd668..a36d7311c668 100644
+--- a/arch/s390/kernel/machine_kexec_file.c
++++ b/arch/s390/kernel/machine_kexec_file.c
+@@ -129,7 +129,7 @@ static int kexec_file_update_purgatory(struct kimage *image,
+ static int kexec_file_add_purgatory(struct kimage *image,
+                                   struct s390_load_data *data)
+ {
+-      struct kexec_buf buf;
++      struct kexec_buf buf = {};
+       int ret;
+       buf.image = image;
+@@ -152,7 +152,7 @@ static int kexec_file_add_purgatory(struct kimage *image,
+ static int kexec_file_add_initrd(struct kimage *image,
+                                struct s390_load_data *data)
+ {
+-      struct kexec_buf buf;
++      struct kexec_buf buf = {};
+       int ret;
+       buf.image = image;
+@@ -184,7 +184,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
+ {
+       __u32 *lc_ipl_parmblock_ptr;
+       unsigned int len, ncerts;
+-      struct kexec_buf buf;
++      struct kexec_buf buf = {};
+       unsigned long addr;
+       void *ptr, *end;
+       int ret;
+-- 
+2.51.0
+
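
The change is purely about initialization: an automatic struct kexec_buf declared
without an initializer has indeterminate contents, so any field the architecture code
never assigns (such as the randomization-related field mentioned above) is read as
garbage. The empty initializer guarantees every member starts from zero. A two-line
illustration of the difference (not part of the patch):

        struct kexec_buf buf_uninit;   /* automatic storage: members are indeterminate  */
        struct kexec_buf buf = {};     /* every member zero-initialized (0/NULL/false)  */
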
index ef6960ab6add771cca8f86686136b578cb3e9890..5eb1edf09debd52ee6815edfa1b4c985ea6ba7d4 100644 (file)
@@ -45,3 +45,52 @@ bpf-allow-fall-back-to-interpreter-for-programs-with.patch
 bpf-tell-memcg-to-use-allow_spinning-false-path-in-b.patch
 tcp_bpf-call-sk_msg_free-when-tcp_bpf_send_verdict-f.patch
 proc-fix-type-confusion-in-pde_set_flags.patch
+edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch
+i2c-rtl9300-fix-channel-number-bound-check.patch
+revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch
+compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch
+arm64-kexec-initialize-kexec_buf-struct-in-load_other_segments.patch
+mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch
+doc-mptcp-net.mptcp.pm_type-is-deprecated.patch
+netlink-specs-mptcp-fix-if-idx-attribute-type.patch
+ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch
+btrfs-fix-squota-compressed-stats-leak.patch
+btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch
+i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch
+i2c-rtl9300-ensure-data-length-is-within-supported-range.patch
+i2c-rtl9300-remove-broken-smbus-quick-operation-support.patch
+net-libwx-fix-to-enable-rss.patch
+net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+pm-em-add-function-for-registering-a-pd-without-capacity-update.patch
+pm-hibernate-restrict-gfp-mask-in-hibernation_snapshot.patch
+wifi-iwlwifi-fix-130-1030-configs.patch
+s390-kexec-initialize-kexec_buf-struct.patch
+smb-client-fix-compound-alignment-with-encryption.patch
+smb-client-fix-data-loss-due-to-broken-rename-2.patch
+mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
+mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
+mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch
+fuse-do-not-allow-mapping-a-non-regular-backing-file.patch
+fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch
+fuse-prevent-overflow-in-copy_file_range-return-value.patch
+mm-hugetlb-add-missing-hugetlb_lock-in-__unmap_hugepage_range.patch
+mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch
+mm-vmalloc-mm-kasan-respect-gfp-mask-in-kasan_populate_vmalloc.patch
+mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
+mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch
+mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
+mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch
+drm-mediatek-fix-potential-of-node-use-after-free.patch
+drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch
+drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch
+drm-xe-allow-the-pm-notifier-to-continue-on-failure.patch
+drm-xe-block-exec-and-rebind-worker-while-evicting-for-suspend-hibernate.patch
+drm-amdgpu-fix-a-memory-leak-in-fence-cleanup-when-unloading.patch
+drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch
+drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch
+drm-amd-display-correct-sequences-and-delays-for-dcn35-pg-rcg.patch
+drm-amd-display-remove-oem-i2c-adapter-on-finish.patch
+drm-edid-define-the-quirks-in-an-enum-list.patch
+drm-edid-add-support-for-quirks-visible-to-drm-core-and-drivers.patch
+drm-dp-add-an-edid-quirk-for-the-dpcd-register-access-probe.patch
+drm-amd-display-disable-dpcd-probe-quirk.patch
diff --git a/queue-6.16/smb-client-fix-compound-alignment-with-encryption.patch b/queue-6.16/smb-client-fix-compound-alignment-with-encryption.patch
new file mode 100644 (file)
index 0000000..8975e19
--- /dev/null
@@ -0,0 +1,66 @@
+From 90f7c100d2dd99d5cd5be950d553edd2647e6cc8 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.org>
+Date: Sat, 6 Sep 2025 21:19:29 -0300
+Subject: smb: client: fix compound alignment with encryption
+
+From: Paulo Alcantara <pc@manguebit.org>
+
+commit 90f7c100d2dd99d5cd5be950d553edd2647e6cc8 upstream.
+
+The encryption layer can't handle the padding iovs, so flatten the
+compound request into a single buffer with required padding to prevent
+the server from dropping the connection when finding unaligned
+compound requests.
+
+Fixes: bc925c1216f0 ("smb: client: improve compound padding in encryption")
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Reviewed-by: David Howells <dhowells@redhat.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2ops.c |   28 +++++++++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2640,13 +2640,35 @@ smb2_set_next_command(struct cifs_tcon *
+       }
+       /* SMB headers in a compound are 8 byte aligned. */
+-      if (!IS_ALIGNED(len, 8)) {
+-              num_padding = 8 - (len & 7);
++      if (IS_ALIGNED(len, 8))
++              goto out;
++
++      num_padding = 8 - (len & 7);
++      if (smb3_encryption_required(tcon)) {
++              int i;
++
++              /*
++               * Flatten request into a single buffer with required padding as
++               * the encryption layer can't handle the padding iovs.
++               */
++              for (i = 1; i < rqst->rq_nvec; i++) {
++                      memcpy(rqst->rq_iov[0].iov_base +
++                             rqst->rq_iov[0].iov_len,
++                             rqst->rq_iov[i].iov_base,
++                             rqst->rq_iov[i].iov_len);
++                      rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
++              }
++              memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
++                     0, num_padding);
++              rqst->rq_iov[0].iov_len += num_padding;
++              rqst->rq_nvec = 1;
++      } else {
+               rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+               rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
+               rqst->rq_nvec++;
+-              len += num_padding;
+       }
++      len += num_padding;
++out:
+       shdr->NextCommand = cpu_to_le32(len);
+ }
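
In isolation, the flattening done by the hunk above is: append every trailing iov to the
first buffer, then add zero padding so the total compound length is 8-byte aligned,
leaving a single iov for the encryption layer. A standalone sketch, assuming
iov[0].iov_base was allocated with room for the whole compound plus padding:

        /* Sketch only: collapse iov[1..nvec-1] into iov[0] and pad to 8 bytes. */
        static void flatten_and_pad(struct kvec *iov, unsigned int *nvec, size_t *len)
        {
                size_t pad = (8 - (*len & 7)) & 7;
                unsigned int i;

                for (i = 1; i < *nvec; i++) {
                        memcpy(iov[0].iov_base + iov[0].iov_len,
                               iov[i].iov_base, iov[i].iov_len);
                        iov[0].iov_len += iov[i].iov_len;
                }
                memset(iov[0].iov_base + iov[0].iov_len, 0, pad);
                iov[0].iov_len += pad;
                *len += pad;
                *nvec = 1;
        }
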
diff --git a/queue-6.16/smb-client-fix-data-loss-due-to-broken-rename-2.patch b/queue-6.16/smb-client-fix-data-loss-due-to-broken-rename-2.patch
new file mode 100644 (file)
index 0000000..3d25440
--- /dev/null
@@ -0,0 +1,658 @@
+From c5ea3065586d790ea5193a679b85585173d59866 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.org>
+Date: Sun, 7 Sep 2025 21:24:06 -0300
+Subject: smb: client: fix data loss due to broken rename(2)
+
+From: Paulo Alcantara <pc@manguebit.org>
+
+commit c5ea3065586d790ea5193a679b85585173d59866 upstream.
+
+Rename of open files in SMB2+ has been broken for a very long time,
+resulting in data loss as the CIFS client would fail the rename(2)
+call with -ENOENT and then removing the target file.
+
+Fix this by implementing ->rename_pending_delete() for SMB2+, which
+will rename busy files to random filenames (e.g. silly rename) during
+unlink(2) or rename(2), and then marking them to delete-on-close.
+
+Besides, introduce a FIND_WR_NO_PENDING_DELETE flag to prevent open(2)
+from reusing open handles that had been marked as delete pending.
+Handle it in cifs_get_readable_path() as well.
+
+Reported-by: Jean-Baptiste Denis <jbdenis@pasteur.fr>
+Closes: https://marc.info/?i=16aeb380-30d4-4551-9134-4e7d1dc833c0@pasteur.fr
+Reviewed-by: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Cc: Frank Sorenson <sorenson@redhat.com>
+Cc: Olga Kornievskaia <okorniev@redhat.com>
+Cc: Benjamin Coddington <bcodding@redhat.com>
+Cc: Scott Mayhew <smayhew@redhat.com>
+Cc: linux-cifs@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsglob.h  |   13 ++
+ fs/smb/client/file.c      |   18 +++-
+ fs/smb/client/inode.c     |   86 +++++++++++++++----
+ fs/smb/client/smb2glob.h  |    3 
+ fs/smb/client/smb2inode.c |  204 ++++++++++++++++++++++++++++++++++++----------
+ fs/smb/client/smb2ops.c   |    4 
+ fs/smb/client/smb2proto.h |    3 
+ fs/smb/client/trace.h     |    9 --
+ 8 files changed, 268 insertions(+), 72 deletions(-)
+
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -87,7 +87,7 @@
+ #define SMB_INTERFACE_POLL_INTERVAL   600
+ /* maximum number of PDUs in one compound */
+-#define MAX_COMPOUND 7
++#define MAX_COMPOUND 10
+ /*
+  * Default number of credits to keep available for SMB3.
+@@ -1877,9 +1877,12 @@ static inline bool is_replayable_error(i
+ /* cifs_get_writable_file() flags */
+-#define FIND_WR_ANY         0
+-#define FIND_WR_FSUID_ONLY  1
+-#define FIND_WR_WITH_DELETE 2
++enum cifs_writable_file_flags {
++      FIND_WR_ANY                     = 0U,
++      FIND_WR_FSUID_ONLY              = (1U << 0),
++      FIND_WR_WITH_DELETE             = (1U << 1),
++      FIND_WR_NO_PENDING_DELETE       = (1U << 2),
++};
+ #define   MID_FREE 0
+ #define   MID_REQUEST_ALLOCATED 1
+@@ -2339,6 +2342,8 @@ struct smb2_compound_vars {
+       struct kvec qi_iov;
+       struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+       struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
++      struct kvec unlink_iov[SMB2_SET_INFO_IOV_SIZE];
++      struct kvec rename_iov[SMB2_SET_INFO_IOV_SIZE];
+       struct kvec close_iov;
+       struct smb2_file_rename_info_hdr rename_info;
+       struct smb2_file_link_info_hdr link_info;
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -998,7 +998,10 @@ int cifs_open(struct inode *inode, struc
+       /* Get the cached handle as SMB2 close is deferred */
+       if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
+-              rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
++              rc = cifs_get_writable_path(tcon, full_path,
++                                          FIND_WR_FSUID_ONLY |
++                                          FIND_WR_NO_PENDING_DELETE,
++                                          &cfile);
+       } else {
+               rc = cifs_get_readable_path(tcon, full_path, &cfile);
+       }
+@@ -2530,6 +2533,9 @@ refind_writable:
+                       continue;
+               if (with_delete && !(open_file->fid.access & DELETE))
+                       continue;
++              if ((flags & FIND_WR_NO_PENDING_DELETE) &&
++                  open_file->status_file_deleted)
++                      continue;
+               if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+                       if (!open_file->invalidHandle) {
+                               /* found a good writable file */
+@@ -2647,6 +2653,16 @@ cifs_get_readable_path(struct cifs_tcon
+               spin_unlock(&tcon->open_file_lock);
+               free_dentry_path(page);
+               *ret_file = find_readable_file(cinode, 0);
++              if (*ret_file) {
++                      spin_lock(&cinode->open_file_lock);
++                      if ((*ret_file)->status_file_deleted) {
++                              spin_unlock(&cinode->open_file_lock);
++                              cifsFileInfo_put(*ret_file);
++                              *ret_file = NULL;
++                      } else {
++                              spin_unlock(&cinode->open_file_lock);
++                      }
++              }
+               return *ret_file ? 0 : -ENOENT;
+       }
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1931,7 +1931,7 @@ cifs_drop_nlink(struct inode *inode)
+  * but will return the EACCES to the caller. Note that the VFS does not call
+  * unlink on negative dentries currently.
+  */
+-int cifs_unlink(struct inode *dir, struct dentry *dentry)
++static int __cifs_unlink(struct inode *dir, struct dentry *dentry, bool sillyrename)
+ {
+       int rc = 0;
+       unsigned int xid;
+@@ -2003,7 +2003,11 @@ retry_std_delete:
+               goto psx_del_no_retry;
+       }
+-      rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry);
++      if (sillyrename || (server->vals->protocol_id > SMB10_PROT_ID &&
++                          d_is_positive(dentry) && d_count(dentry) > 2))
++              rc = -EBUSY;
++      else
++              rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry);
+ psx_del_no_retry:
+       if (!rc) {
+@@ -2071,6 +2075,11 @@ unlink_out:
+       return rc;
+ }
++int cifs_unlink(struct inode *dir, struct dentry *dentry)
++{
++      return __cifs_unlink(dir, dentry, false);
++}
++
+ static int
+ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
+                const char *full_path, struct cifs_sb_info *cifs_sb,
+@@ -2358,14 +2367,16 @@ int cifs_rmdir(struct inode *inode, stru
+       rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
+       cifs_put_tlink(tlink);
++      cifsInode = CIFS_I(d_inode(direntry));
++
+       if (!rc) {
++              set_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags);
+               spin_lock(&d_inode(direntry)->i_lock);
+               i_size_write(d_inode(direntry), 0);
+               clear_nlink(d_inode(direntry));
+               spin_unlock(&d_inode(direntry)->i_lock);
+       }
+-      cifsInode = CIFS_I(d_inode(direntry));
+       /* force revalidate to go get info when needed */
+       cifsInode->time = 0;
+@@ -2458,8 +2469,11 @@ cifs_do_rename(const unsigned int xid, s
+       }
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+ do_rename_exit:
+-      if (rc == 0)
++      if (rc == 0) {
+               d_move(from_dentry, to_dentry);
++              /* Force a new lookup */
++              d_drop(from_dentry);
++      }
+       cifs_put_tlink(tlink);
+       return rc;
+ }
+@@ -2470,6 +2484,7 @@ cifs_rename2(struct mnt_idmap *idmap, st
+            struct dentry *target_dentry, unsigned int flags)
+ {
+       const char *from_name, *to_name;
++      struct TCP_Server_Info *server;
+       void *page1, *page2;
+       struct cifs_sb_info *cifs_sb;
+       struct tcon_link *tlink;
+@@ -2505,6 +2520,7 @@ cifs_rename2(struct mnt_idmap *idmap, st
+       if (IS_ERR(tlink))
+               return PTR_ERR(tlink);
+       tcon = tlink_tcon(tlink);
++      server = tcon->ses->server;
+       page1 = alloc_dentry_path();
+       page2 = alloc_dentry_path();
+@@ -2591,19 +2607,53 @@ cifs_rename2(struct mnt_idmap *idmap, st
+ unlink_target:
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-      /* Try unlinking the target dentry if it's not negative */
+-      if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+-              if (d_is_dir(target_dentry))
+-                      tmprc = cifs_rmdir(target_dir, target_dentry);
+-              else
+-                      tmprc = cifs_unlink(target_dir, target_dentry);
+-              if (tmprc)
+-                      goto cifs_rename_exit;
+-              rc = cifs_do_rename(xid, source_dentry, from_name,
+-                                  target_dentry, to_name);
+-              if (!rc)
+-                      rehash = false;
++      if (d_really_is_positive(target_dentry)) {
++              if (!rc) {
++                      struct inode *inode = d_inode(target_dentry);
++                      /*
++                       * Samba and ksmbd servers allow renaming a target
++                       * directory that is open, so make sure to update
++                       * ->i_nlink and then mark it as delete pending.
++                       */
++                      if (S_ISDIR(inode->i_mode)) {
++                              drop_cached_dir_by_name(xid, tcon, to_name, cifs_sb);
++                              spin_lock(&inode->i_lock);
++                              i_size_write(inode, 0);
++                              clear_nlink(inode);
++                              spin_unlock(&inode->i_lock);
++                              set_bit(CIFS_INO_DELETE_PENDING, &CIFS_I(inode)->flags);
++                              CIFS_I(inode)->time = 0; /* force reval */
++                              inode_set_ctime_current(inode);
++                              inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
++                      }
++              } else if (rc == -EACCES || rc == -EEXIST) {
++                      /*
++                       * Rename failed, possibly due to a busy target.
++                       * Retry it by unlinking the target first.
++                       */
++                      if (d_is_dir(target_dentry)) {
++                              tmprc = cifs_rmdir(target_dir, target_dentry);
++                      } else {
++                              tmprc = __cifs_unlink(target_dir, target_dentry,
++                                                    server->vals->protocol_id > SMB10_PROT_ID);
++                      }
++                      if (tmprc) {
++                              /*
++                               * Some servers will return STATUS_ACCESS_DENIED
++                               * or STATUS_DIRECTORY_NOT_EMPTY when failing to
++                               * rename a non-empty directory.  Make sure to
++                               * propagate the appropriate error back to
++                               * userspace.
++                               */
++                              if (tmprc == -EEXIST || tmprc == -ENOTEMPTY)
++                                      rc = tmprc;
++                              goto cifs_rename_exit;
++                      }
++                      rc = cifs_do_rename(xid, source_dentry, from_name,
++                                          target_dentry, to_name);
++                      if (!rc)
++                              rehash = false;
++              }
+       }
+       /* force revalidate to go get info when needed */
+@@ -2629,6 +2679,8 @@ cifs_dentry_needs_reval(struct dentry *d
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cached_fid *cfid = NULL;
++      if (test_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags))
++              return false;
+       if (cifs_i->time == 0)
+               return true;
+--- a/fs/smb/client/smb2glob.h
++++ b/fs/smb/client/smb2glob.h
+@@ -30,10 +30,9 @@ enum smb2_compound_ops {
+       SMB2_OP_QUERY_DIR,
+       SMB2_OP_MKDIR,
+       SMB2_OP_RENAME,
+-      SMB2_OP_DELETE,
+       SMB2_OP_HARDLINK,
+       SMB2_OP_SET_EOF,
+-      SMB2_OP_RMDIR,
++      SMB2_OP_UNLINK,
+       SMB2_OP_POSIX_QUERY_INFO,
+       SMB2_OP_SET_REPARSE,
+       SMB2_OP_GET_REPARSE,
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -346,9 +346,6 @@ replay_again:
+                       trace_smb3_posix_query_info_compound_enter(xid, tcon->tid,
+                                                                  ses->Suid, full_path);
+                       break;
+-              case SMB2_OP_DELETE:
+-                      trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path);
+-                      break;
+               case SMB2_OP_MKDIR:
+                       /*
+                        * Directories are created through parameters in the
+@@ -356,23 +353,40 @@ replay_again:
+                        */
+                       trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path);
+                       break;
+-              case SMB2_OP_RMDIR:
+-                      rqst[num_rqst].rq_iov = &vars->si_iov[0];
++              case SMB2_OP_UNLINK:
++                      rqst[num_rqst].rq_iov = vars->unlink_iov;
+                       rqst[num_rqst].rq_nvec = 1;
+                       size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
+                       data[0] = &delete_pending[0];
+-                      rc = SMB2_set_info_init(tcon, server,
+-                                              &rqst[num_rqst], COMPOUND_FID,
+-                                              COMPOUND_FID, current->tgid,
+-                                              FILE_DISPOSITION_INFORMATION,
+-                                              SMB2_O_INFO_FILE, 0, data, size);
+-                      if (rc)
++                      if (cfile) {
++                              rc = SMB2_set_info_init(tcon, server,
++                                                      &rqst[num_rqst],
++                                                      cfile->fid.persistent_fid,
++                                                      cfile->fid.volatile_fid,
++                                                      current->tgid,
++                                                      FILE_DISPOSITION_INFORMATION,
++                                                      SMB2_O_INFO_FILE, 0,
++                                                      data, size);
++                      } else {
++                              rc = SMB2_set_info_init(tcon, server,
++                                                      &rqst[num_rqst],
++                                                      COMPOUND_FID,
++                                                      COMPOUND_FID,
++                                                      current->tgid,
++                                                      FILE_DISPOSITION_INFORMATION,
++                                                      SMB2_O_INFO_FILE, 0,
++                                                      data, size);
++                      }
++                      if (!rc && (!cfile || num_rqst > 1)) {
++                              smb2_set_next_command(tcon, &rqst[num_rqst]);
++                              smb2_set_related(&rqst[num_rqst]);
++                      } else if (rc) {
+                               goto finished;
+-                      smb2_set_next_command(tcon, &rqst[num_rqst]);
+-                      smb2_set_related(&rqst[num_rqst++]);
+-                      trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path);
++                      }
++                      num_rqst++;
++                      trace_smb3_unlink_enter(xid, tcon->tid, ses->Suid, full_path);
+                       break;
+               case SMB2_OP_SET_EOF:
+                       rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -442,7 +456,7 @@ replay_again:
+                                                          ses->Suid, full_path);
+                       break;
+               case SMB2_OP_RENAME:
+-                      rqst[num_rqst].rq_iov = &vars->si_iov[0];
++                      rqst[num_rqst].rq_iov = vars->rename_iov;
+                       rqst[num_rqst].rq_nvec = 2;
+                       len = in_iov[i].iov_len;
+@@ -732,19 +746,6 @@ finished:
+                               trace_smb3_posix_query_info_compound_done(xid, tcon->tid,
+                                                                         ses->Suid);
+                       break;
+-              case SMB2_OP_DELETE:
+-                      if (rc)
+-                              trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc);
+-                      else {
+-                              /*
+-                               * If dentry (hence, inode) is NULL, lease break is going to
+-                               * take care of degrading leases on handles for deleted files.
+-                               */
+-                              if (inode)
+-                                      cifs_mark_open_handles_for_deleted_file(inode, full_path);
+-                              trace_smb3_delete_done(xid, tcon->tid, ses->Suid);
+-                      }
+-                      break;
+               case SMB2_OP_MKDIR:
+                       if (rc)
+                               trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc);
+@@ -765,11 +766,11 @@ finished:
+                               trace_smb3_rename_done(xid, tcon->tid, ses->Suid);
+                       SMB2_set_info_free(&rqst[num_rqst++]);
+                       break;
+-              case SMB2_OP_RMDIR:
+-                      if (rc)
+-                              trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc);
++              case SMB2_OP_UNLINK:
++                      if (!rc)
++                              trace_smb3_unlink_done(xid, tcon->tid, ses->Suid);
+                       else
+-                              trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid);
++                              trace_smb3_unlink_err(xid, tcon->tid, ses->Suid, rc);
+                       SMB2_set_info_free(&rqst[num_rqst++]);
+                       break;
+               case SMB2_OP_SET_EOF:
+@@ -1165,7 +1166,7 @@ smb2_rmdir(const unsigned int xid, struc
+                            FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE);
+       return smb2_compound_op(xid, tcon, cifs_sb,
+                               name, &oparms, NULL,
+-                              &(int){SMB2_OP_RMDIR}, 1,
++                              &(int){SMB2_OP_UNLINK}, 1,
+                               NULL, NULL, NULL, NULL);
+ }
+@@ -1174,20 +1175,29 @@ smb2_unlink(const unsigned int xid, stru
+           struct cifs_sb_info *cifs_sb, struct dentry *dentry)
+ {
+       struct cifs_open_parms oparms;
++      struct inode *inode = NULL;
++      int rc;
+-      oparms = CIFS_OPARMS(cifs_sb, tcon, name,
+-                           DELETE, FILE_OPEN,
+-                           CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+-                           ACL_NO_MODE);
+-      int rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
+-                                NULL, &(int){SMB2_OP_DELETE}, 1,
+-                                NULL, NULL, NULL, dentry);
++      if (dentry)
++              inode = d_inode(dentry);
++
++      oparms = CIFS_OPARMS(cifs_sb, tcon, name, DELETE,
++                           FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE);
++      rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
++                            NULL, &(int){SMB2_OP_UNLINK},
++                            1, NULL, NULL, NULL, dentry);
+       if (rc == -EINVAL) {
+               cifs_dbg(FYI, "invalid lease key, resending request without lease");
+               rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
+-                                    NULL, &(int){SMB2_OP_DELETE}, 1,
+-                                    NULL, NULL, NULL, NULL);
++                                    NULL, &(int){SMB2_OP_UNLINK},
++                                    1, NULL, NULL, NULL, NULL);
+       }
++      /*
++       * If dentry (hence, inode) is NULL, lease break is going to
++       * take care of degrading leases on handles for deleted files.
++       */
++      if (!rc && inode)
++              cifs_mark_open_handles_for_deleted_file(inode, name);
+       return rc;
+ }
+@@ -1441,3 +1451,113 @@ out:
+       cifs_free_open_info(&data);
+       return rc;
+ }
++
++static inline __le16 *utf16_smb2_path(struct cifs_sb_info *cifs_sb,
++                                    const char *name, size_t namelen)
++{
++      int len;
++
++      if (*name == '\\' ||
++          (cifs_sb_master_tlink(cifs_sb) &&
++           cifs_sb_master_tcon(cifs_sb)->posix_extensions && *name == '/'))
++              name++;
++      return cifs_strndup_to_utf16(name, namelen, &len,
++                                   cifs_sb->local_nls,
++                                   cifs_remap(cifs_sb));
++}
++
++int smb2_rename_pending_delete(const char *full_path,
++                             struct dentry *dentry,
++                             const unsigned int xid)
++{
++      struct cifs_sb_info *cifs_sb = CIFS_SB(d_inode(dentry)->i_sb);
++      struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
++      __le16 *utf16_path __free(kfree) = NULL;
++      __u32 co = file_create_options(dentry);
++      int cmds[] = {
++              SMB2_OP_SET_INFO,
++              SMB2_OP_RENAME,
++              SMB2_OP_UNLINK,
++      };
++      const int num_cmds = ARRAY_SIZE(cmds);
++      char *to_name __free(kfree) = NULL;
++      __u32 attrs = cinode->cifsAttrs;
++      struct cifs_open_parms oparms;
++      static atomic_t sillycounter;
++      struct cifsFileInfo *cfile;
++      struct tcon_link *tlink;
++      struct cifs_tcon *tcon;
++      struct kvec iov[2];
++      const char *ppath;
++      void *page;
++      size_t len;
++      int rc;
++
++      tlink = cifs_sb_tlink(cifs_sb);
++      if (IS_ERR(tlink))
++              return PTR_ERR(tlink);
++      tcon = tlink_tcon(tlink);
++
++      page = alloc_dentry_path();
++
++      ppath = build_path_from_dentry(dentry->d_parent, page);
++      if (IS_ERR(ppath)) {
++              rc = PTR_ERR(ppath);
++              goto out;
++      }
++
++      len = strlen(ppath) + strlen("/.__smb1234") + 1;
++      to_name = kmalloc(len, GFP_KERNEL);
++      if (!to_name) {
++              rc = -ENOMEM;
++              goto out;
++      }
++
++      scnprintf(to_name, len, "%s%c.__smb%04X", ppath, CIFS_DIR_SEP(cifs_sb),
++                atomic_inc_return(&sillycounter) & 0xffff);
++
++      utf16_path = utf16_smb2_path(cifs_sb, to_name, len);
++      if (!utf16_path) {
++              rc = -ENOMEM;
++              goto out;
++      }
++
++      drop_cached_dir_by_name(xid, tcon, full_path, cifs_sb);
++      oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
++                           DELETE | FILE_WRITE_ATTRIBUTES,
++                           FILE_OPEN, co, ACL_NO_MODE);
++
++      attrs &= ~ATTR_READONLY;
++      if (!attrs)
++              attrs = ATTR_NORMAL;
++      if (d_inode(dentry)->i_nlink <= 1)
++              attrs |= ATTR_HIDDEN;
++      iov[0].iov_base = &(FILE_BASIC_INFO) {
++              .Attributes = cpu_to_le32(attrs),
++      };
++      iov[0].iov_len = sizeof(FILE_BASIC_INFO);
++      iov[1].iov_base = utf16_path;
++      iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path);
++
++      cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile);
++      rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
++                            cmds, num_cmds, cfile, NULL, NULL, dentry);
++      if (rc == -EINVAL) {
++              cifs_dbg(FYI, "invalid lease key, resending request without lease\n");
++              cifs_get_writable_path(tcon, full_path,
++                                     FIND_WR_WITH_DELETE, &cfile);
++              rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
++                                    cmds, num_cmds, cfile, NULL, NULL, NULL);
++      }
++      if (!rc) {
++              set_bit(CIFS_INO_DELETE_PENDING, &cinode->flags);
++      } else {
++              cifs_tcon_dbg(FYI, "%s: failed to rename '%s' to '%s': %d\n",
++                            __func__, full_path, to_name, rc);
++              rc = -EIO;
++      }
++out:
++      cifs_put_tlink(tlink);
++      free_dentry_path(page);
++      return rc;
++}
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5399,6 +5399,7 @@ struct smb_version_operations smb20_oper
+       .llseek = smb3_llseek,
+       .is_status_io_timeout = smb2_is_status_io_timeout,
+       .is_network_name_deleted = smb2_is_network_name_deleted,
++      .rename_pending_delete = smb2_rename_pending_delete,
+ };
+ #endif /* CIFS_ALLOW_INSECURE_LEGACY */
+@@ -5504,6 +5505,7 @@ struct smb_version_operations smb21_oper
+       .llseek = smb3_llseek,
+       .is_status_io_timeout = smb2_is_status_io_timeout,
+       .is_network_name_deleted = smb2_is_network_name_deleted,
++      .rename_pending_delete = smb2_rename_pending_delete,
+ };
+ struct smb_version_operations smb30_operations = {
+@@ -5620,6 +5622,7 @@ struct smb_version_operations smb30_oper
+       .llseek = smb3_llseek,
+       .is_status_io_timeout = smb2_is_status_io_timeout,
+       .is_network_name_deleted = smb2_is_network_name_deleted,
++      .rename_pending_delete = smb2_rename_pending_delete,
+ };
+ struct smb_version_operations smb311_operations = {
+@@ -5736,6 +5739,7 @@ struct smb_version_operations smb311_ope
+       .llseek = smb3_llseek,
+       .is_status_io_timeout = smb2_is_status_io_timeout,
+       .is_network_name_deleted = smb2_is_network_name_deleted,
++      .rename_pending_delete = smb2_rename_pending_delete,
+ };
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -320,5 +320,8 @@ int smb2_create_reparse_symlink(const un
+ int smb2_make_nfs_node(unsigned int xid, struct inode *inode,
+                      struct dentry *dentry, struct cifs_tcon *tcon,
+                      const char *full_path, umode_t mode, dev_t dev);
++int smb2_rename_pending_delete(const char *full_path,
++                             struct dentry *dentry,
++                             const unsigned int xid);
+ #endif                        /* _SMB2PROTO_H */
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -669,13 +669,12 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(que
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(posix_query_info_compound_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(unlink_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_reparse_compound_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_wsl_ea_compound_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);
+@@ -710,13 +709,12 @@ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(quer
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(posix_query_info_compound_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(unlink_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_reparse_compound_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(get_reparse_compound_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_wsl_ea_compound_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);
+@@ -756,14 +754,13 @@ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(posix_query_info_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(unlink_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_reparse_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(get_reparse_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_wsl_ea_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);
diff --git a/queue-6.16/wifi-iwlwifi-fix-130-1030-configs.patch b/queue-6.16/wifi-iwlwifi-fix-130-1030-configs.patch
new file mode 100644 (file)
index 0000000..43442c3
--- /dev/null
@@ -0,0 +1,73 @@
+From 2682e7a317504a9d81cbb397249d4299e84dfadd Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 9 Sep 2025 12:17:34 +0300
+Subject: wifi: iwlwifi: fix 130/1030 configs
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 2682e7a317504a9d81cbb397249d4299e84dfadd upstream.
+
+The 130/1030 devices are really derivatives of 6030,
+with some small differences not pertaining to the MAC,
+so they must use the 6030 MAC config.
+
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220472
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220517
+Fixes: 35ac275ebe0c ("wifi: iwlwifi: cfg: finish config split")
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://patch.msgid.link/20250909121728.8e4911f12528.I3aa7194012a4b584fbd5ddaa3a77e483280f1de4@changeid
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 26 +++++++++----------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index f9e2095d6490..7e56e4ff7642 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -124,13 +124,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
+       {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */
+       {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */
+-/* 6x30 Series */
+-      {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)},
++/* 1030/6x30 Series */
++      {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x008A, 0x5327, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x008B, 0x5317, iwl6030_mac_cfg)},
+       {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)},
+       {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)},
+       {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)},
+@@ -181,12 +181,12 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
+       {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},
+ /* 130 Series WiFi */
+-      {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)},
+-      {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0896, 0x5005, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0896, 0x5007, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0897, 0x5015, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0897, 0x5017, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0896, 0x5025, iwl6030_mac_cfg)},
++      {IWL_PCI_DEVICE(0x0896, 0x5027, iwl6030_mac_cfg)},
+ /* 2x00 Series */
+       {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)},
+-- 
+2.51.0
+