--- /dev/null
+From a3a8a80389ec29787febb8aab05f3000e8dce106 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 16:57:58 +0800
+Subject: ksmbd: do not expire session on binding failure
+
+From: Hyunwoo Kim <imv4bel@gmail.com>
+
+[ Upstream commit 9bbb19d21ded7d78645506f20d8c44895e3d0fb9 ]
+
+When a multichannel session binding request fails (e.g. wrong password),
+the error path unconditionally sets sess->state = SMB2_SESSION_EXPIRED.
+However, during binding, sess points to the target session looked up via
+ksmbd_session_lookup_slowpath() -- which belongs to another connection's
+user. This allows a remote attacker to invalidate any active session by
+simply sending a binding request with a wrong password (DoS).
+
+Fix this by skipping session expiration when the failed request was
+a binding attempt, since the session does not belong to the current
+connection. The reference taken by ksmbd_session_lookup_slowpath() is
+still correctly released via ksmbd_user_session_put().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Hyunwoo Kim <imv4bel@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/smb2pdu.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 700c8070f57a7..9fef4d88ee8ba 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1938,8 +1938,14 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+ try_delay = true;
+
+- sess->last_active = jiffies;
+- sess->state = SMB2_SESSION_EXPIRED;
++ /*
++ * For binding requests, session belongs to another
++ * connection. Do not expire it.
++ */
++ if (!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
++ sess->last_active = jiffies;
++ sess->state = SMB2_SESSION_EXPIRED;
++ }
+ ksmbd_user_session_put(sess);
+ work->sess = NULL;
+ if (try_delay) {
+--
+2.53.0
+
x86-cpu-amd-call-the-spectral-chicken-in-the-zen2-init-function.patch
x86-cpu-amd-rename-init_amd_zn-to-init_amd_zen_common.patch
x86-cpu-amd-add-x86_feature_zen1.patch
+ksmbd-do-not-expire-session-on-binding-failure.patch
+spi-meson-spicc-fix-double-put-in-remove-path.patch
+um-virt-pci-fix-build-failure.patch
--- /dev/null
+From c06df52dc48196ed85169d6813099bdcf9ea5b40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 10:32:15 +0800
+Subject: spi: meson-spicc: Fix double-put in remove path
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 63542bb402b7013171c9f621c28b609eda4dbf1f ]
+
+meson_spicc_probe() registers the controller with
+devm_spi_register_controller(), so teardown already drops the
+controller reference via devm cleanup.
+
+Calling spi_controller_put() again in meson_spicc_remove()
+causes a double-put.
+
+Fixes: 8311ee2164c5 ("spi: meson-spicc: fix memory leak in meson_spicc_remove")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260322-rockchip-v1-1-fac3f0c6dad8@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+[ In v5.15, commit 68bf3288c7eb ("spi: meson-spicc: switch to use modern name")
+has not been applied, so the driver still uses the legacy spicc->master field
+and spi_master_put() API. The line to remove is spi_master_put(spicc->master)
+rather than spi_controller_put(spicc->host) as in the upstream patch.
+They are functionally identical. ]
+Signed-off-by: Wenshan Lan <jetlan9@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-meson-spicc.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
+index 6974a1c947aad..ae818e7df7919 100644
+--- a/drivers/spi/spi-meson-spicc.c
++++ b/drivers/spi/spi-meson-spicc.c
+@@ -863,8 +863,6 @@ static int meson_spicc_remove(struct platform_device *pdev)
+ clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
+
+- spi_master_put(spicc->master);
+-
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 4b68c9b1ac1e1ede7f74a2f2ca73515bcd10a296 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 13:52:41 -0700
+Subject: um: virt-pci: Fix build failure
+
+From: Florian Fainelli <florian.fainelli@broadcom.com>
+
+Commit a27e95a6ff3f ("um: virt-pci: properly remove PCI device from
+bus") assumed that virtio_reset_device() is present in the 5.15.y kernel
+but it is not and so backport would now cause a build failure.
+
+Fixes: a27e95a6ff3f ("um: virt-pci: properly remove PCI device from bus")
+Signed-off-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virt-pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index d762d726b66cf..0666c9e0998d1 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -641,7 +641,7 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
+ }
+
+ /* Stop all virtqueues */
+- virtio_reset_device(vdev);
++ vdev->config->reset(vdev);
+ dev->cmd_vq = NULL;
+ dev->irq_vq = NULL;
+ vdev->config->del_vqs(vdev);
+--
+2.53.0
+
--- /dev/null
+From 43e8744d42eaa4c1e6e534f4890ca8be7c0adb68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 15:07:49 +0800
+Subject: drm/amd/display: Do not skip unrelated mode changes in DSC validation
+
+From: Yussuf Khalil <dev@pp3345.net>
+
+[ Upstream commit aed3d041ab061ec8a64f50a3edda0f4db7280025 ]
+
+Starting with commit 17ce8a6907f7 ("drm/amd/display: Add dsc pre-validation in
+atomic check"), amdgpu resets the CRTC state mode_changed flag to false when
+recomputing the DSC configuration results in no timing change for a particular
+stream.
+
+However, this is incorrect in scenarios where a change in MST/DSC configuration
+happens in the same KMS commit as another (unrelated) mode change. For example,
+the integrated panel of a laptop may be configured differently (e.g., HDR
+enabled/disabled) depending on whether external screens are attached. In this
+case, plugging in external DP-MST screens may result in the mode_changed flag
+being dropped incorrectly for the integrated panel if its DSC configuration
+did not change during precomputation in pre_validate_dsc().
+
+At this point, however, dm_update_crtc_state() has already created new streams
+for CRTCs with DSC-independent mode changes. In turn,
+amdgpu_dm_commit_streams() will never release the old stream, resulting in a
+memory leak. amdgpu_dm_atomic_commit_tail() will never acquire a reference to
+the new stream either, which manifests as a use-after-free when the stream gets
+disabled later on:
+
+BUG: KASAN: use-after-free in dc_stream_release+0x25/0x90 [amdgpu]
+Write of size 4 at addr ffff88813d836524 by task kworker/9:9/29977
+
+Workqueue: events drm_mode_rmfb_work_fn
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x6e/0xa0
+ print_address_description.constprop.0+0x88/0x320
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ print_report+0xfc/0x1ff
+ ? srso_alias_return_thunk+0x5/0xfbef5
+ ? __virt_addr_valid+0x225/0x4e0
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ kasan_report+0xe1/0x180
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ kasan_check_range+0x125/0x200
+ dc_stream_release+0x25/0x90 [amdgpu]
+ dc_state_destruct+0x14d/0x5c0 [amdgpu]
+ dc_state_release.part.0+0x4e/0x130 [amdgpu]
+ dm_atomic_destroy_state+0x3f/0x70 [amdgpu]
+ drm_atomic_state_default_clear+0x8ee/0xf30
+ ? drm_mode_object_put.part.0+0xb1/0x130
+ __drm_atomic_state_free+0x15c/0x2d0
+ atomic_remove_fb+0x67e/0x980
+
+Since there is no reliable way of figuring out whether a CRTC has unrelated
+mode changes pending at the time of DSC validation, remember the value of the
+mode_changed flag from before the point where a CRTC was marked as potentially
+affected by a change in DSC configuration. Reset the mode_changed flag to this
+earlier value instead in pre_validate_dsc().
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/5004
+Fixes: 17ce8a6907f7 ("drm/amd/display: Add dsc pre-validation in atomic check")
+Signed-off-by: Yussuf Khalil <dev@pp3345.net>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit cc7c7121ae082b7b82891baa7280f1ff2608f22b)
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +++++
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 +
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 7 +++++--
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 7eff2b94ab666..bb5e3a6086f2e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9908,6 +9908,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
++ dm_new_crtc_state->mode_changed_independent_from_dsc = new_crtc_state->mode_changed;
++ }
++
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index df18b4df1f2c1..12385b6f8443b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -698,6 +698,7 @@ struct dm_crtc_state {
+
+ bool freesync_vrr_info_changed;
+
++ bool mode_changed_independent_from_dsc;
+ bool dsc_force_changed;
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 495491decec1e..94c83a707acc6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1564,8 +1564,11 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ } else {
+ int ind = find_crtc_index_in_state_by_stream(state, stream);
+
+- if (ind >= 0)
+- state->crtcs[ind].new_state->mode_changed = 0;
++ if (ind >= 0) {
++ struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(state->crtcs[ind].new_state);
++
++ dm_new_crtc_state->base.mode_changed = dm_new_crtc_state->mode_changed_independent_from_dsc;
++ }
+ }
+ }
+ clean_exit:
+--
+2.53.0
+
--- /dev/null
+From 536245a4016d5613359f422d8196beddba33ccd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 14:58:29 +0800
+Subject: ext4: validate p_idx bounds in ext4_ext_correct_indexes
+
+From: Tejas Bharambe <tejas.bharambe@outlook.com>
+
+[ Upstream commit 2acb5c12ebd860f30e4faf67e6cc8c44ddfe5fe8 ]
+
+ext4_ext_correct_indexes() walks up the extent tree correcting
+index entries when the first extent in a leaf is modified. Before
+accessing path[k].p_idx->ei_block, there is no validation that
+p_idx falls within the valid range of index entries for that
+level.
+
+If the on-disk extent header contains a corrupted or crafted
+eh_entries value, p_idx can point past the end of the allocated
+buffer, causing a slab-out-of-bounds read.
+
+Fix this by validating path[k].p_idx against EXT_LAST_INDEX() at
+both access sites: before the while loop and inside it. Return
+-EFSCORRUPTED if the index pointer is out of range, consistent
+with how other bounds violations are handled in the ext4 extent
+tree code.
+
+Reported-by: syzbot+04c4e65cab786a2e5b7e@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=04c4e65cab786a2e5b7e
+Signed-off-by: Tejas Bharambe <tejas.bharambe@outlook.com>
+Link: https://patch.msgid.link/JH0PR06MB66326016F9B6AD24097D232B897CA@JH0PR06MB6632.apcprd06.prod.outlook.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ Minor conflict resolved. ]
+Signed-off-by: Jianqiang kang <jianqkang@sina.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 1df7174774694..6d95dab538475 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1740,6 +1740,13 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode, path + k);
+ if (err)
+ return err;
++ if (unlikely(path[k].p_idx > EXT_LAST_INDEX(path[k].p_hdr))) {
++ EXT4_ERROR_INODE(inode,
++ "path[%d].p_idx %p > EXT_LAST_INDEX %p",
++ k, path[k].p_idx,
++ EXT_LAST_INDEX(path[k].p_hdr));
++ return -EFSCORRUPTED;
++ }
+ path[k].p_idx->ei_block = border;
+ err = ext4_ext_dirty(handle, inode, path + k);
+ if (err)
+@@ -1752,6 +1759,14 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode, path + k);
+ if (err)
+ break;
++ if (unlikely(path[k].p_idx > EXT_LAST_INDEX(path[k].p_hdr))) {
++ EXT4_ERROR_INODE(inode,
++ "path[%d].p_idx %p > EXT_LAST_INDEX %p",
++ k, path[k].p_idx,
++ EXT_LAST_INDEX(path[k].p_hdr));
++ err = -EFSCORRUPTED;
++ break;
++ }
+ path[k].p_idx->ei_block = border;
+ err = ext4_ext_dirty(handle, inode, path + k);
+ if (err)
+--
+2.53.0
+
x86-cpu-amd-call-the-spectral-chicken-in-the-zen2-init-function.patch
x86-cpu-amd-rename-init_amd_zn-to-init_amd_zen_common.patch
x86-cpu-amd-add-x86_feature_zen1.patch
+drm-amd-display-do-not-skip-unrelated-mode-changes-i.patch
+spi-meson-spicc-fix-double-put-in-remove-path.patch
+ext4-validate-p_idx-bounds-in-ext4_ext_correct_index.patch
--- /dev/null
+From c89d3afc9f1cd467da04d2c7a3e3aaedbb05d122 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 17:32:46 +0800
+Subject: spi: meson-spicc: Fix double-put in remove path
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 63542bb402b7013171c9f621c28b609eda4dbf1f ]
+
+meson_spicc_probe() registers the controller with
+devm_spi_register_controller(), so teardown already drops the
+controller reference via devm cleanup.
+
+Calling spi_controller_put() again in meson_spicc_remove()
+causes a double-put.
+
+Fixes: 8311ee2164c5 ("spi: meson-spicc: fix memory leak in meson_spicc_remove")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260322-rockchip-v1-1-fac3f0c6dad8@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+[ In v6.1, commit 68bf3288c7eb ("spi: meson-spicc: switch to use modern name")
+has not been applied, so the driver still uses the legacy spicc->master field
+and spi_master_put() API. The line to remove is spi_master_put(spicc->master)
+rather than spi_controller_put(spicc->host) as in the upstream patch.
+They are functionally identical. ]
+Signed-off-by: Wenshan Lan <jetlan9@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-meson-spicc.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
+index 1b4195c54ee26..04cf8489dd56b 100644
+--- a/drivers/spi/spi-meson-spicc.c
++++ b/drivers/spi/spi-meson-spicc.c
+@@ -883,8 +883,6 @@ static int meson_spicc_remove(struct platform_device *pdev)
+ clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
+
+- spi_master_put(spicc->master);
+-
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From d81db27729cc1419be5811cb748fb9ffb99d87db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 09:44:15 -0800
+Subject: mm: convert mm_lock_seq to a proper seqcount
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+[ Upstream commit eb449bd96954b1c1e491d19066cfd2a010f0aa47 ]
+
+Convert mm_lock_seq to be seqcount_t and change all mmap_write_lock
+variants to increment it, in-line with the usual seqcount usage pattern.
+This lets us check whether the mmap_lock is write-locked by checking
+mm_lock_seq.sequence counter (odd=locked, even=unlocked). This will be
+used when implementing mmap_lock speculation functions.
+As a result vm_lock_seq is also changed to be unsigned to match the type
+of mm_lock_seq.sequence.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Link: https://lkml.kernel.org/r/20241122174416.1367052-2-surenb@google.com
+Stable-dep-of: 52f657e34d7b ("x86: shadow stacks: proper error handling for mmap lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mm.h | 12 +++----
+ include/linux/mm_types.h | 7 ++--
+ include/linux/mmap_lock.h | 55 +++++++++++++++++++++-----------
+ kernel/fork.c | 5 +--
+ mm/init-mm.c | 2 +-
+ tools/testing/vma/vma.c | 4 +--
+ tools/testing/vma/vma_internal.h | 4 +--
+ 7 files changed, 53 insertions(+), 36 deletions(-)
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 20f9287d23a57..01d53e7fdcce5 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -698,7 +698,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
+ * we don't rely on for anything - the mm_lock_seq read against which we
+ * need ordering is below.
+ */
+- if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
++ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence))
+ return false;
+
+ if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
+@@ -715,7 +715,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
+ * after it has been unlocked.
+ * This pairs with RELEASE semantics in vma_end_write_all().
+ */
+- if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
++ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) {
+ up_read(&vma->vm_lock->lock);
+ return false;
+ }
+@@ -730,7 +730,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
+ }
+
+ /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
+-static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
++static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+ {
+ mmap_assert_write_locked(vma->vm_mm);
+
+@@ -738,7 +738,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
+ * current task is holding mmap_write_lock, both vma->vm_lock_seq and
+ * mm->mm_lock_seq can't be concurrently modified.
+ */
+- *mm_lock_seq = vma->vm_mm->mm_lock_seq;
++ *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
+ return (vma->vm_lock_seq == *mm_lock_seq);
+ }
+
+@@ -749,7 +749,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
+ */
+ static inline void vma_start_write(struct vm_area_struct *vma)
+ {
+- int mm_lock_seq;
++ unsigned int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return;
+@@ -767,7 +767,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
+
+ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ {
+- int mm_lock_seq;
++ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ }
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 2c834cbf3ff5d..2113f7da182c6 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -750,7 +750,7 @@ struct vm_area_struct {
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
+- int vm_lock_seq;
++ unsigned int vm_lock_seq;
+ /* Unstable RCU readers are allowed to read this. */
+ struct vma_lock *vm_lock;
+ #endif
+@@ -922,6 +922,9 @@ struct mm_struct {
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
++ * Incremented every time mmap_lock is write-locked/unlocked.
++ * Initialized to 0, therefore odd values indicate mmap_lock
++ * is write-locked and even values that it's released.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+@@ -930,7 +933,7 @@ struct mm_struct {
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
+- int mm_lock_seq;
++ seqcount_t mm_lock_seq;
+ #endif
+
+
+diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
+index de9dc20b01ba7..9715326f5a85f 100644
+--- a/include/linux/mmap_lock.h
++++ b/include/linux/mmap_lock.h
+@@ -71,39 +71,39 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
+ }
+
+ #ifdef CONFIG_PER_VMA_LOCK
+-/*
+- * Drop all currently-held per-VMA locks.
+- * This is called from the mmap_lock implementation directly before releasing
+- * a write-locked mmap_lock (or downgrading it to read-locked).
+- * This should normally NOT be called manually from other places.
+- * If you want to call this manually anyway, keep in mind that this will release
+- * *all* VMA write locks, including ones from further up the stack.
+- */
+-static inline void vma_end_write_all(struct mm_struct *mm)
++static inline void mm_lock_seqcount_init(struct mm_struct *mm)
+ {
+- mmap_assert_write_locked(mm);
+- /*
+- * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+- * mmap_lock being held.
+- * We need RELEASE semantics here to ensure that preceding stores into
+- * the VMA take effect before we unlock it with this store.
+- * Pairs with ACQUIRE semantics in vma_start_read().
+- */
+- smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
++ seqcount_init(&mm->mm_lock_seq);
++}
++
++static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
++{
++ do_raw_write_seqcount_begin(&mm->mm_lock_seq);
++}
++
++static inline void mm_lock_seqcount_end(struct mm_struct *mm)
++{
++ ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
++ do_raw_write_seqcount_end(&mm->mm_lock_seq);
+ }
++
+ #else
+-static inline void vma_end_write_all(struct mm_struct *mm) {}
++static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
++static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
++static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
+ #endif
+
+ static inline void mmap_init_lock(struct mm_struct *mm)
+ {
+ init_rwsem(&mm->mmap_lock);
++ mm_lock_seqcount_init(mm);
+ }
+
+ static inline void mmap_write_lock(struct mm_struct *mm)
+ {
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write(&mm->mmap_lock);
++ mm_lock_seqcount_begin(mm);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+ }
+
+@@ -111,6 +111,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+ {
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write_nested(&mm->mmap_lock, subclass);
++ mm_lock_seqcount_begin(mm);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+ }
+
+@@ -120,10 +121,26 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_killable(&mm->mmap_lock);
++ if (!ret)
++ mm_lock_seqcount_begin(mm);
+ __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
+ return ret;
+ }
+
++/*
++ * Drop all currently-held per-VMA locks.
++ * This is called from the mmap_lock implementation directly before releasing
++ * a write-locked mmap_lock (or downgrading it to read-locked).
++ * This should normally NOT be called manually from other places.
++ * If you want to call this manually anyway, keep in mind that this will release
++ * *all* VMA write locks, including ones from further up the stack.
++ */
++static inline void vma_end_write_all(struct mm_struct *mm)
++{
++ mmap_assert_write_locked(mm);
++ mm_lock_seqcount_end(mm);
++}
++
+ static inline void mmap_write_unlock(struct mm_struct *mm)
+ {
+ __mmap_lock_trace_released(mm, true);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 29532a57e0cd4..c6415bb0abf59 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -450,7 +450,7 @@ static bool vma_lock_alloc(struct vm_area_struct *vma)
+ return false;
+
+ init_rwsem(&vma->vm_lock->lock);
+- vma->vm_lock_seq = -1;
++ vma->vm_lock_seq = UINT_MAX;
+
+ return true;
+ }
+@@ -1280,9 +1280,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ seqcount_init(&mm->write_protect_seq);
+ mmap_init_lock(mm);
+ INIT_LIST_HEAD(&mm->mmlist);
+-#ifdef CONFIG_PER_VMA_LOCK
+- mm->mm_lock_seq = 0;
+-#endif
+ mm_pgtables_bytes_init(mm);
+ mm->map_count = 0;
+ mm->locked_vm = 0;
+diff --git a/mm/init-mm.c b/mm/init-mm.c
+index 24c8093792745..6af3ad675930b 100644
+--- a/mm/init-mm.c
++++ b/mm/init-mm.c
+@@ -40,7 +40,7 @@ struct mm_struct init_mm = {
+ .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
+ .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
+ #ifdef CONFIG_PER_VMA_LOCK
+- .mm_lock_seq = 0,
++ .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq),
+ #endif
+ .user_ns = &init_user_ns,
+ .cpu_bitmap = CPU_BITS_NONE,
+diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
+index b33b47342d418..9074aaced9c5a 100644
+--- a/tools/testing/vma/vma.c
++++ b/tools/testing/vma/vma.c
+@@ -87,7 +87,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
+ * begun. Linking to the tree will have caused this to be incremented,
+ * which means we will get a false positive otherwise.
+ */
+- vma->vm_lock_seq = -1;
++ vma->vm_lock_seq = UINT_MAX;
+
+ return vma;
+ }
+@@ -212,7 +212,7 @@ static bool vma_write_started(struct vm_area_struct *vma)
+ int seq = vma->vm_lock_seq;
+
+ /* We reset after each check. */
+- vma->vm_lock_seq = -1;
++ vma->vm_lock_seq = UINT_MAX;
+
+ /* The vma_start_write() stub simply increments this value. */
+ return seq > -1;
+diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
+index 1d5bbc8464f18..0a95dbb0346fb 100644
+--- a/tools/testing/vma/vma_internal.h
++++ b/tools/testing/vma/vma_internal.h
+@@ -231,7 +231,7 @@ struct vm_area_struct {
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
+- int vm_lock_seq;
++ unsigned int vm_lock_seq;
+ struct vma_lock *vm_lock;
+ #endif
+
+@@ -406,7 +406,7 @@ static inline bool vma_lock_alloc(struct vm_area_struct *vma)
+ return false;
+
+ init_rwsem(&vma->vm_lock->lock);
+- vma->vm_lock_seq = -1;
++ vma->vm_lock_seq = UINT_MAX;
+
+ return true;
+ }
+--
+2.53.0
+
asoc-sof-don-t-allow-pointer-operations-on-unconfigured-streams.patch
spi-rockchip-fix-controller-deregistration.patch
ksmbd-rewrite-stop_sessions-with-restartable-iteration.patch
+mm-convert-mm_lock_seq-to-a-proper-seqcount.patch
+x86-shadow-stacks-proper-error-handling-for-mmap-loc.patch
+x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch
--- /dev/null
+From 41003b1746172b4a06d126c77a2d0cb8f33f6891 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Apr 2026 13:18:57 -0700
+Subject: x86: shadow stacks: proper error handling for mmap lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 52f657e34d7b21b47434d9d8b26fa7f6778b63a0 ]
+
+김영민 reports that shstk_pop_sigframe() doesn't check for errors from
+mmap_read_lock_killable(), which is a silly oversight, and also shows
+that we haven't marked those functions with "__must_check", which would
+have immediately caught it.
+
+So let's fix both issues.
+
+Reported-by: 김영민 <osori@hspace.io>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Acked-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/shstk.c | 3 ++-
+ include/linux/mmap_lock.h | 6 +++---
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
+index 059685612362d..0dc983b33b003 100644
+--- a/arch/x86/kernel/shstk.c
++++ b/arch/x86/kernel/shstk.c
+@@ -311,7 +311,8 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
+
+ if (need_to_check_vma)
+- mmap_read_lock_killable(current->mm);
++ if (mmap_read_lock_killable(current->mm))
++ return -EINTR;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (unlikely(err))
+diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
+index 9715326f5a85f..e74f3720c9399 100644
+--- a/include/linux/mmap_lock.h
++++ b/include/linux/mmap_lock.h
+@@ -115,7 +115,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+ }
+
+-static inline int mmap_write_lock_killable(struct mm_struct *mm)
++static inline int __must_check mmap_write_lock_killable(struct mm_struct *mm)
+ {
+ int ret;
+
+@@ -162,7 +162,7 @@ static inline void mmap_read_lock(struct mm_struct *mm)
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+ }
+
+-static inline int mmap_read_lock_killable(struct mm_struct *mm)
++static inline int __must_check mmap_read_lock_killable(struct mm_struct *mm)
+ {
+ int ret;
+
+@@ -172,7 +172,7 @@ static inline int mmap_read_lock_killable(struct mm_struct *mm)
+ return ret;
+ }
+
+-static inline bool mmap_read_trylock(struct mm_struct *mm)
++static inline bool __must_check mmap_read_trylock(struct mm_struct *mm)
+ {
+ bool ret;
+
+--
+2.53.0
+
--- /dev/null
+From 68dbc888dec2379eeb7f3aed89e520c55d8d7a48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 16:53:48 -0700
+Subject: x86/shstk: Prevent deadlock during shstk sigreturn
+
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+
+[ Upstream commit 9874b2917b9fbc30956fee209d3c4aa47201c64e ]
+
+During sigreturn the shadow stack signal frame is popped. The kernel does
+this by reading the shadow stack using normal read accesses. When it can't
+assume the memory is shadow stack, it takes extra steps to makes sure it is
+reading actual shadow stack memory and not other normal readable memory. It
+does this by holding the mmap read lock while doing the access and checking
+the flags of the VMA.
+
+Unfortunately that is not safe. If the read of the shadow stack sigframe
+hits a page fault, the fault handler will try to recursively grab another
+mmap read lock. This normally works ok, but if a writer on another CPU is
+also waiting, the second read lock could fail and cause a deadlock.
+
+Fix this by doing the read of the userspace memory via gup. Embed it in the
+get_shstk_data() helper.
+
+Currently there is a check that skips the lookup work when the SSP can be
+assumed to be on a shadow stack. While reorganizing the function, remove
+the optimization to make the tricky code flows more common, such that
+issues like this cannot escape detection for so long.
+
+[Due to missing per-vma MM sequence counter, use a simpler GUP based
+solution for the backport]
+Cc: <stable@vger.kernel.org> # Depends on https://lore.kernel.org/all/20260504205856.536296-1-rick.p.edgecombe@intel.com/
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/shstk.c | 46 ++++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
+index 0dc983b33b003..373a44a5c478f 100644
+--- a/arch/x86/kernel/shstk.c
++++ b/arch/x86/kernel/shstk.c
+@@ -18,6 +18,7 @@
+ #include <linux/sizes.h>
+ #include <linux/user.h>
+ #include <linux/syscalls.h>
++#include <linux/highmem.h>
+ #include <asm/msr.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/fpu/types.h>
+@@ -262,11 +263,29 @@ static int put_shstk_data(u64 __user *addr, u64 data)
+ return 0;
+ }
+
++/* Copy from aligned address in userspace without risk of page fault. */
++static int shstk_copy_user_gup(unsigned long *ldata, unsigned long __user *addr)
++{
++ struct page *page;
++ void *kaddr;
++
++ mmap_assert_locked(current->mm);
++ if (get_user_pages((unsigned long)addr, 1, 0, &page) != 1)
++ return -EFAULT;
++
++ kaddr = kmap_local_page(page);
++ *ldata = *(unsigned long *)(kaddr + offset_in_page(addr));
++ kunmap_local(kaddr);
++ put_page(page);
++
++ return 0;
++}
++
+ static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
+ {
+ unsigned long ldata;
+
+- if (unlikely(get_user(ldata, addr)))
++ if (shstk_copy_user_gup(&ldata, addr))
+ return -EFAULT;
+
+ if (!(ldata & SHSTK_DATA_BIT))
+@@ -296,7 +315,6 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ {
+ struct vm_area_struct *vma;
+ unsigned long token_addr;
+- bool need_to_check_vma;
+ int err = 1;
+
+ /*
+@@ -308,26 +326,21 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ if (!IS_ALIGNED(*ssp, 8))
+ return -EINVAL;
+
+- need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
+-
+- if (need_to_check_vma)
+- if (mmap_read_lock_killable(current->mm))
+- return -EINTR;
++ if (mmap_read_lock_killable(current->mm))
++ return -EINTR;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (unlikely(err))
+ goto out_err;
+
+- if (need_to_check_vma) {
+- vma = find_vma(current->mm, *ssp);
+- if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
+- err = -EFAULT;
+- goto out_err;
+- }
+-
+- mmap_read_unlock(current->mm);
++ vma = find_vma(current->mm, *ssp);
++ if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
++ err = -EFAULT;
++ goto out_err;
+ }
+
++ mmap_read_unlock(current->mm);
++
+ /* Restore SSP aligned? */
+ if (unlikely(!IS_ALIGNED(token_addr, 8)))
+ return -EINVAL;
+@@ -340,8 +353,7 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+
+ return 0;
+ out_err:
+- if (need_to_check_vma)
+- mmap_read_unlock(current->mm);
++ mmap_read_unlock(current->mm);
+ return err;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From af8403e339c49ab1e3c645dd1fc15cb005b3c6c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 15:07:29 +0800
+Subject: drm/amd/display: Do not skip unrelated mode changes in DSC validation
+
+From: Yussuf Khalil <dev@pp3345.net>
+
+[ Upstream commit aed3d041ab061ec8a64f50a3edda0f4db7280025 ]
+
+Starting with commit 17ce8a6907f7 ("drm/amd/display: Add dsc pre-validation in
+atomic check"), amdgpu resets the CRTC state mode_changed flag to false when
+recomputing the DSC configuration results in no timing change for a particular
+stream.
+
+However, this is incorrect in scenarios where a change in MST/DSC configuration
+happens in the same KMS commit as another (unrelated) mode change. For example,
+the integrated panel of a laptop may be configured differently (e.g., HDR
+enabled/disabled) depending on whether external screens are attached. In this
+case, plugging in external DP-MST screens may result in the mode_changed flag
+being dropped incorrectly for the integrated panel if its DSC configuration
+did not change during precomputation in pre_validate_dsc().
+
+At this point, however, dm_update_crtc_state() has already created new streams
+for CRTCs with DSC-independent mode changes. In turn,
+amdgpu_dm_commit_streams() will never release the old stream, resulting in a
+memory leak. amdgpu_dm_atomic_commit_tail() will never acquire a reference to
+the new stream either, which manifests as a use-after-free when the stream gets
+disabled later on:
+
+BUG: KASAN: use-after-free in dc_stream_release+0x25/0x90 [amdgpu]
+Write of size 4 at addr ffff88813d836524 by task kworker/9:9/29977
+
+Workqueue: events drm_mode_rmfb_work_fn
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x6e/0xa0
+ print_address_description.constprop.0+0x88/0x320
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ print_report+0xfc/0x1ff
+ ? srso_alias_return_thunk+0x5/0xfbef5
+ ? __virt_addr_valid+0x225/0x4e0
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ kasan_report+0xe1/0x180
+ ? dc_stream_release+0x25/0x90 [amdgpu]
+ kasan_check_range+0x125/0x200
+ dc_stream_release+0x25/0x90 [amdgpu]
+ dc_state_destruct+0x14d/0x5c0 [amdgpu]
+ dc_state_release.part.0+0x4e/0x130 [amdgpu]
+ dm_atomic_destroy_state+0x3f/0x70 [amdgpu]
+ drm_atomic_state_default_clear+0x8ee/0xf30
+ ? drm_mode_object_put.part.0+0xb1/0x130
+ __drm_atomic_state_free+0x15c/0x2d0
+ atomic_remove_fb+0x67e/0x980
+
+Since there is no reliable way of figuring out whether a CRTC has unrelated
+mode changes pending at the time of DSC validation, remember the value of the
+mode_changed flag from before the point where a CRTC was marked as potentially
+affected by a change in DSC configuration. Reset the mode_changed flag to this
+earlier value instead in pre_validate_dsc().
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/5004
+Fixes: 17ce8a6907f7 ("drm/amd/display: Add dsc pre-validation in atomic check")
+Signed-off-by: Yussuf Khalil <dev@pp3345.net>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit cc7c7121ae082b7b82891baa7280f1ff2608f22b)
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +++++
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 +
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 7 +++++--
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f51c3921cbc26..12f75b2ad664d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -10152,6 +10152,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
++ dm_new_crtc_state->mode_changed_independent_from_dsc = new_crtc_state->mode_changed;
++ }
++
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 88606b805330d..8d4f2cadb9157 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -737,6 +737,7 @@ struct dm_crtc_state {
+
+ bool freesync_vrr_info_changed;
+
++ bool mode_changed_independent_from_dsc;
+ bool dsc_force_changed;
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 2698e5c74ddfd..ab6924d3046b7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1587,8 +1587,11 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ } else {
+ int ind = find_crtc_index_in_state_by_stream(state, stream);
+
+- if (ind >= 0)
+- state->crtcs[ind].new_state->mode_changed = 0;
++ if (ind >= 0) {
++ struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(state->crtcs[ind].new_state);
++
++ dm_new_crtc_state->base.mode_changed = dm_new_crtc_state->mode_changed_independent_from_dsc;
++ }
+ }
+ }
+ clean_exit:
+--
+2.53.0
+
--- /dev/null
+From 15666697cfeb7dd5d38e97bf230027039566bb35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 14:58:45 +0800
+Subject: ext4: validate p_idx bounds in ext4_ext_correct_indexes
+
+From: Tejas Bharambe <tejas.bharambe@outlook.com>
+
+[ Upstream commit 2acb5c12ebd860f30e4faf67e6cc8c44ddfe5fe8 ]
+
+ext4_ext_correct_indexes() walks up the extent tree correcting
+index entries when the first extent in a leaf is modified. Before
+accessing path[k].p_idx->ei_block, there is no validation that
+p_idx falls within the valid range of index entries for that
+level.
+
+If the on-disk extent header contains a corrupted or crafted
+eh_entries value, p_idx can point past the end of the allocated
+buffer, causing a slab-out-of-bounds read.
+
+Fix this by validating path[k].p_idx against EXT_LAST_INDEX() at
+both access sites: before the while loop and inside it. Return
+-EFSCORRUPTED if the index pointer is out of range, consistent
+with how other bounds violations are handled in the ext4 extent
+tree code.
+
+Reported-by: syzbot+04c4e65cab786a2e5b7e@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=04c4e65cab786a2e5b7e
+Signed-off-by: Tejas Bharambe <tejas.bharambe@outlook.com>
+Link: https://patch.msgid.link/JH0PR06MB66326016F9B6AD24097D232B897CA@JH0PR06MB6632.apcprd06.prod.outlook.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ Minor conflict resolved. ]
+Signed-off-by: Jianqiang kang <jianqkang@sina.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 7626cf2b07f1c..a94798e23c1af 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1743,6 +1743,13 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode, path + k);
+ if (err)
+ return err;
++ if (unlikely(path[k].p_idx > EXT_LAST_INDEX(path[k].p_hdr))) {
++ EXT4_ERROR_INODE(inode,
++ "path[%d].p_idx %p > EXT_LAST_INDEX %p",
++ k, path[k].p_idx,
++ EXT_LAST_INDEX(path[k].p_hdr));
++ return -EFSCORRUPTED;
++ }
+ path[k].p_idx->ei_block = border;
+ err = ext4_ext_dirty(handle, inode, path + k);
+ if (err)
+@@ -1755,6 +1762,14 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode, path + k);
+ if (err)
+ break;
++ if (unlikely(path[k].p_idx > EXT_LAST_INDEX(path[k].p_hdr))) {
++ EXT4_ERROR_INODE(inode,
++ "path[%d].p_idx %p > EXT_LAST_INDEX %p",
++ k, path[k].p_idx,
++ EXT_LAST_INDEX(path[k].p_hdr));
++ err = -EFSCORRUPTED;
++ break;
++ }
+ path[k].p_idx->ei_block = border;
+ err = ext4_ext_dirty(handle, inode, path + k);
+ if (err)
+--
+2.53.0
+
--- /dev/null
+From bb213c47e14d4dceb63b9fc342c94922fe54d6e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 16:31:42 +0800
+Subject: rxrpc: Fix potential UAF after skb_unshare() failure
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 1f2740150f904bfa60e4bad74d65add3ccb5e7f8 ]
+
+If skb_unshare() fails to unshare a packet due to allocation failure in
+rxrpc_input_packet(), the skb pointer in the parent (rxrpc_io_thread())
+will be NULL'd out. This will likely cause the call to
+trace_rxrpc_rx_done() to oops.
+
+Fix this by moving the unsharing down to where rxrpc_input_call_event()
+calls rxrpc_input_call_packet(). There are a number of places prior to
+that where we ignore DATA packets for a variety of reasons (such as the
+call already being complete) for which an unshare is then avoided.
+
+And with that, rxrpc_input_packet() doesn't need to take a pointer to the
+pointer to the packet, so change that to just a pointer.
+
+Fixes: 2d1faf7a0ca3 ("rxrpc: Simplify skbuff accounting in receive path")
+Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Jeffrey Altman <jaltman@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+cc: stable@kernel.org
+Link: https://patch.msgid.link/20260422161438.2593376-4-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Relocated the unshare/skb_copy block from rxrpc_input_call_event()'s rx_queue dequeue loop to existing `if (skb) rxrpc_input_call_packet()` site, and substituted rxrpc_skb_put_call_rx with rxrpc_skb_put_input. ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[ Readd rxrpc_skb_put_response_copy() or will cause a build fail with commit 24481a7f5733 ("rxrpc: Fix conn-level packet handling to unshare RESPONSE packets") ]
+Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/rxrpc.h | 4 ++--
+ net/rxrpc/ar-internal.h | 1 -
+ net/rxrpc/call_event.c | 23 +++++++++++++++++++++--
+ net/rxrpc/io_thread.c | 24 ++----------------------
+ net/rxrpc/skbuff.c | 9 ---------
+ 5 files changed, 25 insertions(+), 36 deletions(-)
+
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 539801f8ee282..f0560087637ed 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -126,8 +126,6 @@
+ E_(rxrpc_call_poke_timer_now, "Timer-now")
+
+ #define rxrpc_skb_traces \
+- EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
+- EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
+ EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
+@@ -146,12 +144,14 @@
+ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
+ EM(rxrpc_skb_put_purge, "PUT purge ") \
++ EM(rxrpc_skb_put_response_copy, "PUT resp-cpy ") \
+ EM(rxrpc_skb_put_rotate, "PUT rotate ") \
+ EM(rxrpc_skb_put_unknown, "PUT unknown ") \
+ EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+ EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_reject, "SEE reject ") \
+ EM(rxrpc_skb_see_rotate, "SEE rotate ") \
++ EM(rxrpc_skb_see_unshare_nomem, "SEE unshar-nm") \
+ E_(rxrpc_skb_see_version, "SEE version ")
+
+ #define rxrpc_local_traces \
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index f4512761f572d..1db479f3d6d3c 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -1269,7 +1269,6 @@ int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
+ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
+ void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+-void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
+ void rxrpc_purge_queue(struct sk_buff_head *);
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index 0f78544d043be..c8a4a4c979eb6 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -456,8 +456,27 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+ resend = true;
+ }
+
+- if (skb)
+- rxrpc_input_call_packet(call, skb);
++ if (skb) {
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++
++ if (sp->hdr.securityIndex != 0 && skb_cloned(skb)) {
++ /* Unshare the packet so that it can be modified by
++ * in-place decryption.
++ */
++ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
++
++ if (nskb) {
++ rxrpc_new_skb(nskb, rxrpc_skb_new_unshared);
++ rxrpc_input_call_packet(call, nskb);
++ rxrpc_free_skb(nskb, rxrpc_skb_put_input);
++ } else {
++ /* OOM - Drop the packet. */
++ rxrpc_see_skb(skb, rxrpc_skb_see_unshare_nomem);
++ }
++ } else {
++ rxrpc_input_call_packet(call, skb);
++ }
++ }
+
+ rxrpc_transmit_some_data(call);
+
+diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
+index 0491f2bbf61e0..f542eda13ff0b 100644
+--- a/net/rxrpc/io_thread.c
++++ b/net/rxrpc/io_thread.c
+@@ -167,13 +167,12 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
+ /*
+ * Process packets received on the local endpoint
+ */
+-static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
++static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff *skb)
+ {
+ struct rxrpc_connection *conn;
+ struct sockaddr_rxrpc peer_srx;
+ struct rxrpc_skb_priv *sp;
+ struct rxrpc_peer *peer = NULL;
+- struct sk_buff *skb = *_skb;
+ bool ret = false;
+
+ skb_pull(skb, sizeof(struct udphdr));
+@@ -219,25 +218,6 @@ static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
+ if (sp->hdr.seq == 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
+-
+- /* Unshare the packet so that it can be modified for in-place
+- * decryption.
+- */
+- if (sp->hdr.securityIndex != 0) {
+- skb = skb_unshare(skb, GFP_ATOMIC);
+- if (!skb) {
+- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
+- *_skb = NULL;
+- return just_discard;
+- }
+-
+- if (skb != *_skb) {
+- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare);
+- *_skb = skb;
+- rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
+- sp = rxrpc_skb(skb);
+- }
+- }
+ break;
+
+ case RXRPC_PACKET_TYPE_CHALLENGE:
+@@ -479,7 +459,7 @@ int rxrpc_io_thread(void *data)
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_PACKET:
+ skb->priority = 0;
+- if (!rxrpc_input_packet(local, &skb))
++ if (!rxrpc_input_packet(local, skb))
+ rxrpc_reject_packet(local, skb);
+ trace_rxrpc_rx_done(skb->mark, skb->priority);
+ rxrpc_free_skb(skb, rxrpc_skb_put_input);
+diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
+index 3bcd6ee803960..e2169d1a14b5f 100644
+--- a/net/rxrpc/skbuff.c
++++ b/net/rxrpc/skbuff.c
+@@ -46,15 +46,6 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
+ skb_get(skb);
+ }
+
+-/*
+- * Note the dropping of a ref on a socket buffer by the core.
+- */
+-void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
+-{
+- int n = atomic_inc_return(&rxrpc_n_rx_skbs);
+- trace_rxrpc_skb(skb, 0, n, why);
+-}
+-
+ /*
+ * Note the destruction of a socket buffer.
+ */
+--
+2.53.0
+
--- /dev/null
+From d0af8b45b8e82cb96f588da9ebb8157ae3bb8077 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2026 15:40:56 -0400
+Subject: rxrpc: Fix rxrpc_input_call_event() to only unshare DATA packets
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 55b2984c96c37f909bbfe8851f13152693951382 ]
+
+Fix rxrpc_input_call_event() to only unshare DATA packets and not ACK,
+ABORT, etc..
+
+And with that, rxrpc_input_packet() doesn't need to take a pointer to the
+pointer to the packet, so change that to just a pointer.
+
+Fixes: 1f2740150f90 ("rxrpc: Fix potential UAF after skb_unshare() failure")
+Closes: https://sashiko.dev/#/patchset/20260422161438.2593376-4-dhowells@redhat.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Jeffrey Altman <jaltman@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+cc: stable@kernel.org
+Link: https://patch.msgid.link/20260423200909.3049438-2-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/call_event.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index c8a4a4c979eb6..d6dfc7c08cf04 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -459,7 +459,9 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+ if (skb) {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+- if (sp->hdr.securityIndex != 0 && skb_cloned(skb)) {
++ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
++ sp->hdr.securityIndex != 0 &&
++ skb_cloned(skb)) {
+ /* Unshare the packet so that it can be modified by
+ * in-place decryption.
+ */
+--
+2.53.0
+
iommufd-fix-a-race-with-concurrent-allocation-and-unmap.patch
asoc-sof-don-t-allow-pointer-operations-on-unconfigured-streams.patch
spi-rockchip-fix-controller-deregistration.patch
+x86-shadow-stacks-proper-error-handling-for-mmap-loc.patch
+drm-amd-display-do-not-skip-unrelated-mode-changes-i.patch
+x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch
+spi-meson-spicc-fix-double-put-in-remove-path.patch
+rxrpc-fix-potential-uaf-after-skb_unshare-failure.patch
+ext4-validate-p_idx-bounds-in-ext4_ext_correct_index.patch
+rxrpc-fix-rxrpc_input_call_event-to-only-unshare-dat.patch
--- /dev/null
+From 5afbab17faad2d10efc7839f71aebb4ebfbd8747 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 16:14:57 +0800
+Subject: spi: meson-spicc: Fix double-put in remove path
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 63542bb402b7013171c9f621c28b609eda4dbf1f ]
+
+meson_spicc_probe() registers the controller with
+devm_spi_register_controller(), so teardown already drops the
+controller reference via devm cleanup.
+
+Calling spi_controller_put() again in meson_spicc_remove()
+causes a double-put.
+
+Fixes: 8311ee2164c5 ("spi: meson-spicc: fix memory leak in meson_spicc_remove")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260322-rockchip-v1-1-fac3f0c6dad8@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+[ In v6.6, commit 68bf3288c7eb ("spi: meson-spicc: switch to use modern name")
+has not been applied, so the driver still uses the legacy spicc->master field
+and spi_master_put() API. The line to remove is spi_master_put(spicc->master)
+rather than spi_controller_put(spicc->host) as in the upstream patch.
+They are functionally identical. ]
+Signed-off-by: Wenshan Lan <jetlan9@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-meson-spicc.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
+index 43d134f4b42b1..de8cf91658fd5 100644
+--- a/drivers/spi/spi-meson-spicc.c
++++ b/drivers/spi/spi-meson-spicc.c
+@@ -918,8 +918,6 @@ static void meson_spicc_remove(struct platform_device *pdev)
+
+ clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
+-
+- spi_master_put(spicc->master);
+ }
+
+ static const struct meson_spicc_data meson_spicc_gx_data = {
+--
+2.53.0
+
--- /dev/null
+From 94a5ccca0906787a59950dfe3679b66197f815a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Apr 2026 13:18:57 -0700
+Subject: x86: shadow stacks: proper error handling for mmap lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 52f657e34d7b21b47434d9d8b26fa7f6778b63a0 ]
+
+김영민 reports that shstk_pop_sigframe() doesn't check for errors from
+mmap_read_lock_killable(), which is a silly oversight, and also shows
+that we haven't marked those functions with "__must_check", which would
+have immediately caught it.
+
+So let's fix both issues.
+
+Reported-by: 김영민 <osori@hspace.io>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Acked-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/shstk.c | 3 ++-
+ include/linux/mmap_lock.h | 6 +++---
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
+index 19e4db582fb69..d259d7d5b962f 100644
+--- a/arch/x86/kernel/shstk.c
++++ b/arch/x86/kernel/shstk.c
+@@ -311,7 +311,8 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
+
+ if (need_to_check_vma)
+- mmap_read_lock_killable(current->mm);
++ if (mmap_read_lock_killable(current->mm))
++ return -EINTR;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (unlikely(err))
+diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
+index 8d38dcb6d044c..153e018677909 100644
+--- a/include/linux/mmap_lock.h
++++ b/include/linux/mmap_lock.h
+@@ -116,7 +116,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+ }
+
+-static inline int mmap_write_lock_killable(struct mm_struct *mm)
++static inline int __must_check mmap_write_lock_killable(struct mm_struct *mm)
+ {
+ int ret;
+
+@@ -147,7 +147,7 @@ static inline void mmap_read_lock(struct mm_struct *mm)
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+ }
+
+-static inline int mmap_read_lock_killable(struct mm_struct *mm)
++static inline int __must_check mmap_read_lock_killable(struct mm_struct *mm)
+ {
+ int ret;
+
+@@ -157,7 +157,7 @@ static inline int mmap_read_lock_killable(struct mm_struct *mm)
+ return ret;
+ }
+
+-static inline bool mmap_read_trylock(struct mm_struct *mm)
++static inline bool __must_check mmap_read_trylock(struct mm_struct *mm)
+ {
+ bool ret;
+
+--
+2.53.0
+
--- /dev/null
+From 43863234f8901f1b69c52e12122482e3e0c852d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2026 16:53:09 -0700
+Subject: x86/shstk: Prevent deadlock during shstk sigreturn
+
+From: Rick Edgecombe <rick.p.edgecombe@intel.com>
+
+[ Upstream commit 9874b2917b9fbc30956fee209d3c4aa47201c64e ]
+
+During sigreturn the shadow stack signal frame is popped. The kernel does
+this by reading the shadow stack using normal read accesses. When it can't
+assume the memory is shadow stack, it takes extra steps to make sure it is
+reading actual shadow stack memory and not other normal readable memory. It
+does this by holding the mmap read lock while doing the access and checking
+the flags of the VMA.
+
+Unfortunately that is not safe. If the read of the shadow stack sigframe
+hits a page fault, the fault handler will try to recursively grab another
+mmap read lock. This normally works ok, but if a writer on another CPU is
+also waiting, the second read lock could fail and cause a deadlock.
+
+Fix this by doing the read of the userspace memory via gup. Embed it in the
+get_shstk_data() helper.
+
+Currently there is a check that skips the lookup work when the SSP can be
+assumed to be on a shadow stack. While reorganizing the function, remove
+the optimization to make the tricky code flows more common, such that
+issues like this cannot escape detection for so long.
+
+[Due to missing per-vma MM sequence counter, use a simpler GUP based
+solution for the backport]
+Cc: <stable@vger.kernel.org> # Depends on https://lore.kernel.org/all/20260504205924.536382-1-rick.p.edgecombe@intel.com/
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/shstk.c | 46 ++++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
+index d259d7d5b962f..ba93c4e6a2319 100644
+--- a/arch/x86/kernel/shstk.c
++++ b/arch/x86/kernel/shstk.c
+@@ -18,6 +18,7 @@
+ #include <linux/sizes.h>
+ #include <linux/user.h>
+ #include <linux/syscalls.h>
++#include <linux/highmem.h>
+ #include <asm/msr.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/fpu/types.h>
+@@ -262,11 +263,29 @@ static int put_shstk_data(u64 __user *addr, u64 data)
+ return 0;
+ }
+
++/* Copy from aligned address in userspace without risk of page fault. */
++static int shstk_copy_user_gup(unsigned long *ldata, unsigned long __user *addr)
++{
++ struct page *page;
++ void *kaddr;
++
++ mmap_assert_locked(current->mm);
++ if (get_user_pages((unsigned long)addr, 1, 0, &page) != 1)
++ return -EFAULT;
++
++ kaddr = kmap_local_page(page);
++ *ldata = *(unsigned long *)(kaddr + offset_in_page(addr));
++ kunmap_local(kaddr);
++ put_page(page);
++
++ return 0;
++}
++
+ static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
+ {
+ unsigned long ldata;
+
+- if (unlikely(get_user(ldata, addr)))
++ if (shstk_copy_user_gup(&ldata, addr))
+ return -EFAULT;
+
+ if (!(ldata & SHSTK_DATA_BIT))
+@@ -296,7 +315,6 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ {
+ struct vm_area_struct *vma;
+ unsigned long token_addr;
+- bool need_to_check_vma;
+ int err = 1;
+
+ /*
+@@ -308,26 +326,21 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+ if (!IS_ALIGNED(*ssp, 8))
+ return -EINVAL;
+
+- need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
+-
+- if (need_to_check_vma)
+- if (mmap_read_lock_killable(current->mm))
+- return -EINTR;
++ if (mmap_read_lock_killable(current->mm))
++ return -EINTR;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (unlikely(err))
+ goto out_err;
+
+- if (need_to_check_vma) {
+- vma = find_vma(current->mm, *ssp);
+- if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
+- err = -EFAULT;
+- goto out_err;
+- }
+-
+- mmap_read_unlock(current->mm);
++ vma = find_vma(current->mm, *ssp);
++ if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
++ err = -EFAULT;
++ goto out_err;
+ }
+
++ mmap_read_unlock(current->mm);
++
+ /* Restore SSP aligned? */
+ if (unlikely(!IS_ALIGNED(token_addr, 8)))
+ return -EINVAL;
+@@ -340,8 +353,7 @@ static int shstk_pop_sigframe(unsigned long *ssp)
+
+ return 0;
+ out_err:
+- if (need_to_check_vma)
+- mmap_read_unlock(current->mm);
++ mmap_read_unlock(current->mm);
+ return err;
+ }
+
+--
+2.53.0
+