--- /dev/null
+From de134cb54c3a67644ff95b1c9bffe545e752c912 Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Wed, 20 Aug 2025 14:52:05 -0700
+Subject: btrfs: fix squota compressed stats leak
+
+From: Boris Burkov <boris@bur.io>
+
+commit de134cb54c3a67644ff95b1c9bffe545e752c912 upstream.
+
+The following workload on a squota enabled fs:
+
+ btrfs subvol create mnt/subvol
+
+ # ensure subvol extents get accounted
+ sync
+ btrfs qgroup create 1/1 mnt
+ btrfs qgroup assign mnt/subvol 1/1 mnt
+ btrfs qgroup delete mnt/subvol
+
+ # make the cleaner thread run
+ btrfs filesystem sync mnt
+ sleep 1
+ btrfs filesystem sync mnt
+ btrfs qgroup destroy 1/1 mnt
+
+will fail with EBUSY. The reason is that 1/1 does the quick accounting
+when we assign subvol to it, gaining its exclusive usage as excl and
+excl_cmpr. But then when we delete subvol, the decrement happens via
+record_squota_delta() which does not update excl_cmpr, as squotas does
+not make any distinction between compressed and normal extents. Thus,
+we increment excl_cmpr but never decrement it, and are unable to delete
+1/1. The two possible fixes are to make squota always mirror excl and
+excl_cmpr or to make the fast accounting separately track the plain and
+cmpr numbers. The latter felt cleaner to me so that is what I opted for.
+
+Fixes: 1e0e9d5771c3 ("btrfs: add helper for recording simple quota deltas")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/qgroup.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1501,6 +1501,7 @@ static int __qgroup_excl_accounting(stru
+ struct btrfs_qgroup *qgroup;
+ LIST_HEAD(qgroup_list);
+ u64 num_bytes = src->excl;
++ u64 num_bytes_cmpr = src->excl_cmpr;
+ int ret = 0;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+@@ -1512,11 +1513,12 @@ static int __qgroup_excl_accounting(stru
+ struct btrfs_qgroup_list *glist;
+
+ qgroup->rfer += sign * num_bytes;
+- qgroup->rfer_cmpr += sign * num_bytes;
++ qgroup->rfer_cmpr += sign * num_bytes_cmpr;
+
+ WARN_ON(sign < 0 && qgroup->excl < num_bytes);
++ WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
+ qgroup->excl += sign * num_bytes;
+- qgroup->excl_cmpr += sign * num_bytes;
++ qgroup->excl_cmpr += sign * num_bytes_cmpr;
+
+ if (sign > 0)
+ qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
--- /dev/null
+From f6a6c280059c4ddc23e12e3de1b01098e240036f Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Tue, 26 Aug 2025 11:24:38 -0700
+Subject: btrfs: fix subvolume deletion lockup caused by inodes xarray race
+
+From: Omar Sandoval <osandov@fb.com>
+
+commit f6a6c280059c4ddc23e12e3de1b01098e240036f upstream.
+
+There is a race condition between inode eviction and inode caching that
+can cause a live struct btrfs_inode to be missing from the root->inodes
+xarray. Specifically, there is a window during evict() between the inode
+being unhashed and deleted from the xarray. If btrfs_iget() is called
+for the same inode in that window, it will be recreated and inserted
+into the xarray, but then eviction will delete the new entry, leaving
+nothing in the xarray:
+
+Thread 1 Thread 2
+---------------------------------------------------------------
+evict()
+ remove_inode_hash()
+ btrfs_iget_path()
+ btrfs_iget_locked()
+ btrfs_read_locked_inode()
+ btrfs_add_inode_to_root()
+ destroy_inode()
+ btrfs_destroy_inode()
+ btrfs_del_inode_from_root()
+ __xa_erase
+
+In turn, this can cause issues for subvolume deletion. Specifically, if
+an inode is in this lost state, and all other inodes are evicted, then
+btrfs_del_inode_from_root() will call btrfs_add_dead_root() prematurely.
+If the lost inode has a delayed_node attached to it, then when
+btrfs_clean_one_deleted_snapshot() calls btrfs_kill_all_delayed_nodes(),
+it will loop forever because the delayed_nodes xarray will never become
+empty (unless memory pressure forces the inode out). We saw this
+manifest as soft lockups in production.
+
+Fix it by only deleting the xarray entry if it matches the given inode
+(using __xa_cmpxchg()).
+
+Fixes: 310b2f5d5a94 ("btrfs: use an xarray to track open inodes in a root")
+Cc: stable@vger.kernel.org # 6.11+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Co-authored-by: Leo Martins <loemra.dev@gmail.com>
+Signed-off-by: Leo Martins <loemra.dev@gmail.com>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5634,7 +5634,17 @@ static void btrfs_del_inode_from_root(st
+ bool empty = false;
+
+ xa_lock(&root->inodes);
+- entry = __xa_erase(&root->inodes, btrfs_ino(inode));
++ /*
++ * This btrfs_inode is being freed and has already been unhashed at this
++ * point. It's possible that another btrfs_inode has already been
++ * allocated for the same inode and inserted itself into the root, so
++ * don't delete it in that case.
++ *
++ * Note that this shouldn't need to allocate memory, so the gfp flags
++ * don't really matter.
++ */
++ entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
++ GFP_ATOMIC);
+ if (entry == inode)
+ empty = xa_empty(&root->inodes);
+ xa_unlock(&root->inodes);
--- /dev/null
+From 3fac212fe489aa0dbe8d80a42a7809840ca7b0f9 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Tue, 2 Sep 2025 15:49:26 -0700
+Subject: compiler-clang.h: define __SANITIZE_*__ macros only when undefined
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 3fac212fe489aa0dbe8d80a42a7809840ca7b0f9 upstream.
+
+Clang 22 recently added support for defining __SANITIZE__ macros similar
+to GCC [1], which causes warnings (or errors with CONFIG_WERROR=y or W=e)
+with the existing defines that the kernel creates to emulate this behavior
+with existing clang versions.
+
+ In file included from <built-in>:3:
+ In file included from include/linux/compiler_types.h:171:
+ include/linux/compiler-clang.h:37:9: error: '__SANITIZE_THREAD__' macro redefined [-Werror,-Wmacro-redefined]
+ 37 | #define __SANITIZE_THREAD__
+ | ^
+ <built-in>:352:9: note: previous definition is here
+ 352 | #define __SANITIZE_THREAD__ 1
+ | ^
+
+Refactor compiler-clang.h to only define the sanitizer macros when they
+are undefined and adjust the rest of the code to use these macros for
+checking if the sanitizers are enabled, clearing up the warnings and
+allowing the kernel to easily drop these defines when the minimum
+supported version of LLVM for building the kernel becomes 22.0.0 or newer.
+
+Link: https://lkml.kernel.org/r/20250902-clang-update-sanitize-defines-v1-1-cf3702ca3d92@kernel.org
+Link: https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Justin Stitt <justinstitt@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler-clang.h | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -18,23 +18,42 @@
+ #define KASAN_ABI_VERSION 5
+
+ /*
++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
++ * dropping __has_feature support for sanitizers:
++ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
++ * Create these macros for older versions of clang so that it is easy to clean
++ * up once the minimum supported version of LLVM for building the kernel always
++ * creates these macros.
++ *
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
++#define __SANITIZE_ADDRESS__
++#endif
++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
++#define __SANITIZE_HWADDRESS__
++#endif
++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
++#define __SANITIZE_THREAD__
++#endif
+
+-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
++/*
++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
++ */
++#ifdef __SANITIZE_HWADDRESS__
+ #define __SANITIZE_ADDRESS__
++#endif
++
++#ifdef __SANITIZE_ADDRESS__
+ #define __no_sanitize_address \
+ __attribute__((no_sanitize("address", "hwaddress")))
+ #else
+ #define __no_sanitize_address
+ #endif
+
+-#if __has_feature(thread_sanitizer)
+-/* emulate gcc's __SANITIZE_THREAD__ flag */
+-#define __SANITIZE_THREAD__
++#ifdef __SANITIZE_THREAD__
+ #define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+ #else
--- /dev/null
+From 3318f2d20ce48849855df5e190813826d0bc3653 Mon Sep 17 00:00:00 2001
+From: David Rosca <david.rosca@amd.com>
+Date: Mon, 18 Aug 2025 09:18:37 +0200
+Subject: drm/amdgpu/vcn: Allow limiting ctx to instance 0 for AV1 at any time
+
+From: David Rosca <david.rosca@amd.com>
+
+commit 3318f2d20ce48849855df5e190813826d0bc3653 upstream.
+
+There is no reason to require this to happen on first submitted IB only.
+We need to wait for the queue to be idle, but it can be done at any
+time (including when there are multiple video sessions active).
+
+Signed-off-by: David Rosca <david.rosca@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 8908fdce0634a623404e9923ed2f536101a39db5)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 12 ++++++++----
+ drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 12 ++++++++----
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1813,15 +1813,19 @@ static int vcn_v3_0_limit_sched(struct a
+ struct amdgpu_job *job)
+ {
+ struct drm_gpu_scheduler **scheds;
+-
+- /* The create msg must be in the first IB submitted */
+- if (atomic_read(&job->base.entity->fence_seq))
+- return -EINVAL;
++ struct dma_fence *fence;
+
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
++ /* wait for all jobs to finish before switching to instance 0 */
++ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++ if (fence) {
++ dma_fence_wait(fence, false);
++ dma_fence_put(fence);
++ }
++
+ scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+ [AMDGPU_RING_PRIO_DEFAULT].sched;
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1737,15 +1737,19 @@ static int vcn_v4_0_limit_sched(struct a
+ struct amdgpu_job *job)
+ {
+ struct drm_gpu_scheduler **scheds;
+-
+- /* The create msg must be in the first IB submitted */
+- if (atomic_read(&job->base.entity->fence_seq))
+- return -EINVAL;
++ struct dma_fence *fence;
+
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
++ /* wait for all jobs to finish before switching to instance 0 */
++ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++ if (fence) {
++ dma_fence_wait(fence, false);
++ dma_fence_put(fence);
++ }
++
+ scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
+ [AMDGPU_RING_PRIO_0].sched;
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
--- /dev/null
+From 2b10cb58d7a3fd621ec9b2ba765a092e562ef998 Mon Sep 17 00:00:00 2001
+From: David Rosca <david.rosca@amd.com>
+Date: Mon, 18 Aug 2025 09:06:58 +0200
+Subject: drm/amdgpu/vcn4: Fix IB parsing with multiple engine info packages
+
+From: David Rosca <david.rosca@amd.com>
+
+commit 2b10cb58d7a3fd621ec9b2ba765a092e562ef998 upstream.
+
+There can be multiple engine info packages in one IB and the first one
+may be common engine, not decode/encode.
+We need to parse the entire IB instead of stopping after finding first
+engine info.
+
+Signed-off-by: David Rosca <david.rosca@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit dc8f9f0f45166a6b37864e7a031c726981d6e5fc)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 52 +++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 31 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1840,22 +1840,16 @@ out:
+
+ #define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
+ #define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
+-
+ #define RADEON_VCN_ENGINE_INFO (0x30000001)
+-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
+-
+ #define RENCODE_ENCODE_STANDARD_AV1 2
+ #define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
+-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
+
+-/* return the offset in ib if id is found, -1 otherwise
+- * to speed up the searching we only search upto max_offset
+- */
+-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
++/* return the offset in ib if id is found, -1 otherwise */
++static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
+ {
+ int i;
+
+- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
++ for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
+ if (ib->ptr[i + 1] == id)
+ return i;
+ }
+@@ -1870,33 +1864,29 @@ static int vcn_v4_0_ring_patch_cs_in_pla
+ struct amdgpu_vcn_decode_buffer *decode_buffer;
+ uint64_t addr;
+ uint32_t val;
+- int idx;
++ int idx = 0, sidx;
+
+ /* The first instance can decode anything */
+ if (!ring->me)
+ return 0;
+
+- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+- idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+- if (idx < 0) /* engine info is missing */
+- return 0;
+-
+- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+-
+- if (!(decode_buffer->valid_buf_flag & 0x1))
+- return 0;
+-
+- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+- decode_buffer->msg_buffer_address_lo;
+- return vcn_v4_0_dec_msg(p, job, addr);
+- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+- idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+- return vcn_v4_0_limit_sched(p, job);
++ while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
++ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
++ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
++ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
++
++ if (!(decode_buffer->valid_buf_flag & 0x1))
++ return 0;
++
++ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
++ decode_buffer->msg_buffer_address_lo;
++ return vcn_v4_0_dec_msg(p, job, addr);
++ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
++ sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
++ if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
++ return vcn_v4_0_limit_sched(p, job);
++ }
++ idx += ib->ptr[idx] / 4;
+ }
+ return 0;
+ }
--- /dev/null
+From 4de37a48b6b58faaded9eb765047cf0d8785ea18 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 29 Aug 2025 11:03:44 +0200
+Subject: drm/mediatek: fix potential OF node use-after-free
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 4de37a48b6b58faaded9eb765047cf0d8785ea18 upstream.
+
+The for_each_child_of_node() helper drops the reference it takes to each
+node as it iterates over children and an explicit of_node_put() is only
+needed when exiting the loop early.
+
+Drop the recently introduced bogus additional reference count decrement
+at each iteration that could potentially lead to a use-after-free.
+
+Fixes: 1f403699c40f ("drm/mediatek: Fix device/node reference count leaks in mtk_drm_get_all_drm_priv")
+Cc: Ma Ke <make24@iscas.ac.cn>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: CK Hu <ck.hu@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://patchwork.kernel.org/project/dri-devel/patch/20250829090345.21075-2-johan@kernel.org/
+Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mediatek/mtk_drm_drv.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -381,11 +381,11 @@ static bool mtk_drm_get_all_drm_priv(str
+
+ of_id = of_match_node(mtk_drm_of_ids, node);
+ if (!of_id)
+- goto next_put_node;
++ continue;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+- goto next_put_node;
++ continue;
+
+ drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+ if (!drm_dev)
+@@ -411,11 +411,10 @@ next_put_device_drm_dev:
+ next_put_device_pdev_dev:
+ put_device(&pdev->dev);
+
+-next_put_node:
+- of_node_put(node);
+-
+- if (cnt == MAX_CRTC)
++ if (cnt == MAX_CRTC) {
++ of_node_put(node);
+ break;
++ }
+ }
+
+ if (drm_priv->data->mmsys_dev_num == cnt) {
--- /dev/null
+From 5c87fee3c96ce898ad681552404a66c7605193c0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Thu, 4 Sep 2025 18:07:13 +0200
+Subject: drm/xe: Attempt to bring bos back to VRAM after eviction
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 5c87fee3c96ce898ad681552404a66c7605193c0 upstream.
+
+VRAM+TT bos that are evicted from VRAM to TT may remain in
+TT also after a revalidation following eviction or suspend.
+
+This manifests itself as applications becoming sluggish
+after buffer objects get evicted or after a resume from
+suspend or hibernation.
+
+If the bo supports placement in both VRAM and TT, and
+we are on DGFX, mark the TT placement as fallback. This means
+that it is tried only after VRAM + eviction.
+
+This flaw has probably been present since the xe module was
+upstreamed but use a Fixes: commit below where backporting is
+likely to be simple. For earlier versions we need to open-
+code the fallback algorithm in the driver.
+
+v2:
+- Remove check for dgfx. (Matthew Auld)
+- Update the xe_dma_buf kunit test for the new strategy (CI)
+- Allow dma-buf to pin in current placement (CI)
+- Make xe_bo_validate() for pinned bos a NOP.
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/5995
+Fixes: a78a8da51b36 ("drm/ttm: replace busy placement with flags v6")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v6.9+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250904160715.2613-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit cb3d7b3b46b799c96b54f8e8fe36794a55a77f0b)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/tests/xe_bo.c | 2 +-
+ drivers/gpu/drm/xe/tests/xe_dma_buf.c | 10 +---------
+ drivers/gpu/drm/xe/xe_bo.c | 16 ++++++++++++----
+ drivers/gpu/drm/xe/xe_bo.h | 2 +-
+ drivers/gpu/drm/xe/xe_dma_buf.c | 2 +-
+ 5 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/xe/tests/xe_bo.c
++++ b/drivers/gpu/drm/xe/tests/xe_bo.c
+@@ -222,7 +222,7 @@ static int evict_test_run_tile(struct xe
+ }
+
+ xe_bo_lock(external, false);
+- err = xe_bo_pin_external(external);
++ err = xe_bo_pin_external(external, false);
+ xe_bo_unlock(external);
+ if (err) {
+ KUNIT_FAIL(test, "external bo pin err=%pe\n",
+--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+@@ -89,15 +89,7 @@ static void check_residency(struct kunit
+ return;
+ }
+
+- /*
+- * If on different devices, the exporter is kept in system if
+- * possible, saving a migration step as the transfer is just
+- * likely as fast from system memory.
+- */
+- if (params->mem_mask & XE_BO_FLAG_SYSTEM)
+- KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
+- else
+- KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
++ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+
+ if (params->force_different_devices)
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -157,6 +157,8 @@ static void try_add_system(struct xe_dev
+
+ bo->placements[*c] = (struct ttm_place) {
+ .mem_type = XE_PL_TT,
++ .flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
++ TTM_PL_FLAG_FALLBACK : 0,
+ };
+ *c += 1;
+ }
+@@ -1743,6 +1745,7 @@ uint64_t vram_region_gpu_offset(struct t
+ /**
+ * xe_bo_pin_external - pin an external BO
+ * @bo: buffer object to be pinned
++ * @in_place: Pin in current placement, don't attempt to migrate.
+ *
+ * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
+ * BO. Unique call compared to xe_bo_pin as this function has it own set of
+@@ -1750,7 +1753,7 @@ uint64_t vram_region_gpu_offset(struct t
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+-int xe_bo_pin_external(struct xe_bo *bo)
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+ {
+ struct xe_device *xe = xe_bo_device(bo);
+ int err;
+@@ -1759,9 +1762,11 @@ int xe_bo_pin_external(struct xe_bo *bo)
+ xe_assert(xe, xe_bo_is_user(bo));
+
+ if (!xe_bo_is_pinned(bo)) {
+- err = xe_bo_validate(bo, NULL, false);
+- if (err)
+- return err;
++ if (!in_place) {
++ err = xe_bo_validate(bo, NULL, false);
++ if (err)
++ return err;
++ }
+
+ if (xe_bo_is_vram(bo)) {
+ spin_lock(&xe->pinned.lock);
+@@ -1913,6 +1918,9 @@ int xe_bo_validate(struct xe_bo *bo, str
+ .no_wait_gpu = false,
+ };
+
++ if (xe_bo_is_pinned(bo))
++ return 0;
++
+ if (vm) {
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+--- a/drivers/gpu/drm/xe/xe_bo.h
++++ b/drivers/gpu/drm/xe/xe_bo.h
+@@ -173,7 +173,7 @@ static inline void xe_bo_unlock_vm_held(
+ }
+ }
+
+-int xe_bo_pin_external(struct xe_bo *bo);
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
+ int xe_bo_pin(struct xe_bo *bo);
+ void xe_bo_unpin_external(struct xe_bo *bo);
+ void xe_bo_unpin(struct xe_bo *bo);
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -72,7 +72,7 @@ static int xe_dma_buf_pin(struct dma_buf
+ return ret;
+ }
+
+- ret = xe_bo_pin_external(bo);
++ ret = xe_bo_pin_external(bo, true);
+ xe_assert(xe, !ret);
+
+ return 0;
--- /dev/null
+From ff2a66d21fd2364ed9396d151115eec59612b200 Mon Sep 17 00:00:00 2001
+From: Salah Triki <salah.triki@gmail.com>
+Date: Thu, 31 Jul 2025 04:15:27 +0100
+Subject: EDAC/altera: Delete an inappropriate dma_free_coherent() call
+
+From: Salah Triki <salah.triki@gmail.com>
+
+commit ff2a66d21fd2364ed9396d151115eec59612b200 upstream.
+
+dma_free_coherent() must only be called if the corresponding
+dma_alloc_coherent() call has succeeded. Calling it when the allocation fails
+leads to undefined behavior.
+
+Delete the wrong call.
+
+ [ bp: Massage commit message. ]
+
+Fixes: 71bcada88b0f3 ("edac: altera: Add Altera SDRAM EDAC support")
+Signed-off-by: Salah Triki <salah.triki@gmail.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Dinh Nguyen <dinguyen@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/aIrfzzqh4IzYtDVC@pc
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/altera_edac.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_wr
+
+ ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+ if (!ptemp) {
+- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Inject: Buffer Allocation error\n");
+ return -ENOMEM;
--- /dev/null
+From e5203209b3935041dac541bc5b37efb44220cc0b Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 12 Aug 2025 14:07:54 +0200
+Subject: fuse: check if copy_file_range() returns larger than requested size
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit e5203209b3935041dac541bc5b37efb44220cc0b upstream.
+
+Just like write(), copy_file_range() should check if the return value is
+less or equal to the requested number of bytes.
+
+Reported-by: Chunsheng Luo <luochunsheng@ustc.edu>
+Closes: https://lore.kernel.org/all/20250807062425.694-1-luochunsheng@ustc.edu/
+Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()")
+Cc: <stable@vger.kernel.org> # v4.20
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3295,6 +3295,9 @@ static ssize_t __fuse_copy_file_range(st
+ fc->no_copy_file_range = 1;
+ err = -EOPNOTSUPP;
+ }
++ if (!err && outarg.size > len)
++ err = -EIO;
++
+ if (err)
+ goto out;
+
--- /dev/null
+From e9c8da670e749f7dedc53e3af54a87b041918092 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Thu, 10 Jul 2025 12:08:30 +0200
+Subject: fuse: do not allow mapping a non-regular backing file
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit e9c8da670e749f7dedc53e3af54a87b041918092 upstream.
+
+We do not support passthrough operations other than read/write on
+regular file, so allowing non-regular backing files makes no sense.
+
+Fixes: efad7153bf93 ("fuse: allow O_PATH fd for FUSE_DEV_IOC_BACKING_OPEN")
+Cc: stable@vger.kernel.org
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Bernd Schubert <bschubert@ddn.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/passthrough.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/fuse/passthrough.c
++++ b/fs/fuse/passthrough.c
+@@ -233,6 +233,11 @@ int fuse_backing_open(struct fuse_conn *
+ if (!file)
+ goto out;
+
++ /* read/write/splice/mmap passthrough only relevant for regular files */
++ res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
++ if (!d_is_reg(file->f_path.dentry))
++ goto out_fput;
++
+ backing_sb = file_inode(file)->i_sb;
+ res = -ELOOP;
+ if (backing_sb->s_stack_depth >= fc->max_stack_depth)
--- /dev/null
+From 1e08938c3694f707bb165535df352ac97a8c75c9 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 12 Aug 2025 14:46:34 +0200
+Subject: fuse: prevent overflow in copy_file_range return value
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 1e08938c3694f707bb165535df352ac97a8c75c9 upstream.
+
+The FUSE protocol uses struct fuse_write_out to convey the return value of
+copy_file_range, which is restricted to uint32_t. But the COPY_FILE_RANGE
+interface supports a 64-bit size copies.
+
+Currently the number of bytes copied is silently truncated to 32-bit, which
+may result in poor performance or even failure to copy in case of
+truncation to zero.
+
+Reported-by: Florian Weimer <fweimer@redhat.com>
+Closes: https://lore.kernel.org/all/lhuh5ynl8z5.fsf@oldenburg.str.redhat.com/
+Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()")
+Cc: <stable@vger.kernel.org> # v4.20
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3229,7 +3229,7 @@ static ssize_t __fuse_copy_file_range(st
+ .nodeid_out = ff_out->nodeid,
+ .fh_out = ff_out->fh,
+ .off_out = pos_out,
+- .len = len,
++ .len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
+ .flags = flags
+ };
+ struct fuse_write_out outarg;
--- /dev/null
+From 664596bd98bb251dd417dfd3f9b615b661e1e44a Mon Sep 17 00:00:00 2001
+From: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+Date: Mon, 1 Sep 2025 20:59:43 +0800
+Subject: i2c: i801: Hide Intel Birch Stream SoC TCO WDT
+
+From: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+
+commit 664596bd98bb251dd417dfd3f9b615b661e1e44a upstream.
+
+Hide the Intel Birch Stream SoC TCO WDT feature since it was removed.
+
+On platforms with PCH TCO WDT, this redundant device might be rendering
+errors like this:
+
+[ 28.144542] sysfs: cannot create duplicate filename '/bus/platform/devices/iTCO_wdt'
+
+Fixes: 8c56f9ef25a3 ("i2c: i801: Add support for Intel Birch Stream SoC")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=220320
+Signed-off-by: Chiasheng Lee <chiasheng.lee@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.7+
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250901125943.916522-1-chiasheng.lee@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-i801.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1057,7 +1057,7 @@ static const struct pci_device_id i801_i
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+- { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
--- /dev/null
+From ce652aac9c90a96c6536681d17518efb1f660fb8 Mon Sep 17 00:00:00 2001
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+Date: Fri, 22 Aug 2025 11:50:57 +0900
+Subject: mm/damon/core: set quota->charged_from to jiffies at first charge window
+
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+
+commit ce652aac9c90a96c6536681d17518efb1f660fb8 upstream.
+
+Kernel initializes the "jiffies" timer as 5 minutes below zero, as shown
+in include/linux/jiffies.h
+
+ /*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+ #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+And jiffies comparison help functions cast unsigned value to signed to
+cover wraparound
+
+ #define time_after_eq(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+ ((long)((a) - (b)) >= 0))
+
+When quota->charged_from is initialized to 0, time_after_eq() can
+incorrectly return FALSE even after reset_interval has elapsed. This
+occurs when (jiffies - reset_interval) produces a value with MSB=1, which
+is interpreted as negative in signed arithmetic.
+
+This issue primarily affects 32-bit systems because: On 64-bit systems:
+MSB=1 values occur after ~292 million years from boot (assuming HZ=1000),
+almost impossible.
+
+On 32-bit systems: MSB=1 values occur during the first 5 minutes after
+boot, and the second half of every jiffies wraparound cycle, starting from
+day 25 (assuming HZ=1000)
+
+When above unexpected FALSE return from time_after_eq() occurs, the
+charging window will not reset. The user impact depends on esz value at
+that time.
+
+If esz is 0, scheme ignores configured quotas and runs without any limits.
+
+If esz is not 0, scheme stops working once the quota is exhausted. It
+remains until the charging window finally resets.
+
+So, change quota->charged_from to jiffies at damos_adjust_quota() when it
+is considered as the first charge window. By this change, we can avoid
+unexpected FALSE return from time_after_eq()
+
+Link: https://lkml.kernel.org/r/20250822025057.1740854-1-ekffu200098@gmail.com
+Fixes: 2b8a248d5873 ("mm/damon/schemes: implement size quota for schemes application speed control") # 5.16
+Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1596,6 +1596,10 @@ static void damos_adjust_quota(struct da
+ if (!quota->ms && !quota->sz && list_empty(&quota->goals))
+ return;
+
++ /* First charge window */
++ if (!quota->total_charged_sz && !quota->charged_from)
++ quota->charged_from = jiffies;
++
+ /* New charge window starts */
+ if (time_after_eq(jiffies, quota->charged_from +
+ msecs_to_jiffies(quota->reset_interval))) {
--- /dev/null
+From 711f19dfd783ffb37ca4324388b9c4cb87e71363 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:57 +0800
+Subject: mm/damon/lru_sort: avoid divide-by-zero in damon_lru_sort_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit 711f19dfd783ffb37ca4324388b9c4cb87e71363 upstream.
+
+Patch series "mm/damon: avoid divide-by-zero in DAMON module's parameters
+application".
+
+DAMON's RECLAIM and LRU_SORT modules perform no validation on
+user-configured parameters during application, which may lead to
+division-by-zero errors.
+
+Avoid the divide-by-zero by adding validation checks when DAMON modules
+attempt to apply the parameters.
+
+
+This patch (of 2):
+
+During the calculation of 'hot_thres' and 'cold_thres', either
+'sample_interval' or 'aggr_interval' is used as the divisor, which may
+lead to division-by-zero errors. Fix it by directly returning -EINVAL
+when such a case occurs. Additionally, since 'aggr_interval' is already
+required to be set no smaller than 'sample_interval' in damon_set_attrs(),
+only the case where 'sample_interval' is zero needs to be checked.
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-2-yanquanmin1@huawei.com
+Fixes: 40e983cca927 ("mm/damon: introduce DAMON-based LRU-lists Sorting")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org> [6.0+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/lru_sort.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_paramete
+ if (err)
+ return err;
+
++ if (!damon_lru_sort_mon_attrs.sample_interval) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+ if (err)
+ goto out;
--- /dev/null
+From 394bfac1c7f7b701c2c93834c5761b9c9ceeebcf Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Fri, 22 Aug 2025 06:33:18 +0000
+Subject: mm/khugepaged: fix the address passed to notifier on testing young
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit 394bfac1c7f7b701c2c93834c5761b9c9ceeebcf upstream.
+
+Commit 8ee53820edfd ("thp: mmu_notifier_test_young") introduced
+mmu_notifier_test_young(), but we are passing the wrong address.
+In xxx_scan_pmd(), the actual iteration address is "_address" not
+"address". We seem to misuse the variable on the very beginning.
+
+Change it to the right one.
+
+[akpm@linux-foundation.org fix whitespace, per everyone]
+Link: https://lkml.kernel.org/r/20250822063318.11644-1-richard.weiyang@gmail.com
+Fixes: 8ee53820edfd ("thp: mmu_notifier_test_young")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1403,8 +1403,8 @@ static int hpage_collapse_scan_pmd(struc
+ */
+ if (cc->is_khugepaged &&
+ (pte_young(pteval) || folio_test_young(folio) ||
+- folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
+- address)))
++ folio_test_referenced(folio) ||
++ mmu_notifier_test_young(vma->vm_mm, _address)))
+ referenced++;
+ }
+ if (!writable) {
--- /dev/null
+From 3be306cccdccede13e3cefd0c14e430cc2b7c9c7 Mon Sep 17 00:00:00 2001
+From: Kyle Meyer <kyle.meyer@hpe.com>
+Date: Thu, 28 Aug 2025 13:38:20 -0500
+Subject: mm/memory-failure: fix redundant updates for already poisoned pages
+
+From: Kyle Meyer <kyle.meyer@hpe.com>
+
+commit 3be306cccdccede13e3cefd0c14e430cc2b7c9c7 upstream.
+
+Duplicate memory errors can be reported by multiple sources.
+
+Passing an already poisoned page to action_result() causes issues:
+
+* The amount of hardware corrupted memory is incorrectly updated.
+* Per NUMA node MF stats are incorrectly updated.
+* Redundant "already poisoned" messages are printed.
+
+Avoid those issues by:
+
+* Skipping hardware corrupted memory updates for already poisoned pages.
+* Skipping per NUMA node MF stats updates for already poisoned pages.
+* Dropping redundant "already poisoned" messages.
+
+Make MF_MSG_ALREADY_POISONED consistent with other action_page_types and
+make calls to action_result() consistent for already poisoned normal pages
+and huge pages.
+
+Link: https://lkml.kernel.org/r/aLCiHMy12Ck3ouwC@hpe.com
+Fixes: b8b9488d50b7 ("mm/memory-failure: improve memory failure action_result messages")
+Signed-off-by: Kyle Meyer <kyle.meyer@hpe.com>
+Reviewed-by: Jiaqi Yan <jiaqiyan@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Borislav Betkov <bp@alien8.de>
+Cc: Kyle Meyer <kyle.meyer@hpe.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: "Luck, Tony" <tony.luck@intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Russ Anderson <russ.anderson@hpe.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -948,7 +948,7 @@ static const char * const action_page_ty
+ [MF_MSG_BUDDY] = "free buddy page",
+ [MF_MSG_DAX] = "dax page",
+ [MF_MSG_UNSPLIT_THP] = "unsplit thp",
+- [MF_MSG_ALREADY_POISONED] = "already poisoned",
++ [MF_MSG_ALREADY_POISONED] = "already poisoned page",
+ [MF_MSG_UNKNOWN] = "unknown page",
+ };
+
+@@ -1341,9 +1341,10 @@ static int action_result(unsigned long p
+ {
+ trace_memory_failure_event(pfn, type, result);
+
+- num_poisoned_pages_inc(pfn);
+-
+- update_per_node_mf_stats(pfn, result);
++ if (type != MF_MSG_ALREADY_POISONED) {
++ num_poisoned_pages_inc(pfn);
++ update_per_node_mf_stats(pfn, result);
++ }
+
+ pr_err("%#lx: recovery action for %s: %s\n",
+ pfn, action_page_types[type], action_name[result]);
+@@ -2086,12 +2087,11 @@ retry:
+ *hugetlb = 0;
+ return 0;
+ } else if (res == -EHWPOISON) {
+- pr_err("%#lx: already hardware poisoned\n", pfn);
+ if (flags & MF_ACTION_REQUIRED) {
+ folio = page_folio(p);
+ res = kill_accessing_process(current, folio_pfn(folio), flags);
+- action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+ }
++ action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+ return res;
+ } else if (res == -EBUSY) {
+ if (!(flags & MF_NO_RETRY)) {
+@@ -2273,7 +2273,6 @@ try_again:
+ goto unlock_mutex;
+
+ if (TestSetPageHWPoison(p)) {
+- pr_err("%#lx: already hardware poisoned\n", pfn);
+ res = -EHWPOISON;
+ if (flags & MF_ACTION_REQUIRED)
+ res = kill_accessing_process(current, pfn, flags);
--- /dev/null
+From d613f53c83ec47089c4e25859d5e8e0359f6f8da Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 28 Aug 2025 10:46:18 +0800
+Subject: mm/memory-failure: fix VM_BUG_ON_PAGE(PagePoisoned(page)) when unpoison memory
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit d613f53c83ec47089c4e25859d5e8e0359f6f8da upstream.
+
+When I did memory failure tests, below panic occurs:
+
+page dumped because: VM_BUG_ON_PAGE(PagePoisoned(page))
+kernel BUG at include/linux/page-flags.h:616!
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+CPU: 3 PID: 720 Comm: bash Not tainted 6.10.0-rc1-00195-g148743902568 #40
+RIP: 0010:unpoison_memory+0x2f3/0x590
+RSP: 0018:ffffa57fc8787d60 EFLAGS: 00000246
+RAX: 0000000000000037 RBX: 0000000000000009 RCX: ffff9be25fcdc9c8
+RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff9be25fcdc9c0
+RBP: 0000000000300000 R08: ffffffffb4956f88 R09: 0000000000009ffb
+R10: 0000000000000284 R11: ffffffffb4926fa0 R12: ffffe6b00c000000
+R13: ffff9bdb453dfd00 R14: 0000000000000000 R15: fffffffffffffffe
+FS: 00007f08f04e4740(0000) GS:ffff9be25fcc0000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000564787a30410 CR3: 000000010d4e2000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ unpoison_memory+0x2f3/0x590
+ simple_attr_write_xsigned.constprop.0.isra.0+0xb3/0x110
+ debugfs_attr_write+0x42/0x60
+ full_proxy_write+0x5b/0x80
+ vfs_write+0xd5/0x540
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xb9/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f08f0314887
+RSP: 002b:00007ffece710078 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000009 RCX: 00007f08f0314887
+RDX: 0000000000000009 RSI: 0000564787a30410 RDI: 0000000000000001
+RBP: 0000564787a30410 R08: 000000000000fefe R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000009
+R13: 00007f08f041b780 R14: 00007f08f0417600 R15: 00007f08f0416a00
+ </TASK>
+Modules linked in: hwpoison_inject
+---[ end trace 0000000000000000 ]---
+RIP: 0010:unpoison_memory+0x2f3/0x590
+RSP: 0018:ffffa57fc8787d60 EFLAGS: 00000246
+RAX: 0000000000000037 RBX: 0000000000000009 RCX: ffff9be25fcdc9c8
+RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff9be25fcdc9c0
+RBP: 0000000000300000 R08: ffffffffb4956f88 R09: 0000000000009ffb
+R10: 0000000000000284 R11: ffffffffb4926fa0 R12: ffffe6b00c000000
+R13: ffff9bdb453dfd00 R14: 0000000000000000 R15: fffffffffffffffe
+FS: 00007f08f04e4740(0000) GS:ffff9be25fcc0000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000564787a30410 CR3: 000000010d4e2000 CR4: 00000000000006f0
+Kernel panic - not syncing: Fatal exception
+Kernel Offset: 0x31c00000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
+---[ end Kernel panic - not syncing: Fatal exception ]---
+
+The root cause is that unpoison_memory() tries to check the PG_HWPoison
+flags of an uninitialized page. So VM_BUG_ON_PAGE(PagePoisoned(page)) is
+triggered. This can be reproduced by below steps:
+
+1.Offline memory block:
+
+ echo offline > /sys/devices/system/memory/memory12/state
+
+2.Get offlined memory pfn:
+
+ page-types -b n -rlN
+
+3.Write pfn to unpoison-pfn
+
+ echo <pfn> > /sys/kernel/debug/hwpoison/unpoison-pfn
+
+This scenario can be identified by pfn_to_online_page() returning NULL.
+And ZONE_DEVICE pages are never expected, so we can simply fail if
+pfn_to_online_page() == NULL to fix the bug.
+
+Link: https://lkml.kernel.org/r/20250828024618.1744895-1-linmiaohe@huawei.com
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2570,10 +2570,9 @@ int unpoison_memory(unsigned long pfn)
+ static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+- if (!pfn_valid(pfn))
+- return -ENXIO;
+-
+- p = pfn_to_page(pfn);
++ p = pfn_to_online_page(pfn);
++ if (!p)
++ return -EIO;
+ folio = page_folio(p);
+
+ mutex_lock(&mf_mutex);
--- /dev/null
+From 648de37416b301f046f62f1b65715c7fa8ebaa67 Mon Sep 17 00:00:00 2001
+From: Krister Johansen <kjlx@templeofstupid.com>
+Date: Mon, 8 Sep 2025 11:16:01 -0700
+Subject: mptcp: sockopt: make sync_socket_options propagate SOCK_KEEPOPEN
+
+From: Krister Johansen <kjlx@templeofstupid.com>
+
+commit 648de37416b301f046f62f1b65715c7fa8ebaa67 upstream.
+
+Users reported a scenario where MPTCP connections that were configured
+with SO_KEEPALIVE prior to connect would fail to enable their keepalives
+if MTPCP fell back to TCP mode.
+
+After investigating, this affects keepalives for any connection where
+sync_socket_options is called on a socket that is in the closed or
+listening state. Joins are handled properly. For connects,
+sync_socket_options is called when the socket is still in the closed
+state. The tcp_set_keepalive() function does not act on sockets that
+are closed or listening, hence keepalive is not immediately enabled.
+Since the SO_KEEPOPEN flag is absent, it is not enabled later in the
+connect sequence via tcp_finish_connect. Setting the keepalive via
+sockopt after connect does work, but would not address any subsequently
+created flows.
+
+Fortunately, the fix here is straight-forward: set SOCK_KEEPOPEN on the
+subflow when calling sync_socket_options.
+
+The fix was validated both by using tcpdump to observe keepalive
+packets not being sent before the fix, and being sent after the fix. It
+was also possible to observe via ss that the keepalive timer was not
+enabled on these sockets before the fix, but was enabled afterwards.
+
+Fixes: 1b3e7ede1365 ("mptcp: setsockopt: handle SO_KEEPALIVE and SO_PRIORITY")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krister Johansen <kjlx@templeofstupid.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/aL8dYfPZrwedCIh9@templeofstupid.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/sockopt.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1508,13 +1508,12 @@ static void sync_socket_options(struct m
+ {
+ static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
+ struct sock *sk = (struct sock *)msk;
++ bool keep_open;
+
+- if (ssk->sk_prot->keepalive) {
+- if (sock_flag(sk, SOCK_KEEPOPEN))
+- ssk->sk_prot->keepalive(ssk, 1);
+- else
+- ssk->sk_prot->keepalive(ssk, 0);
+- }
++ keep_open = sock_flag(sk, SOCK_KEEPOPEN);
++ if (ssk->sk_prot->keepalive)
++ ssk->sk_prot->keepalive(ssk, keep_open);
++ sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
+
+ ssk->sk_priority = sk->sk_priority;
+ ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
--- /dev/null
+From fd779eac2d659668be4d3dbdac0710afd5d6db12 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Date: Thu, 21 Aug 2025 14:00:57 +0200
+Subject: mtd: nand: raw: atmel: Respect tAR, tCLR in read setup timing
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+commit fd779eac2d659668be4d3dbdac0710afd5d6db12 upstream.
+
+Having setup time 0 violates tAR, tCLR of some chips, for instance
+TOSHIBA TC58NVG2S3ETAI0 cannot be detected successfully (first ID byte
+being read duplicated, i.e. 98 98 dc 90 15 76 14 03 instead of
+98 dc 90 15 76 ...).
+
+Atmel Application Notes postulated 1 cycle NRD_SETUP without explanation
+[1], but it looks more appropriate to just calculate setup time properly.
+
+[1] Link: https://ww1.microchip.com/downloads/aemDocuments/documents/MPU32/ApplicationNotes/ApplicationNotes/doc6255.pdf
+
+Cc: stable@vger.kernel.org
+Fixes: f9ce2eddf176 ("mtd: nand: atmel: Add ->setup_data_interface() hooks")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Tested-by: Alexander Dahl <ada@thorsis.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/atmel/nand-controller.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smccon
+ return ret;
+
+ /*
++ * Read setup timing depends on the operation done on the NAND:
++ *
++ * NRD_SETUP = max(tAR, tCLR)
++ */
++ timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++ totalcycles += ncycles;
++ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++ if (ret)
++ return ret;
++
++ /*
+ * The read cycle timing is directly matching tRC, but is also
+ * dependent on the setup and hold timings we calculated earlier,
+ * which gives:
+ *
+- * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+- *
+- * NRD_SETUP is always 0.
++ * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
--- /dev/null
+From 513c40e59d5a414ab763a9c84797534b5e8c208d Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+Date: Tue, 12 Aug 2025 09:26:58 +0200
+Subject: mtd: rawnand: stm32_fmc2: avoid overlapping mappings on ECC buffer
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+commit 513c40e59d5a414ab763a9c84797534b5e8c208d upstream.
+
+Avoid below overlapping mappings by using a contiguous
+non-cacheable buffer.
+
+[ 4.077708] DMA-API: stm32_fmc2_nfc 48810000.nand-controller: cacheline tracking EEXIST,
+overlapping mappings aren't supported
+[ 4.089103] WARNING: CPU: 1 PID: 44 at kernel/dma/debug.c:568 add_dma_entry+0x23c/0x300
+[ 4.097071] Modules linked in:
+[ 4.100101] CPU: 1 PID: 44 Comm: kworker/u4:2 Not tainted 6.1.82 #1
+[ 4.106346] Hardware name: STMicroelectronics STM32MP257F VALID1 SNOR / MB1704 (LPDDR4 Power discrete) + MB1703 + MB1708 (SNOR MB1730) (DT)
+[ 4.118824] Workqueue: events_unbound deferred_probe_work_func
+[ 4.124674] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 4.131624] pc : add_dma_entry+0x23c/0x300
+[ 4.135658] lr : add_dma_entry+0x23c/0x300
+[ 4.139792] sp : ffff800009dbb490
+[ 4.143016] x29: ffff800009dbb4a0 x28: 0000000004008022 x27: ffff8000098a6000
+[ 4.150174] x26: 0000000000000000 x25: ffff8000099e7000 x24: ffff8000099e7de8
+[ 4.157231] x23: 00000000ffffffff x22: 0000000000000000 x21: ffff8000098a6a20
+[ 4.164388] x20: ffff000080964180 x19: ffff800009819ba0 x18: 0000000000000006
+[ 4.171545] x17: 6361727420656e69 x16: 6c6568636163203a x15: 72656c6c6f72746e
+[ 4.178602] x14: 6f632d646e616e2e x13: ffff800009832f58 x12: 00000000000004ec
+[ 4.185759] x11: 00000000000001a4 x10: ffff80000988af58 x9 : ffff800009832f58
+[ 4.192916] x8 : 00000000ffffefff x7 : ffff80000988af58 x6 : 80000000fffff000
+[ 4.199972] x5 : 000000000000bff4 x4 : 0000000000000000 x3 : 0000000000000000
+[ 4.207128] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000812d2c40
+[ 4.214185] Call trace:
+[ 4.216605] add_dma_entry+0x23c/0x300
+[ 4.220338] debug_dma_map_sg+0x198/0x350
+[ 4.224373] __dma_map_sg_attrs+0xa0/0x110
+[ 4.228411] dma_map_sg_attrs+0x10/0x2c
+[ 4.232247] stm32_fmc2_nfc_xfer.isra.0+0x1c8/0x3fc
+[ 4.237088] stm32_fmc2_nfc_seq_read_page+0xc8/0x174
+[ 4.242127] nand_read_oob+0x1d4/0x8e0
+[ 4.245861] mtd_read_oob_std+0x58/0x84
+[ 4.249596] mtd_read_oob+0x90/0x150
+[ 4.253231] mtd_read+0x68/0xac
+
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Cc: stable@vger.kernel.org
+Fixes: 2cd457f328c1 ("mtd: rawnand: stm32_fmc2: add STM32 FMC2 NAND flash controller driver")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/stm32_fmc2_nand.c | 28 +++++++++-------------------
+ 1 file changed, 9 insertions(+), 19 deletions(-)
+
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -272,6 +272,7 @@ struct stm32_fmc2_nfc {
+ struct sg_table dma_data_sg;
+ struct sg_table dma_ecc_sg;
+ u8 *ecc_buf;
++ dma_addr_t dma_ecc_addr;
+ int dma_ecc_len;
+ u32 tx_dma_max_burst;
+ u32 rx_dma_max_burst;
+@@ -902,17 +903,10 @@ static int stm32_fmc2_nfc_xfer(struct na
+
+ if (!write_data && !raw) {
+ /* Configure DMA ECC status */
+- p = nfc->ecc_buf;
+ for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+- sg_set_buf(sg, p, nfc->dma_ecc_len);
+- p += nfc->dma_ecc_len;
+- }
+-
+- ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+- eccsteps, dma_data_dir);
+- if (!ret) {
+- ret = -EIO;
+- goto err_unmap_data;
++ sg_dma_address(sg) = nfc->dma_ecc_addr +
++ s * nfc->dma_ecc_len;
++ sg_dma_len(sg) = nfc->dma_ecc_len;
+ }
+
+ desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+@@ -921,7 +915,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+ DMA_PREP_INTERRUPT);
+ if (!desc_ecc) {
+ ret = -ENOMEM;
+- goto err_unmap_ecc;
++ goto err_unmap_data;
+ }
+
+ reinit_completion(&nfc->dma_ecc_complete);
+@@ -929,7 +923,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+ desc_ecc->callback_param = &nfc->dma_ecc_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ if (ret)
+- goto err_unmap_ecc;
++ goto err_unmap_data;
+
+ dma_async_issue_pending(nfc->dma_ecc_ch);
+ }
+@@ -949,7 +943,7 @@ static int stm32_fmc2_nfc_xfer(struct na
+ if (!write_data && !raw)
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+- goto err_unmap_ecc;
++ goto err_unmap_data;
+ }
+
+ /* Wait DMA data transfer completion */
+@@ -969,11 +963,6 @@ static int stm32_fmc2_nfc_xfer(struct na
+ }
+ }
+
+-err_unmap_ecc:
+- if (!write_data && !raw)
+- dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+- eccsteps, dma_data_dir);
+-
+ err_unmap_data:
+ dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+
+@@ -1610,7 +1599,8 @@ static int stm32_fmc2_nfc_dma_setup(stru
+ return ret;
+
+ /* Allocate a buffer to store ECC status registers */
+- nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
++ nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
++ &nfc->dma_ecc_addr, GFP_KERNEL);
+ if (!nfc->ecc_buf)
+ return -ENOMEM;
+
--- /dev/null
+From 811c0da4542df3c065f6cb843ced68780e27bb44 Mon Sep 17 00:00:00 2001
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+Date: Tue, 12 Aug 2025 09:30:08 +0200
+Subject: mtd: rawnand: stm32_fmc2: fix ECC overwrite
+
+From: Christophe Kerello <christophe.kerello@foss.st.com>
+
+commit 811c0da4542df3c065f6cb843ced68780e27bb44 upstream.
+
+In case OOB write is requested during a data write, ECC is currently
+lost. Avoid this issue by only writing in the free spare area.
+This issue has been seen with a YAFFS2 file system.
+
+Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
+Cc: stable@vger.kernel.org
+Fixes: 2cd457f328c1 ("mtd: rawnand: stm32_fmc2: add STM32 FMC2 NAND flash controller driver")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/stm32_fmc2_nand.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -985,9 +985,21 @@ static int stm32_fmc2_nfc_seq_write(stru
+
+ /* Write oob */
+ if (oob_required) {
+- ret = nand_change_write_column_op(chip, mtd->writesize,
+- chip->oob_poi, mtd->oobsize,
+- false);
++ unsigned int offset_in_page = mtd->writesize;
++ const void *buf = chip->oob_poi;
++ unsigned int len = mtd->oobsize;
++
++ if (!raw) {
++ struct mtd_oob_region oob_free;
++
++ mtd_ooblayout_free(mtd, 0, &oob_free);
++ offset_in_page += oob_free.offset;
++ buf += oob_free.offset;
++ len = oob_free.length;
++ }
++
++ ret = nand_change_write_column_op(chip, offset_in_page,
++ buf, len, false);
+ if (ret)
+ return ret;
+ }
--- /dev/null
+From 5537a4679403423e0b49c95b619983a4583d69c5 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Mon, 8 Sep 2025 13:26:19 +0200
+Subject: net: usb: asix: ax88772: drop phylink use in PM to avoid MDIO runtime PM wakeups
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 5537a4679403423e0b49c95b619983a4583d69c5 upstream.
+
+Drop phylink_{suspend,resume}() from ax88772 PM callbacks.
+
+MDIO bus accesses have their own runtime-PM handling and will try to
+wake the device if it is suspended. Such wake attempts must not happen
+from PM callbacks while the device PM lock is held. Since phylink
+{sus|re}sume may trigger MDIO, it must not be called in PM context.
+
+No extra phylink PM handling is required for this driver:
+- .ndo_open/.ndo_stop control the phylink start/stop lifecycle.
+- ethtool/phylib entry points run in process context, not PM.
+- phylink MAC ops program the MAC on link changes after resume.
+
+Fixes: e0bffe3e6894 ("net: asix: ax88772: migrate to phylink")
+Reported-by: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Tested-by: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+Tested-by: Xu Yang <xu.yang_2@nxp.com>
+Link: https://patch.msgid.link/20250908112619.2900723-1-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/asix_devices.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -607,15 +607,8 @@ static const struct net_device_ops ax887
+
+ static void ax88772_suspend(struct usbnet *dev)
+ {
+- struct asix_common_private *priv = dev->driver_priv;
+ u16 medium;
+
+- if (netif_running(dev->net)) {
+- rtnl_lock();
+- phylink_suspend(priv->phylink, false);
+- rtnl_unlock();
+- }
+-
+ /* Stop MAC operation */
+ medium = asix_read_medium_status(dev, 1);
+ medium &= ~AX_MEDIUM_RE;
+@@ -644,12 +637,6 @@ static void ax88772_resume(struct usbnet
+ for (i = 0; i < 3; i++)
+ if (!priv->reset(dev, 1))
+ break;
+-
+- if (netif_running(dev->net)) {
+- rtnl_lock();
+- phylink_resume(priv->phylink);
+- rtnl_unlock();
+- }
+ }
+
+ static int asix_resume(struct usb_interface *intf)
--- /dev/null
+From 04100f775c2ea501927f508f17ad824ad1f23c8d Mon Sep 17 00:00:00 2001
+From: Mark Tinguely <mark.tinguely@oracle.com>
+Date: Fri, 29 Aug 2025 10:18:15 -0500
+Subject: ocfs2: fix recursive semaphore deadlock in fiemap call
+
+From: Mark Tinguely <mark.tinguely@oracle.com>
+
+commit 04100f775c2ea501927f508f17ad824ad1f23c8d upstream.
+
+syzbot detected a OCFS2 hang due to a recursive semaphore on a
+FS_IOC_FIEMAP of the extent list on a specially crafted mmap file.
+
+context_switch kernel/sched/core.c:5357 [inline]
+ __schedule+0x1798/0x4cc0 kernel/sched/core.c:6961
+ __schedule_loop kernel/sched/core.c:7043 [inline]
+ schedule+0x165/0x360 kernel/sched/core.c:7058
+ schedule_preempt_disabled+0x13/0x30 kernel/sched/core.c:7115
+ rwsem_down_write_slowpath+0x872/0xfe0 kernel/locking/rwsem.c:1185
+ __down_write_common kernel/locking/rwsem.c:1317 [inline]
+ __down_write kernel/locking/rwsem.c:1326 [inline]
+ down_write+0x1ab/0x1f0 kernel/locking/rwsem.c:1591
+ ocfs2_page_mkwrite+0x2ff/0xc40 fs/ocfs2/mmap.c:142
+ do_page_mkwrite+0x14d/0x310 mm/memory.c:3361
+ wp_page_shared mm/memory.c:3762 [inline]
+ do_wp_page+0x268d/0x5800 mm/memory.c:3981
+ handle_pte_fault mm/memory.c:6068 [inline]
+ __handle_mm_fault+0x1033/0x5440 mm/memory.c:6195
+ handle_mm_fault+0x40a/0x8e0 mm/memory.c:6364
+ do_user_addr_fault+0x764/0x1390 arch/x86/mm/fault.c:1387
+ handle_page_fault arch/x86/mm/fault.c:1476 [inline]
+ exc_page_fault+0x76/0xf0 arch/x86/mm/fault.c:1532
+ asm_exc_page_fault+0x26/0x30 arch/x86/include/asm/idtentry.h:623
+RIP: 0010:copy_user_generic arch/x86/include/asm/uaccess_64.h:126 [inline]
+RIP: 0010:raw_copy_to_user arch/x86/include/asm/uaccess_64.h:147 [inline]
+RIP: 0010:_inline_copy_to_user include/linux/uaccess.h:197 [inline]
+RIP: 0010:_copy_to_user+0x85/0xb0 lib/usercopy.c:26
+Code: e8 00 bc f7 fc 4d 39 fc 72 3d 4d 39 ec 77 38 e8 91 b9 f7 fc 4c 89
+f7 89 de e8 47 25 5b fd 0f 01 cb 4c 89 ff 48 89 d9 4c 89 f6 <f3> a4 0f
+1f 00 48 89 cb 0f 01 ca 48 89 d8 5b 41 5c 41 5d 41 5e 41
+RSP: 0018:ffffc9000403f950 EFLAGS: 00050256
+RAX: ffffffff84c7f101 RBX: 0000000000000038 RCX: 0000000000000038
+RDX: 0000000000000000 RSI: ffffc9000403f9e0 RDI: 0000200000000060
+RBP: ffffc9000403fa90 R08: ffffc9000403fa17 R09: 1ffff92000807f42
+R10: dffffc0000000000 R11: fffff52000807f43 R12: 0000200000000098
+R13: 00007ffffffff000 R14: ffffc9000403f9e0 R15: 0000200000000060
+ copy_to_user include/linux/uaccess.h:225 [inline]
+ fiemap_fill_next_extent+0x1c0/0x390 fs/ioctl.c:145
+ ocfs2_fiemap+0x888/0xc90 fs/ocfs2/extent_map.c:806
+ ioctl_fiemap fs/ioctl.c:220 [inline]
+ do_vfs_ioctl+0x1173/0x1430 fs/ioctl.c:532
+ __do_sys_ioctl fs/ioctl.c:596 [inline]
+ __se_sys_ioctl+0x82/0x170 fs/ioctl.c:584
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f5f13850fd9
+RSP: 002b:00007ffe3b3518b8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 0000200000000000 RCX: 00007f5f13850fd9
+RDX: 0000200000000040 RSI: 00000000c020660b RDI: 0000000000000004
+RBP: 6165627472616568 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffe3b3518f0
+R13: 00007ffe3b351b18 R14: 431bde82d7b634db R15: 00007f5f1389a03b
+
+ocfs2_fiemap() takes a read lock of the ip_alloc_sem semaphore (since
+v2.6.22-527-g7307de80510a) and calls fiemap_fill_next_extent() to read the
+extent list of this running mmap executable. The user supplied buffer to
+hold the fiemap information page faults calling ocfs2_page_mkwrite() which
+will take a write lock (since v2.6.27-38-g00dc417fa3e7) of the same
+semaphore. This recursive semaphore acquisition holds filesystem locks and
+causes a hang of the filesystem.
+
+The ip_alloc_sem protects the inode extent list and size. Release the
+read semaphore before calling fiemap_fill_next_extent() in ocfs2_fiemap()
+and ocfs2_fiemap_inline(). This does an unnecessary semaphore lock/unlock
+on the last extent but simplifies the error path.
+
+Link: https://lkml.kernel.org/r/61d1a62b-2631-4f12-81e2-cd689914360b@oracle.com
+Fixes: 00dc417fa3e7 ("ocfs2: fiemap support")
+Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com>
+Reported-by: syzbot+541dcc6ee768f77103e7@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=541dcc6ee768f77103e7
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/extent_map.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -696,6 +696,8 @@ out:
+ * it not only handles the fiemap for inlined files, but also deals
+ * with the fast symlink, cause they have no difference for extent
+ * mapping per se.
++ *
++ * Must be called with ip_alloc_sem semaphore held.
+ */
+ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+ struct fiemap_extent_info *fieinfo,
+@@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct in
+ u64 phys;
+ u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
++ lockdep_assert_held_read(&oi->ip_alloc_sem);
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ if (ocfs2_inode_is_fast_symlink(inode))
+@@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct in
+ phys += offsetof(struct ocfs2_dinode,
+ id2.i_data.id_data);
+
++ /* Release the ip_alloc_sem to prevent deadlock on page fault */
++ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
+ flags);
++ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ if (ret < 0)
+ return ret;
+ }
+@@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, st
+ len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
+ phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
+ virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
+-
++ /* Release the ip_alloc_sem to prevent deadlock on page fault */
++ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
+ len_bytes, fe_flags);
++ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ if (ret)
+ break;
+
--- /dev/null
+From 199cd9e8d14bc14bdbd1fa3031ce26dac9781507 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 3 Sep 2025 09:49:33 -0400
+Subject: Revert "SUNRPC: Don't allow waiting for exiting tasks"
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 199cd9e8d14bc14bdbd1fa3031ce26dac9781507 upstream.
+
+This reverts commit 14e41b16e8cb677bb440dca2edba8b041646c742.
+
+This patch breaks the LTP acct02 test, so let's revert and look for a
+better solution.
+
+Reported-by: Mark Brown <broonie@kernel.org>
+Reported-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Link: https://lore.kernel.org/linux-nfs/7d4d57b0-39a3-49f1-8ada-60364743e3b4@sirena.org.uk/
+Cc: stable@vger.kernel.org # 6.15.x
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/sched.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue
+
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
+- if (unlikely(current->flags & PF_EXITING))
+- return -EINTR;
+ schedule();
+ if (signal_pending_state(mode, current))
+ return -ERESTARTSYS;
bpf-tell-memcg-to-use-allow_spinning-false-path-in-b.patch
tcp_bpf-call-sk_msg_free-when-tcp_bpf_send_verdict-f.patch
proc-fix-type-confusion-in-pde_set_flags.patch
+edac-altera-delete-an-inappropriate-dma_free_coherent-call.patch
+revert-sunrpc-don-t-allow-waiting-for-exiting-tasks.patch
+compiler-clang.h-define-__sanitize_-__-macros-only-when-undefined.patch
+mptcp-sockopt-make-sync_socket_options-propagate-sock_keepopen.patch
+ocfs2-fix-recursive-semaphore-deadlock-in-fiemap-call.patch
+btrfs-fix-squota-compressed-stats-leak.patch
+btrfs-fix-subvolume-deletion-lockup-caused-by-inodes-xarray-race.patch
+i2c-i801-hide-intel-birch-stream-soc-tco-wdt.patch
+net-usb-asix-ax88772-drop-phylink-use-in-pm-to-avoid-mdio-runtime-pm-wakeups.patch
+mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
+mtd-rawnand-stm32_fmc2-avoid-overlapping-mappings-on-ecc-buffer.patch
+mtd-rawnand-stm32_fmc2-fix-ecc-overwrite.patch
+fuse-do-not-allow-mapping-a-non-regular-backing-file.patch
+fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch
+fuse-prevent-overflow-in-copy_file_range-return-value.patch
+mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch
+mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
+mm-memory-failure-fix-redundant-updates-for-already-poisoned-pages.patch
+mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
+mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch
+drm-mediatek-fix-potential-of-node-use-after-free.patch
+drm-xe-attempt-to-bring-bos-back-to-vram-after-eviction.patch
+drm-amdgpu-vcn-allow-limiting-ctx-to-instance-0-for-av1-at-any-time.patch
+drm-amdgpu-vcn4-fix-ib-parsing-with-multiple-engine-info-packages.patch