--- /dev/null
+From e12603bf2c3d571476a21debfeab80bb70d8c0cc Mon Sep 17 00:00:00 2001
+From: Yang Wang <kevinyang.wang@amd.com>
+Date: Wed, 19 Nov 2025 10:46:23 +0800
+Subject: drm/amd/pm: fix amdgpu_irq enabled counter unbalanced on smu v11.0
+
+From: Yang Wang <kevinyang.wang@amd.com>
+
+commit e12603bf2c3d571476a21debfeab80bb70d8c0cc upstream.
+
+v1:
+- fix amdgpu_irq enabled counter unbalanced issue on smu_v11_0_disable_thermal_alert.
+
+v2:
+- re-enable smu thermal alert to make amdgpu irq counter balance for smu v11.0 if in runpm state
+
+[75582.361561] ------------[ cut here ]------------
+[75582.361565] WARNING: CPU: 42 PID: 533 at drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c:639 amdgpu_irq_put+0xd8/0xf0 [amdgpu]
+...
+[75582.362211] Tainted: [E]=UNSIGNED_MODULE
+[75582.362214] Hardware name: GIGABYTE MZ01-CE0-00/MZ01-CE0-00, BIOS F14a 08/14/2020
+[75582.362218] Workqueue: pm pm_runtime_work
+[75582.362225] RIP: 0010:amdgpu_irq_put+0xd8/0xf0 [amdgpu]
+[75582.362556] Code: 31 f6 31 ff e9 c9 bf cf c2 44 89 f2 4c 89 e6 4c 89 ef e8 db fc ff ff 5b 41 5c 41 5d 41 5e 5d 31 d2 31 f6 31 ff e9 a8 bf cf c2 <0f> 0b eb c3 b8 fe ff ff ff eb 97 e9 84 e8 8b 00 0f 1f 84 00 00 00
+[75582.362560] RSP: 0018:ffffd50d51297b80 EFLAGS: 00010246
+[75582.362564] RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000
+[75582.362568] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+[75582.362570] RBP: ffffd50d51297ba0 R08: 0000000000000000 R09: 0000000000000000
+[75582.362573] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8e72091d2008
+[75582.362576] R13: ffff8e720af80000 R14: 0000000000000000 R15: ffff8e720af80000
+[75582.362579] FS: 0000000000000000(0000) GS:ffff8e9158262000(0000) knlGS:0000000000000000
+[75582.362582] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[75582.362585] CR2: 000074869d040c14 CR3: 0000001e37a3e000 CR4: 00000000003506f0
+[75582.362588] Call Trace:
+[75582.362591] <TASK>
+[75582.362597] smu_v11_0_disable_thermal_alert+0x17/0x30 [amdgpu]
+[75582.362983] smu_smc_hw_cleanup+0x79/0x4f0 [amdgpu]
+[75582.363375] smu_suspend+0x92/0x110 [amdgpu]
+[75582.363762] ? gfx_v10_0_hw_fini+0xd5/0x150 [amdgpu]
+[75582.364098] amdgpu_ip_block_suspend+0x27/0x80 [amdgpu]
+[75582.364377] ? timer_delete_sync+0x10/0x20
+[75582.364384] amdgpu_device_ip_suspend_phase2+0x190/0x450 [amdgpu]
+[75582.364665] amdgpu_device_suspend+0x1ae/0x2f0 [amdgpu]
+[75582.364948] amdgpu_pmops_runtime_suspend+0xf3/0x1f0 [amdgpu]
+[75582.365230] pci_pm_runtime_suspend+0x6d/0x1f0
+[75582.365237] ? __pfx_pci_pm_runtime_suspend+0x10/0x10
+[75582.365242] __rpm_callback+0x4c/0x190
+[75582.365246] ? srso_return_thunk+0x5/0x5f
+[75582.365252] ? srso_return_thunk+0x5/0x5f
+[75582.365256] ? ktime_get_mono_fast_ns+0x43/0xe0
+[75582.365263] rpm_callback+0x6e/0x80
+[75582.365267] rpm_suspend+0x124/0x5f0
+[75582.365271] ? srso_return_thunk+0x5/0x5f
+[75582.365275] ? __schedule+0x439/0x15e0
+[75582.365281] ? srso_return_thunk+0x5/0x5f
+[75582.365285] ? __queue_delayed_work+0xb8/0x180
+[75582.365293] pm_runtime_work+0xc6/0xe0
+[75582.365297] process_one_work+0x1a1/0x3f0
+[75582.365303] worker_thread+0x2ba/0x3d0
+[75582.365309] kthread+0x107/0x220
+[75582.365313] ? __pfx_worker_thread+0x10/0x10
+[75582.365318] ? __pfx_kthread+0x10/0x10
+[75582.365323] ret_from_fork+0xa2/0x120
+[75582.365328] ? __pfx_kthread+0x10/0x10
+[75582.365332] ret_from_fork_asm+0x1a/0x30
+[75582.365343] </TASK>
+[75582.365345] ---[ end trace 0000000000000000 ]---
+[75582.365350] amdgpu 0000:05:00.0: amdgpu: Fail to disable thermal alert!
+[75582.365379] amdgpu 0000:05:00.0: amdgpu: suspend of IP block <smu> failed -22
+
+Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 7 +++++--
+ drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 7 ++++++-
+ 2 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1655,9 +1655,12 @@ static int smu_smc_hw_setup(struct smu_c
+ if (adev->in_suspend && smu_is_dpm_running(smu)) {
+ dev_info(adev->dev, "dpm has been enabled\n");
+ ret = smu_system_features_control(smu, true);
+- if (ret)
++ if (ret) {
+ dev_err(adev->dev, "Failed system features control!\n");
+- return ret;
++ return ret;
++ }
++
++ return smu_enable_thermal_alert(smu);
+ }
+ break;
+ default:
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struc
+
+ int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
+ {
+- return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
++ int ret = 0;
++
++ if (smu->smu_table.thermal_controller_type)
++ ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
++
++ return ret;
+ }
+
+ static uint16_t convert_to_vddc(uint8_t vid)
--- /dev/null
+From stable+bounces-231294-greg=kroah.com@vger.kernel.org Tue Mar 31 00:52:11 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 18:50:51 -0400
+Subject: ksmbd: fix use-after-free and NULL deref in smb_grant_oplock()
+To: stable@vger.kernel.org
+Cc: Werner Kasselman <werner@verivus.com>, ChenXiaoSong <chenxiaosong@kylinos.cn>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260330225051.1334657-1-sashal@kernel.org>
+
+From: Werner Kasselman <werner@verivus.com>
+
+[ Upstream commit 48623ec358c1c600fa1e38368746f933e0f1a617 ]
+
+smb_grant_oplock() has two issues in the oplock publication sequence:
+
+1) opinfo is linked into ci->m_op_list (via opinfo_add) before
+ add_lease_global_list() is called. If add_lease_global_list()
+ fails (kmalloc returns NULL), the error path frees the opinfo
+ via __free_opinfo() while it is still linked in ci->m_op_list.
+ Concurrent m_op_list readers (opinfo_get_list, or direct iteration
+ in smb_break_all_levII_oplock) dereference the freed node.
+
+2) opinfo->o_fp is assigned after add_lease_global_list() publishes
+ the opinfo on the global lease list. A concurrent
+ find_same_lease_key() can walk the lease list and dereference
+ opinfo->o_fp->f_ci while o_fp is still NULL.
+
+Fix by restructuring the publication sequence to eliminate post-publish
+failure:
+
+- Set opinfo->o_fp before any list publication (fixes NULL deref).
+- Preallocate lease_table via alloc_lease_table() before opinfo_add()
+ so add_lease_global_list() becomes infallible after publication.
+- Keep the original m_op_list publication order (opinfo_add before
+ lease list) so concurrent opens via same_client_has_lease() and
+ opinfo_get_list() still see the in-flight grant.
+- Use opinfo_put() instead of __free_opinfo() on err_out so that
+ the RCU-deferred free path is used.
+
+This also requires splitting add_lease_global_list() to take a
+preallocated lease_table and changing its return type from int to void,
+since it can no longer fail.
+
+Fixes: 1dfd062caa16 ("ksmbd: fix use-after-free by using call_rcu() for oplock_info")
+Cc: stable@vger.kernel.org
+Signed-off-by: Werner Kasselman <werner@verivus.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ adapted kmalloc_obj() macro to kmalloc(sizeof()) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 72 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 45 insertions(+), 27 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -82,11 +82,19 @@ static void lease_del_list(struct oplock
+ spin_unlock(&lb->lb_lock);
+ }
+
+-static void lb_add(struct lease_table *lb)
++static struct lease_table *alloc_lease_table(struct oplock_info *opinfo)
+ {
+- write_lock(&lease_list_lock);
+- list_add(&lb->l_entry, &lease_table_list);
+- write_unlock(&lease_list_lock);
++ struct lease_table *lb;
++
++ lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
++ if (!lb)
++ return NULL;
++
++ memcpy(lb->client_guid, opinfo->conn->ClientGUID,
++ SMB2_CLIENT_GUID_SIZE);
++ INIT_LIST_HEAD(&lb->lease_list);
++ spin_lock_init(&lb->lb_lock);
++ return lb;
+ }
+
+ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+@@ -1042,34 +1050,27 @@ static void copy_lease(struct oplock_inf
+ lease2->version = lease1->version;
+ }
+
+-static int add_lease_global_list(struct oplock_info *opinfo)
++static void add_lease_global_list(struct oplock_info *opinfo,
++ struct lease_table *new_lb)
+ {
+ struct lease_table *lb;
+
+- read_lock(&lease_list_lock);
++ write_lock(&lease_list_lock);
+ list_for_each_entry(lb, &lease_table_list, l_entry) {
+ if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE)) {
+ opinfo->o_lease->l_lb = lb;
+ lease_add_list(opinfo);
+- read_unlock(&lease_list_lock);
+- return 0;
++ write_unlock(&lease_list_lock);
++ kfree(new_lb);
++ return;
+ }
+ }
+- read_unlock(&lease_list_lock);
+
+- lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
+- if (!lb)
+- return -ENOMEM;
+-
+- memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+- SMB2_CLIENT_GUID_SIZE);
+- INIT_LIST_HEAD(&lb->lease_list);
+- spin_lock_init(&lb->lb_lock);
+- opinfo->o_lease->l_lb = lb;
++ opinfo->o_lease->l_lb = new_lb;
+ lease_add_list(opinfo);
+- lb_add(lb);
+- return 0;
++ list_add(&new_lb->l_entry, &lease_table_list);
++ write_unlock(&lease_list_lock);
+ }
+
+ static void set_oplock_level(struct oplock_info *opinfo, int level,
+@@ -1189,6 +1190,7 @@ int smb_grant_oplock(struct ksmbd_work *
+ int err = 0;
+ struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+ struct ksmbd_inode *ci = fp->f_ci;
++ struct lease_table *new_lb = NULL;
+ bool prev_op_has_lease;
+ __le32 prev_op_state = 0;
+
+@@ -1291,21 +1293,37 @@ set_lev:
+ set_oplock_level(opinfo, req_op_level, lctx);
+
+ out:
+- opinfo_count_inc(fp);
+- opinfo_add(opinfo, fp);
+-
++ /*
++ * Set o_fp before any publication so that concurrent readers
++ * (e.g. find_same_lease_key() on the lease list) that
++ * dereference opinfo->o_fp don't hit a NULL pointer.
++ *
++ * Keep the original publication order so concurrent opens can
++ * still observe the in-flight grant via ci->m_op_list, but make
++ * everything after opinfo_add() no-fail by preallocating any new
++ * lease_table first.
++ */
++ opinfo->o_fp = fp;
+ if (opinfo->is_lease) {
+- err = add_lease_global_list(opinfo);
+- if (err)
++ new_lb = alloc_lease_table(opinfo);
++ if (!new_lb) {
++ err = -ENOMEM;
+ goto err_out;
++ }
+ }
+
++ opinfo_count_inc(fp);
++ opinfo_add(opinfo, fp);
++
++ if (opinfo->is_lease)
++ add_lease_global_list(opinfo, new_lb);
++
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+- opinfo->o_fp = fp;
+
+ return 0;
+ err_out:
+- __free_opinfo(opinfo);
++ kfree(new_lb);
++ opinfo_put(opinfo);
+ return err;
+ }
+
--- /dev/null
+From 26f775a054c3cda86ad465a64141894a90a9e145 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 19 Mar 2026 07:52:17 -0700
+Subject: mm/damon/core: avoid use of half-online-committed context
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 26f775a054c3cda86ad465a64141894a90a9e145 upstream.
+
+One major usage of damon_call() is online DAMON parameters update. It is
+done by calling damon_commit_ctx() inside the damon_call() callback
+function. damon_commit_ctx() can fail for two reasons: 1) invalid
+parameters and 2) internal memory allocation failures. In case of
+failures, the damon_ctx that attempted to be updated (commit destination)
+can be partially updated (or, corrupted from a perspective), and therefore
+shouldn't be used anymore. The function only ensures the damon_ctx object
+can safely deallocated using damon_destroy_ctx().
+
+The API callers are, however, calling damon_commit_ctx() only after
+asserting the parameters are valid, to avoid damon_commit_ctx() fails due
+to invalid input parameters. But it can still theoretically fail if the
+internal memory allocation fails. In the case, DAMON may run with the
+partially updated damon_ctx. This can result in unexpected behaviors
+including even NULL pointer dereference in case of damos_commit_dests()
+failure [1]. Such allocation failure is arguably too small to fail, so
+the real world impact would be rare. But, given the bad consequence, this
+needs to be fixed.
+
+Avoid such partially-committed (maybe-corrupted) damon_ctx use by saving
+the damon_commit_ctx() failure on the damon_ctx object. For this,
+introduce damon_ctx->maybe_corrupted field. damon_commit_ctx() sets it
+when it is failed. kdamond_call() checks if the field is set after each
+damon_call_control->fn() is executed. If it is set, ignore remaining
+callback requests and return. All kdamond_call() callers including
+kdamond_fn() also check the maybe_corrupted field right after
+kdamond_call() invocations. If the field is set, break the kdamond_fn()
+main loop so that DAMON still doesn't use the context that might be
+corrupted.
+
+[sj@kernel.org: let kdamond_call() with cancel regardless of maybe_corrupted]
+ Link: https://lkml.kernel.org/r/20260320031553.2479-1-sj@kernel.org
+ Link: https://sashiko.dev/#/patchset/20260319145218.86197-1-sj%40kernel.org
+Link: https://lkml.kernel.org/r/20260319145218.86197-1-sj@kernel.org
+Link: https://lore.kernel.org/20260319043309.97966-1-sj@kernel.org [1]
+Fixes: 3301f1861d34 ("mm/damon/sysfs: handle commit command using damon_call()")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.15+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/damon.h | 6 ++++++
+ mm/damon/core.c | 8 ++++++++
+ 2 files changed, 14 insertions(+)
+
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -786,6 +786,12 @@ struct damon_ctx {
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
+
++ /*
++ * indicate if this may be corrupted. Currentonly this is set only for
++ * damon_commit_ctx() failure.
++ */
++ bool maybe_corrupted;
++
+ /* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1236,6 +1236,7 @@ int damon_commit_ctx(struct damon_ctx *d
+ {
+ int err;
+
++ dst->maybe_corrupted = true;
+ if (!is_power_of_2(src->min_sz_region))
+ return -EINVAL;
+
+@@ -1261,6 +1262,7 @@ int damon_commit_ctx(struct damon_ctx *d
+ dst->addr_unit = src->addr_unit;
+ dst->min_sz_region = src->min_sz_region;
+
++ dst->maybe_corrupted = false;
+ return 0;
+ }
+
+@@ -2562,6 +2564,8 @@ static void kdamond_call(struct damon_ct
+ } else {
+ list_add(&control->list, &repeat_controls);
+ }
++ if (!cancel && ctx->maybe_corrupted)
++ break;
+ }
+ control = list_first_entry_or_null(&repeat_controls,
+ struct damon_call_control, list);
+@@ -2594,6 +2598,8 @@ static int kdamond_wait_activation(struc
+ kdamond_usleep(min_wait_time);
+
+ kdamond_call(ctx, false);
++ if (ctx->maybe_corrupted)
++ return -EINVAL;
+ damos_walk_cancel(ctx);
+ }
+ return -EBUSY;
+@@ -2679,6 +2685,8 @@ static int kdamond_fn(void *data)
+ * kdamond_merge_regions() if possible, to reduce overhead
+ */
+ kdamond_call(ctx, false);
++ if (ctx->maybe_corrupted)
++ break;
+ if (!list_empty(&ctx->schemes))
+ kdamond_apply_schemes(ctx);
+ else
--- /dev/null
+From 84481e705ab07ed46e56587fe846af194acacafe Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Mon, 16 Mar 2026 16:51:17 -0700
+Subject: mm/damon/stat: monitor all System RAM resources
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 84481e705ab07ed46e56587fe846af194acacafe upstream.
+
+DAMON_STAT usage document (Documentation/admin-guide/mm/damon/stat.rst)
+says it monitors the system's entire physical memory. But, it is
+monitoring only the biggest System RAM resource of the system. When there
+are multiple System RAM resources, this results in monitoring only an
+unexpectedly small fraction of the physical memory. For example, suppose
+the system has a 500 GiB System RAM, 10 MiB non-System RAM, and 500 GiB
+System RAM resources in order on the physical address space. DAMON_STAT
+will monitor only the first 500 GiB System RAM. This situation is
+particularly common on NUMA systems.
+
+Select a physical address range that covers all System RAM areas of the
+system, to fix this issue and make it work as documented.
+
+[sj@kernel.org: return error if monitoring target region is invalid]
+ Link: https://lkml.kernel.org/r/20260317053631.87907-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20260316235118.873-1-sj@kernel.org
+Fixes: 369c415e6073 ("mm/damon: introduce DAMON_STAT module")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.17+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/stat.c | 36 ++++++++++++++++++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/mm/damon/stat.c
++++ b/mm/damon/stat.c
+@@ -145,12 +145,44 @@ static int damon_stat_damon_call_fn(void
+ return 0;
+ }
+
++struct damon_stat_system_ram_range_walk_arg {
++ bool walked;
++ struct resource res;
++};
++
++static int damon_stat_system_ram_walk_fn(struct resource *res, void *arg)
++{
++ struct damon_stat_system_ram_range_walk_arg *a = arg;
++
++ if (!a->walked) {
++ a->walked = true;
++ a->res.start = res->start;
++ }
++ a->res.end = res->end;
++ return 0;
++}
++
++static int damon_stat_set_monitoring_region(struct damon_target *t,
++ unsigned long addr_unit)
++{
++ struct damon_addr_range addr_range;
++ struct damon_stat_system_ram_range_walk_arg arg = {};
++
++ walk_system_ram_res(0, ULONG_MAX, &arg, damon_stat_system_ram_walk_fn);
++ if (!arg.walked)
++ return -EINVAL;
++ addr_range.start = arg.res.start;
++ addr_range.end = arg.res.end + 1;
++ if (addr_range.end <= addr_range.start)
++ return -EINVAL;
++ return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
++}
++
+ static struct damon_ctx *damon_stat_build_ctx(void)
+ {
+ struct damon_ctx *ctx;
+ struct damon_attrs attrs;
+ struct damon_target *target;
+- unsigned long start = 0, end = 0;
+
+ ctx = damon_new_ctx();
+ if (!ctx)
+@@ -188,7 +220,7 @@ static struct damon_ctx *damon_stat_buil
+ if (!target)
+ goto free_out;
+ damon_add_target(ctx, target);
+- if (damon_set_region_biggest_system_ram_default(target, &start, &end))
++ if (damon_stat_set_monitoring_region(target, ctx->addr_unit))
+ goto free_out;
+ return ctx;
+ free_out:
--- /dev/null
+From 7fe000eb32904758a85e62f6ea9483f89d5dabfc Mon Sep 17 00:00:00 2001
+From: Josh Law <objecting@objecting.org>
+Date: Sat, 21 Mar 2026 10:54:24 -0700
+Subject: mm/damon/sysfs: fix param_ctx leak on damon_sysfs_new_test_ctx() failure
+
+From: Josh Law <objecting@objecting.org>
+
+commit 7fe000eb32904758a85e62f6ea9483f89d5dabfc upstream.
+
+Patch series "mm/damon/sysfs: fix memory leak and NULL dereference
+issues", v4.
+
+DAMON_SYSFS can leak memory under allocation failure, and do NULL pointer
+dereference when a privileged user make wrong sequences of control. Fix
+those.
+
+
+This patch (of 3):
+
+When damon_sysfs_new_test_ctx() fails in damon_sysfs_commit_input(),
+param_ctx is leaked because the early return skips the cleanup at the out
+label. Destroy param_ctx before returning.
+
+Link: https://lkml.kernel.org/r/20260321175427.86000-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20260321175427.86000-2-sj@kernel.org
+Fixes: f0c5118ebb0e ("mm/damon/sysfs: catch commit test ctx alloc failure")
+Signed-off-by: Josh Law <objecting@objecting.org>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.18+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1476,8 +1476,10 @@ static int damon_sysfs_commit_input(void
+ if (IS_ERR(param_ctx))
+ return PTR_ERR(param_ctx);
+ test_ctx = damon_new_ctx();
+- if (!test_ctx)
++ if (!test_ctx) {
++ damon_destroy_ctx(param_ctx);
+ return -ENOMEM;
++ }
+ err = damon_commit_ctx(test_ctx, param_ctx);
+ if (err)
+ goto out;
--- /dev/null
+From stable+bounces-231307-greg=kroah.com@vger.kernel.org Tue Mar 31 03:09:37 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2026 21:09:28 -0400
+Subject: mm/huge_memory: fix folio isn't locked in softleaf_to_folio()
+To: stable@vger.kernel.org
+Cc: Jinjiang Tu <tujinjiang@huawei.com>, "David Hildenbrand (Arm)" <david@kernel.org>, "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>, Barry Song <baohua@kernel.org>, Kefeng Wang <wangkefeng.wang@huawei.com>, Liam Howlett <liam.howlett@oracle.com>, Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>, Nanyong Sun <sunnanyong@huawei.com>, Ryan Roberts <ryan.roberts@arm.com>, Suren Baghdasaryan <surenb@google.com>, Vlastimil Babka <vbabka@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331010928.1879948-1-sashal@kernel.org>
+
+From: Jinjiang Tu <tujinjiang@huawei.com>
+
+[ Upstream commit 4c5e7f0fcd592801c9cc18f29f80fbee84eb8669 ]
+
+On arm64 server, we found folio that get from migration entry isn't locked
+in softleaf_to_folio(). This issue triggers when mTHP splitting and
+zap_nonpresent_ptes() races, and the root cause is lack of memory barrier
+in softleaf_to_folio(). The race is as follows:
+
+ CPU0 CPU1
+
+deferred_split_scan() zap_nonpresent_ptes()
+ lock folio
+ split_folio()
+ unmap_folio()
+ change ptes to migration entries
+ __split_folio_to_order() softleaf_to_folio()
+ set flags(including PG_locked) for tail pages folio = pfn_folio(softleaf_to_pfn(entry))
+ smp_wmb() VM_WARN_ON_ONCE(!folio_test_locked(folio))
+ prep_compound_page() for tail pages
+
+In __split_folio_to_order(), smp_wmb() guarantees page flags of tail pages
+are visible before the tail page becomes non-compound. smp_wmb() should
+be paired with smp_rmb() in softleaf_to_folio(), which is missed. As a
+result, if zap_nonpresent_ptes() accesses migration entry that stores tail
+pfn, softleaf_to_folio() may see the updated compound_head of tail page
+before page->flags.
+
+This issue will trigger VM_WARN_ON_ONCE() in pfn_swap_entry_folio()
+because of the race between folio split and zap_nonpresent_ptes()
+leading to a folio incorrectly undergoing modification without a folio
+lock being held.
+
+This is a BUG_ON() before commit 93976a20345b ("mm: eliminate further
+swapops predicates"), which is merged in v6.19-rc1.
+
+To fix it, add missing smp_rmb() if the softleaf entry is migration entry
+in softleaf_to_folio() and softleaf_to_page().
+
+[tujinjiang@huawei.com: update function name and comments]
+ Link: https://lkml.kernel.org/r/20260321075214.3305564-1-tujinjiang@huawei.com
+Link: https://lkml.kernel.org/r/20260319012541.4158561-1-tujinjiang@huawei.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Nanyong Sun <sunnanyong@huawei.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ applied fix to swapops.h using old pfn_swap_entry/swp_entry_t naming ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/swapops.h | 27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -487,15 +487,29 @@ static inline int pte_none_mostly(pte_t
+ return pte_none(pte) || is_pte_marker(pte);
+ }
+
+-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
++static inline void swap_entry_migration_sync(swp_entry_t entry,
++ struct folio *folio)
+ {
+- struct page *p = pfn_to_page(swp_offset_pfn(entry));
++ /*
++ * Ensure we do not race with split, which might alter tail pages into new
++ * folios and thus result in observing an unlocked folio.
++ * This matches the write barrier in __split_folio_to_order().
++ */
++ smp_rmb();
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+- BUG_ON(is_migration_entry(entry) && !PageLocked(p));
++ BUG_ON(!folio_test_locked(folio));
++}
++
++static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
++{
++ struct page *p = pfn_to_page(swp_offset_pfn(entry));
++
++ if (is_migration_entry(entry))
++ swap_entry_migration_sync(entry, page_folio(p));
+
+ return p;
+ }
+@@ -504,11 +518,8 @@ static inline struct folio *pfn_swap_ent
+ {
+ struct folio *folio = pfn_folio(swp_offset_pfn(entry));
+
+- /*
+- * Any use of migration entries may only occur while the
+- * corresponding folio is locked
+- */
+- BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
++ if (is_migration_entry(entry))
++ swap_entry_migration_sync(entry, folio);
+
+ return folio;
+ }
--- /dev/null
+From 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Tue, 3 Mar 2026 23:40:25 +0530
+Subject: powerpc64/bpf: do not increment tailcall count when prog is NULL
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 521bd39d9d28ce54cbfec7f9b89c94ad4fdb8350 upstream.
+
+Do not increment tailcall count, if tailcall did not succeed due to
+missing BPF program.
+
+Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
+Cc: stable@vger.kernel.org
+Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
+Link: https://patch.msgid.link/20260303181031.390073-2-hbathini@linux.ibm.com
+[ Conflict due to missing feature commit 2ed2d8f6fb38 ("powerpc64/bpf:
+ Support tailcalls with subprogs") resolved accordingly. ]
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -430,27 +430,32 @@ static int bpf_jit_emit_tail_call(u32 *i
+
+ /*
+ * tail_call_cnt++;
++ * Writeback this updated value only if tailcall succeeds.
+ */
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+- EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* prog = array->ptrs[index]; */
+- EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+- EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
++ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
++ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_array, ptrs)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+- EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
++ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+- EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+- FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+- EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
++ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ offsetof(struct bpf_prog, bpf_func)));
++ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
++ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
++ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
++
++ /* Writeback updated tailcall count */
++ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /* tear down stack, restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
--- /dev/null
+From 580cc37b1de4fcd9997c48d7080e744533f09f36 Mon Sep 17 00:00:00 2001
+From: Benno Lossin <lossin@kernel.org>
+Date: Mon, 2 Mar 2026 15:04:15 +0100
+Subject: rust: pin-init: internal: init: document load-bearing fact of field accessors
+
+From: Benno Lossin <lossin@kernel.org>
+
+commit 580cc37b1de4fcd9997c48d7080e744533f09f36 upstream.
+
+The functions `[Pin]Init::__[pinned_]init` and `ptr::write` called from
+the `init!` macro require the passed pointer to be aligned. This fact is
+ensured by the creation of field accessors to previously initialized
+fields.
+
+Since we missed this very important fact from the beginning [1],
+document it in the code.
+
+Link: https://rust-for-linux.zulipchat.com/#narrow/channel/561532-pin-init/topic/initialized.20field.20accessor.20detection/with/576210658 [1]
+Fixes: 90e53c5e70a6 ("rust: add pin-init API core")
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y: 42415d163e5d: rust: pin-init: add references to previously initialized fields
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y, 6.18.y, 6.19.y
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20260302140424.4097655-2-lossin@kernel.org
+[ Updated Cc: stable@ tags as discussed. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ Moved changes to the declarative macro, because 6.19.y and earlier do not
+ have `syn`. Also duplicated the comment for all field accessor creations.
+ - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/pin-init/src/macros.rs | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/rust/pin-init/src/macros.rs
++++ b/rust/pin-init/src/macros.rs
+@@ -1310,6 +1310,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+@@ -1349,6 +1353,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+@@ -1389,6 +1397,10 @@ macro_rules! __init_internal {
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+@@ -1429,6 +1441,10 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
ext4-fix-iloc.bh-leak-in-ext4_fc_replay_inode-error-paths.patch
ext4-always-drain-queued-discard-work-in-ext4_mb_release.patch
arm64-dts-imx8mn-tqma8mqnl-fix-ldo5-power-off.patch
+powerpc64-bpf-do-not-increment-tailcall-count-when-prog-is-null.patch
+unwind_user-x86-fix-arch-um-build.patch
+rust-pin-init-internal-init-document-load-bearing-fact-of-field-accessors.patch
+drm-amd-pm-fix-amdgpu_irq-enabled-counter-unbalanced-on-smu-v11.0.patch
+mm-damon-stat-monitor-all-system-ram-resources.patch
+mm-damon-core-avoid-use-of-half-online-committed-context.patch
+mm-damon-sysfs-fix-param_ctx-leak-on-damon_sysfs_new_test_ctx-failure.patch
+mm-huge_memory-fix-folio-isn-t-locked-in-softleaf_to_folio.patch
+ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
--- /dev/null
+From aa7387e79a5cff0585cd1b9091944142a06872b6 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 29 Oct 2025 14:24:57 +0100
+Subject: unwind_user/x86: Fix arch=um build
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit aa7387e79a5cff0585cd1b9091944142a06872b6 upstream.
+
+Add CONFIG_HAVE_UNWIND_USER_FP guards to make sure this code
+doesn't break arch=um builds.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Closes: https://lore.kernel.org/oe-kbuild-all/202510291919.FFGyU7nq-lkp@intel.com/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/unwind_user.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/include/asm/unwind_user.h
++++ b/arch/x86/include/asm/unwind_user.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_UNWIND_USER_H
+ #define _ASM_X86_UNWIND_USER_H
+
++#ifdef CONFIG_HAVE_UNWIND_USER_FP
++
+ #include <asm/ptrace.h>
+ #include <asm/uprobes.h>
+
+@@ -34,4 +36,6 @@ static inline bool unwind_user_at_functi
+ return is_uprobe_at_func_entry(regs);
+ }
+
++#endif /* CONFIG_HAVE_UNWIND_USER_FP */
++
+ #endif /* _ASM_X86_UNWIND_USER_H */