--- /dev/null
+From e6b543ca9806d7bced863f43020e016ee996c057 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:58 +0800
+Subject: mm/damon/reclaim: avoid divide-by-zero in damon_reclaim_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit e6b543ca9806d7bced863f43020e016ee996c057 upstream.
+
+When creating a new DAMON_RECLAIM scheme, the calculation of
+'min_age_region' uses 'aggr_interval' as the divisor, which may lead to a
+division-by-zero error. Fix it by returning -EINVAL directly when such a
+case occurs.
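+
+For illustration, the shape of the problem and of the check added below can
+be modeled in a few lines of self-contained userspace C.  This is only a
+sketch: the struct, the helper and the constants are stand-ins chosen for
+the example, not the kernel code touched by this patch.
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	/* stand-in for the monitoring attributes set via module parameters */
+	struct mon_attrs {
+		unsigned long aggr_interval;	/* microseconds; may be 0 */
+	};
+
+	/*
+	 * min_age is converted to a count of aggregation intervals by
+	 * dividing by aggr_interval; with aggr_interval == 0 the division
+	 * would fault, so reject that case up front with -EINVAL.
+	 */
+	static int compute_min_age_region(const struct mon_attrs *attrs,
+					  unsigned long min_age,
+					  unsigned long *min_age_region)
+	{
+		if (!attrs->aggr_interval)
+			return -EINVAL;
+		*min_age_region = min_age / attrs->aggr_interval;
+		return 0;
+	}
+
+	int main(void)
+	{
+		struct mon_attrs attrs = { .aggr_interval = 0 };
+		unsigned long min_age_region;
+
+		if (compute_min_age_region(&attrs, 20000000, &min_age_region))
+			printf("rejected: aggr_interval is zero\n");
+		return 0;
+	}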
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-3-yanquanmin1@huawei.com
+Fixes: f5a79d7c0c87 ("mm/damon: introduce struct damos_access_pattern")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org> [6.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/reclaim.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameter
+ if (err)
+ return err;
+
++ if (!damon_reclaim_mon_attrs.aggr_interval) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+ if (err)
+ goto out;
--- /dev/null
+From 3260a3f0828e06f5f13fac69fb1999a6d60d9cff Mon Sep 17 00:00:00 2001
+From: Stanislav Fort <stanislav.fort@aisle.com>
+Date: Fri, 5 Sep 2025 13:10:46 +0300
+Subject: mm/damon/sysfs: fix use-after-free in state_show()
+
+From: Stanislav Fort <stanislav.fort@aisle.com>
+
+commit 3260a3f0828e06f5f13fac69fb1999a6d60d9cff upstream.
+
+state_show() reads kdamond->damon_ctx without holding damon_sysfs_lock.
+This allows a use-after-free race:
+
+CPU 0                                   CPU 1
+-----                                   -----
+state_show()                            damon_sysfs_turn_damon_on()
+ctx = kdamond->damon_ctx;               mutex_lock(&damon_sysfs_lock);
+                                        damon_destroy_ctx(kdamond->damon_ctx);
+                                        kdamond->damon_ctx = NULL;
+                                        mutex_unlock(&damon_sysfs_lock);
+damon_is_running(ctx); /* ctx is freed */
+mutex_lock(&ctx->kdamond_lock); /* UAF */
+
+(The race can also occur with damon_sysfs_kdamonds_rm_dirs() and
+damon_sysfs_kdamond_release(), which free or replace the context under
+damon_sysfs_lock.)
+
+Fix by taking damon_sysfs_lock before dereferencing the context, mirroring
+the locking used in pid_show().
+
+The bug has existed since state_show() first accessed kdamond->damon_ctx.
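+
+The fix below follows the pattern pid_show() already uses: take
+damon_sysfs_lock with mutex_trylock() (so a concurrent writer makes the read
+return -EBUSY instead of blocking), read kdamond->damon_ctx only under the
+lock, and drop the lock only after the pointer has been used.  The same
+reader pattern, modeled as a self-contained userspace sketch with pthreads
+(illustrative stand-ins only, not the kernel code):
+
+	#include <errno.h>
+	#include <pthread.h>
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+	static int *shared_ctx;	/* may be freed and set to NULL under 'lock' */
+
+	/*
+	 * Buggy pattern: read shared_ctx before taking the lock, so the
+	 * object can be freed in between.  Fixed pattern (this function):
+	 * read and dereference the pointer only while holding the lock.
+	 */
+	static int read_state(bool *running)
+	{
+		int *ctx;
+
+		if (pthread_mutex_trylock(&lock))
+			return -EBUSY;	/* a writer currently holds the lock */
+		ctx = shared_ctx;
+		*running = ctx ? (*ctx != 0) : false;
+		pthread_mutex_unlock(&lock);
+		return 0;
+	}
+
+	int main(void)
+	{
+		static int ctx_value = 1;
+		bool running;
+
+		shared_ctx = &ctx_value;
+		if (!read_state(&running))
+			printf("running: %s\n", running ? "on" : "off");
+		return 0;
+	}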
+
+Link: https://lkml.kernel.org/r/20250905101046.2288-1-disclosure@aisle.com
+Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
+Signed-off-by: Stanislav Fort <disclosure@aisle.com>
+Reported-by: Stanislav Fort <disclosure@aisle.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1243,14 +1243,18 @@ static ssize_t state_show(struct kobject
+ {
+ struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+ struct damon_sysfs_kdamond, kobj);
+- struct damon_ctx *ctx = kdamond->damon_ctx;
+- bool running;
++ struct damon_ctx *ctx;
++ bool running = false;
+
+- if (!ctx)
+- running = false;
+- else
++ if (!mutex_trylock(&damon_sysfs_lock))
++ return -EBUSY;
++
++ ctx = kdamond->damon_ctx;
++ if (ctx)
+ running = damon_sysfs_ctx_running(ctx);
+
++ mutex_unlock(&damon_sysfs_lock);
++
+ return sysfs_emit(buf, "%s\n", running ?
+ damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+ damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);