6.16-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 14 Sep 2025 07:53:16 +0000 (09:53 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 14 Sep 2025 07:53:16 +0000 (09:53 +0200)
added patches:
mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch
mm-damon-sysfs-fix-use-after-free-in-state_show.patch

queue-6.16/mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch [new file with mode: 0644]
queue-6.16/mm-damon-sysfs-fix-use-after-free-in-state_show.patch [new file with mode: 0644]
queue-6.16/series

diff --git a/queue-6.16/mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch b/queue-6.16/mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch
new file mode 100644
index 0000000..fc0e38f
--- /dev/null
+++ b/queue-6.16/mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch
@@ -0,0 +1,42 @@
+From e6b543ca9806d7bced863f43020e016ee996c057 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:58 +0800
+Subject: mm/damon/reclaim: avoid divide-by-zero in damon_reclaim_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit e6b543ca9806d7bced863f43020e016ee996c057 upstream.
+
+When creating a new scheme of DAMON_RECLAIM, the calculation of
+'min_age_region' uses 'aggr_interval' as the divisor, which may lead to
+division-by-zero errors.  Fix it by directly returning -EINVAL when such a
+case occurs.
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-3-yanquanmin1@huawei.com
+Fixes: f5a79d7c0c87 ("mm/damon: introduce struct damos_access_pattern")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org>   [6.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/reclaim.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameter
+       if (err)
+               return err;
+
++      if (!damon_reclaim_mon_attrs.aggr_interval) {
++              err = -EINVAL;
++              goto out;
++      }
++
+       err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+       if (err)
+               goto out;
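
The guard above rejects a zero 'aggr_interval' with -EINVAL before the value is ever used as a divisor when deriving the minimum region age. A minimal userspace C sketch of the same defensive pattern (the struct and field names below are hypothetical stand-ins for damon_reclaim_mon_attrs, not the kernel code):

#include <stdio.h>

/* Hypothetical stand-in for DAMON_RECLAIM's monitoring attributes. */
struct mon_attrs {
	unsigned long aggr_interval;	/* aggregation interval in usecs; 0 means unset */
	unsigned long min_age_us;	/* desired minimum region age in usecs */
};

/* Refuse a zero interval up front instead of dividing by it later. */
static int apply_parameters(const struct mon_attrs *attrs,
			    unsigned long *min_age_region)
{
	if (!attrs->aggr_interval)
		return -1;	/* the kernel patch returns -EINVAL here */

	*min_age_region = attrs->min_age_us / attrs->aggr_interval;
	return 0;
}

int main(void)
{
	struct mon_attrs attrs = { .aggr_interval = 0, .min_age_us = 100000 };
	unsigned long min_age_region;

	if (apply_parameters(&attrs, &min_age_region))
		fprintf(stderr, "rejected: aggregation interval is zero\n");
	return 0;
}
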
diff --git a/queue-6.16/mm-damon-sysfs-fix-use-after-free-in-state_show.patch b/queue-6.16/mm-damon-sysfs-fix-use-after-free-in-state_show.patch
new file mode 100644
index 0000000..3ff57d1
--- /dev/null
+++ b/queue-6.16/mm-damon-sysfs-fix-use-after-free-in-state_show.patch
@@ -0,0 +1,70 @@
+From 3260a3f0828e06f5f13fac69fb1999a6d60d9cff Mon Sep 17 00:00:00 2001
+From: Stanislav Fort <stanislav.fort@aisle.com>
+Date: Fri, 5 Sep 2025 13:10:46 +0300
+Subject: mm/damon/sysfs: fix use-after-free in state_show()
+
+From: Stanislav Fort <stanislav.fort@aisle.com>
+
+commit 3260a3f0828e06f5f13fac69fb1999a6d60d9cff upstream.
+
+state_show() reads kdamond->damon_ctx without holding damon_sysfs_lock.
+This allows a use-after-free race:
+
+CPU 0                         CPU 1
+-----                         -----
+state_show()                  damon_sysfs_turn_damon_on()
+ctx = kdamond->damon_ctx;     mutex_lock(&damon_sysfs_lock);
+                              damon_destroy_ctx(kdamond->damon_ctx);
+                              kdamond->damon_ctx = NULL;
+                              mutex_unlock(&damon_sysfs_lock);
+damon_is_running(ctx);        /* ctx is freed */
+mutex_lock(&ctx->kdamond_lock); /* UAF */
+
+(The race can also occur with damon_sysfs_kdamonds_rm_dirs() and
+damon_sysfs_kdamond_release(), which free or replace the context under
+damon_sysfs_lock.)
+
+Fix by taking damon_sysfs_lock before dereferencing the context, mirroring
+the locking used in pid_show().
+
+The bug has existed since state_show() first accessed kdamond->damon_ctx.
+
+Link: https://lkml.kernel.org/r/20250905101046.2288-1-disclosure@aisle.com
+Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
+Signed-off-by: Stanislav Fort <disclosure@aisle.com>
+Reported-by: Stanislav Fort <disclosure@aisle.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c |   14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1243,14 +1243,18 @@ static ssize_t state_show(struct kobject
+ {
+       struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+                       struct damon_sysfs_kdamond, kobj);
+-      struct damon_ctx *ctx = kdamond->damon_ctx;
+-      bool running;
++      struct damon_ctx *ctx;
++      bool running = false;
+
+-      if (!ctx)
+-              running = false;
+-      else
++      if (!mutex_trylock(&damon_sysfs_lock))
++              return -EBUSY;
++
++      ctx = kdamond->damon_ctx;
++      if (ctx)
+               running = damon_sysfs_ctx_running(ctx);
+
++      mutex_unlock(&damon_sysfs_lock);
++
+       return sysfs_emit(buf, "%s\n", running ?
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
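
The fix above takes damon_sysfs_lock via mutex_trylock() (returning -EBUSY on contention) before dereferencing kdamond->damon_ctx, so the paths that free or replace the context under the same lock can no longer race with the read. A minimal userspace sketch of that trylock-guarded pattern, using pthreads and hypothetical stand-in types rather than the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogues of the kdamond/context pair. */
struct ctx { int running; };
struct kdamond { struct ctx *ctx; };	/* may be freed or replaced under the lock */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Reader: take the lock before dereferencing kd->ctx, as the patched
 * state_show() does, and bail out (like -EBUSY) if it is contended. */
static int show_state(struct kdamond *kd, int *running)
{
	if (pthread_mutex_trylock(&lock))
		return -1;	/* the kernel patch returns -EBUSY here */

	*running = kd->ctx ? kd->ctx->running : 0;
	pthread_mutex_unlock(&lock);
	return 0;
}

/* Writer: frees and clears the context under the same lock, so the
 * reader can never observe a dangling pointer. */
static void destroy_ctx(struct kdamond *kd)
{
	pthread_mutex_lock(&lock);
	free(kd->ctx);
	kd->ctx = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct kdamond kd = { .ctx = calloc(1, sizeof(struct ctx)) };
	int running;

	if (!show_state(&kd, &running))
		printf("running: %d\n", running);
	destroy_ctx(&kd);
	return 0;
}
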
diff --git a/queue-6.16/series b/queue-6.16/series
index 95b6a724301f80a6b7b9efbc40c45fd6fb960bfc..350ff0aa3682a1b8aeb76a5021fdcf7cf9fcd9f5 100644
--- a/queue-6.16/series
+++ b/queue-6.16/series
@@ -105,3 +105,5 @@ ceph-fix-crash-after-fscrypt_encrypt_pagecache_blocks-error.patch
 mtd-spinand-add-a-configure_chip-hook.patch
 mtd-spinand-winbond-enable-high-speed-modes-on-w25n0xjw.patch
 mtd-spinand-winbond-fix-oob_layout-for-w25n01jw.patch
+mm-damon-sysfs-fix-use-after-free-in-state_show.patch
+mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch