--- /dev/null
+From 711f19dfd783ffb37ca4324388b9c4cb87e71363 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:57 +0800
+Subject: mm/damon/lru_sort: avoid divide-by-zero in damon_lru_sort_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit 711f19dfd783ffb37ca4324388b9c4cb87e71363 upstream.
+
+Patch series "mm/damon: avoid divide-by-zero in DAMON module's parameters
+application".
+
+DAMON's RECLAIM and LRU_SORT modules perform no validation on
+user-configured parameters during application, which may lead to
+division-by-zero errors.
+
+Avoid the divide-by-zero by adding validation checks when DAMON modules
+attempt to apply the parameters.
+
+
+This patch (of 2):
+
+During the calculation of 'hot_thres' and 'cold_thres', either
+'sample_interval' or 'aggr_interval' is used as the divisor, which may
+lead to division-by-zero errors. Fix it by directly returning -EINVAL
+when such a case occurs. Additionally, since 'aggr_interval' is already
+required to be set no smaller than 'sample_interval' in damon_set_attrs(),
+only the case where 'sample_interval' is zero needs to be checked.
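+
+The thresholds scale with aggr_interval / sample_interval, so a zero
+'sample_interval' has to be rejected before any division happens. A
+rough, kernel-style sketch of the pattern (illustrative names, not the
+exact lru_sort.c code; assumes <linux/errno.h> for EINVAL):
+
+struct attrs_example {
+        unsigned long sample_interval;  /* microseconds */
+        unsigned long aggr_interval;    /* microseconds */
+};
+
+static int apply_example(const struct attrs_example *attrs,
+                         unsigned int hot_access_freq_permil,
+                         unsigned int *hot_thres)
+{
+        if (!attrs->sample_interval)    /* the guard added here */
+                return -EINVAL;
+
+        /* thresholds scale with aggr_interval / sample_interval */
+        *hot_thres = attrs->aggr_interval / attrs->sample_interval *
+                     hot_access_freq_permil / 1000;
+        return 0;
+}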
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-2-yanquanmin1@huawei.com
+Fixes: 40e983cca927 ("mm/damon: introduce DAMON-based LRU-lists Sorting")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org> [6.0+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/lru_sort.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -203,6 +203,9 @@ static int damon_lru_sort_apply_paramete
+ unsigned int hot_thres, cold_thres;
+ int err = 0;
+
++ if (!damon_lru_sort_mon_attrs.sample_interval)
++ return -EINVAL;
++
+ err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+ if (err)
+ return err;
--- /dev/null
+From e6b543ca9806d7bced863f43020e016ee996c057 Mon Sep 17 00:00:00 2001
+From: Quanmin Yan <yanquanmin1@huawei.com>
+Date: Wed, 27 Aug 2025 19:58:58 +0800
+Subject: mm/damon/reclaim: avoid divide-by-zero in damon_reclaim_apply_parameters()
+
+From: Quanmin Yan <yanquanmin1@huawei.com>
+
+commit e6b543ca9806d7bced863f43020e016ee996c057 upstream.
+
+When creating a new scheme of DAMON_RECLAIM, the calculation of
+'min_age_region' uses 'aggr_interval' as the divisor, which may lead to
+division-by-zero errors. Fix it by directly returning -EINVAL when such a
+case occurs.
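+
+Put differently, the minimum region age is expressed in aggregation
+intervals, so the conversion divides by 'aggr_interval'. A rough,
+kernel-style sketch of the pattern (illustrative names, not the exact
+reclaim.c code):
+
+static int set_min_age_region(unsigned long min_age_us,
+                              unsigned long aggr_interval_us,
+                              unsigned long *min_age_region)
+{
+        if (!aggr_interval_us)  /* the guard added here */
+                return -EINVAL;
+
+        /* minimum age in usecs, converted into aggregation intervals */
+        *min_age_region = min_age_us / aggr_interval_us;
+        return 0;
+}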
+
+Link: https://lkml.kernel.org/r/20250827115858.1186261-3-yanquanmin1@huawei.com
+Fixes: f5a79d7c0c87 ("mm/damon: introduce struct damos_access_pattern")
+Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: ze zuo <zuoze1@huawei.com>
+Cc: <stable@vger.kernel.org> [6.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/reclaim.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -157,6 +157,9 @@ static int damon_reclaim_apply_parameter
+ struct damos *scheme, *old_scheme;
+ int err = 0;
+
++ if (!damon_reclaim_mon_attrs.aggr_interval)
++ return -EINVAL;
++
+ err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+ if (err)
+ return err;
--- /dev/null
+From 3260a3f0828e06f5f13fac69fb1999a6d60d9cff Mon Sep 17 00:00:00 2001
+From: Stanislav Fort <stanislav.fort@aisle.com>
+Date: Fri, 5 Sep 2025 13:10:46 +0300
+Subject: mm/damon/sysfs: fix use-after-free in state_show()
+
+From: Stanislav Fort <stanislav.fort@aisle.com>
+
+commit 3260a3f0828e06f5f13fac69fb1999a6d60d9cff upstream.
+
+state_show() reads kdamond->damon_ctx without holding damon_sysfs_lock.
+This allows a use-after-free race:
+
+CPU 0                            CPU 1
+-----                            -----
+state_show()                     damon_sysfs_turn_damon_on()
+ctx = kdamond->damon_ctx;        mutex_lock(&damon_sysfs_lock);
+                                 damon_destroy_ctx(kdamond->damon_ctx);
+                                 kdamond->damon_ctx = NULL;
+                                 mutex_unlock(&damon_sysfs_lock);
+damon_is_running(ctx); /* ctx is freed */
+mutex_lock(&ctx->kdamond_lock); /* UAF */
+
+(The race can also occur with damon_sysfs_kdamonds_rm_dirs() and
+damon_sysfs_kdamond_release(), which free or replace the context under
+damon_sysfs_lock.)
+
+Fix by taking damon_sysfs_lock before dereferencing the context, mirroring
+the locking used in pid_show().
+
+The bug has existed since state_show() first accessed kdamond->damon_ctx.
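+
+In schematic form (the actual hunk is below; mutex_trylock() keeps the
+sysfs read from blocking behind a long-running DAMON command):
+
+/* broken: a snapshot taken outside the lock can be freed at any time */
+ctx = kdamond->damon_ctx;
+if (ctx)
+        running = damon_sysfs_ctx_running(ctx);  /* potential UAF */
+
+/* fixed: dereference only while holding damon_sysfs_lock, which also
+ * serializes the paths that destroy the context
+ */
+if (!mutex_trylock(&damon_sysfs_lock))
+        return -EBUSY;
+ctx = kdamond->damon_ctx;
+if (ctx)
+        running = damon_sysfs_ctx_running(ctx);
+mutex_unlock(&damon_sysfs_lock);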
+
+Link: https://lkml.kernel.org/r/20250905101046.2288-1-disclosure@aisle.com
+Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
+Signed-off-by: Stanislav Fort <disclosure@aisle.com>
+Reported-by: Stanislav Fort <disclosure@aisle.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2093,14 +2093,18 @@ static ssize_t state_show(struct kobject
+ {
+ struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+ struct damon_sysfs_kdamond, kobj);
+- struct damon_ctx *ctx = kdamond->damon_ctx;
+- bool running;
++ struct damon_ctx *ctx;
++ bool running = false;
+
+- if (!ctx)
+- running = false;
+- else
++ if (!mutex_trylock(&damon_sysfs_lock))
++ return -EBUSY;
++
++ ctx = kdamond->damon_ctx;
++ if (ctx)
+ running = damon_sysfs_ctx_running(ctx);
+
++ mutex_unlock(&damon_sysfs_lock);
++
+ return sysfs_emit(buf, "%s\n", running ?
+ damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+ damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
--- /dev/null
+From stable+bounces-179519-greg=kroah.com@vger.kernel.org Sat Sep 13 21:03:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:03:36 -0400
+Subject: mm/khugepaged: convert hpage_collapse_scan_pmd() to use folios
+To: stable@vger.kernel.org
+Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>, Rik van Riel <riel@surriel.com>, Yang Shi <shy828301@gmail.com>, Kefeng Wang <wangkefeng.wang@huawei.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250913190337.1520681-1-sashal@kernel.org>
+
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+
+[ Upstream commit 5c07ebb372d66423e508ecfb8e00324f8797f072 ]
+
+Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel
+text.
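+
+As the hunk below shows, the conversion follows the usual folio pattern:
+resolve the head page once with page_folio() and use folio helpers from
+then on, instead of letting every PageXXX()/page_to_nid() call resolve
+compound_head() again. Roughly (a simplified sketch, not the full scan
+loop):
+
+folio = page_folio(page);               /* one head-page lookup */
+
+node = folio_nid(folio);                /* was page_to_nid(page) */
+if (!folio_test_lru(folio))             /* was !PageLRU(page) */
+        result = SCAN_PAGE_LRU;
+else if (folio_test_locked(folio))      /* was PageLocked(page) */
+        result = SCAN_PAGE_LOCK;
+else if (!folio_test_anon(folio))       /* was !PageAnon(page) */
+        result = SCAN_PAGE_ANON;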
+
+Link: https://lkml.kernel.org/r/20231020183331.10770-3-vishal.moola@gmail.com
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 394bfac1c7f7 ("mm/khugepaged: fix the address passed to notifier on testing young")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1140,6 +1140,7 @@ static int hpage_collapse_scan_pmd(struc
+ int result = SCAN_FAIL, referenced = 0;
+ int none_or_zero = 0, shared = 0;
+ struct page *page = NULL;
++ struct folio *folio = NULL;
+ unsigned long _address;
+ spinlock_t *ptl;
+ int node = NUMA_NO_NODE, unmapped = 0;
+@@ -1221,29 +1222,28 @@ static int hpage_collapse_scan_pmd(struc
+ }
+ }
+
+- page = compound_head(page);
+-
++ folio = page_folio(page);
+ /*
+ * Record which node the original page is from and save this
+ * information to cc->node_load[].
+ * Khugepaged will allocate hugepage from the node has the max
+ * hit record.
+ */
+- node = page_to_nid(page);
++ node = folio_nid(folio);
+ if (hpage_collapse_scan_abort(node, cc)) {
+ result = SCAN_SCAN_ABORT;
+ goto out_unmap;
+ }
+ cc->node_load[node]++;
+- if (!PageLRU(page)) {
++ if (!folio_test_lru(folio)) {
+ result = SCAN_PAGE_LRU;
+ goto out_unmap;
+ }
+- if (PageLocked(page)) {
++ if (folio_test_locked(folio)) {
+ result = SCAN_PAGE_LOCK;
+ goto out_unmap;
+ }
+- if (!PageAnon(page)) {
++ if (!folio_test_anon(folio)) {
+ result = SCAN_PAGE_ANON;
+ goto out_unmap;
+ }
+@@ -1265,7 +1265,7 @@ static int hpage_collapse_scan_pmd(struc
+ * has excessive GUP pins (i.e. 512). Anyway the same check
+ * will be done again later the risk seems low.
+ */
+- if (!is_refcount_suitable(page)) {
++ if (!is_refcount_suitable(&folio->page)) {
+ result = SCAN_PAGE_COUNT;
+ goto out_unmap;
+ }
+@@ -1275,8 +1275,8 @@ static int hpage_collapse_scan_pmd(struc
+ * enough young pte to justify collapsing the page
+ */
+ if (cc->is_khugepaged &&
+- (pte_young(pteval) || page_is_young(page) ||
+- PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
++ (pte_young(pteval) || folio_test_young(folio) ||
++ folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
+ address)))
+ referenced++;
+ }
+@@ -1298,7 +1298,7 @@ out_unmap:
+ *mmap_locked = false;
+ }
+ out:
+- trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
++ trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
+ none_or_zero, result, unmapped);
+ return result;
+ }
--- /dev/null
+From stable+bounces-179520-greg=kroah.com@vger.kernel.org Sat Sep 13 21:03:53 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:03:37 -0400
+Subject: mm/khugepaged: fix the address passed to notifier on testing young
+To: stable@vger.kernel.org
+Cc: Wei Yang <richard.weiyang@gmail.com>, Dev Jain <dev.jain@arm.com>, Zi Yan <ziy@nvidia.com>, David Hildenbrand <david@redhat.com>, Lorenzo Stoakes <lorenzo.stoakes@oracle.com>, Baolin Wang <baolin.wang@linux.alibaba.com>, "Liam R. Howlett" <Liam.Howlett@oracle.com>, Nico Pache <npache@redhat.com>, Ryan Roberts <ryan.roberts@arm.com>, Barry Song <baohua@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250913190337.1520681-2-sashal@kernel.org>
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+[ Upstream commit 394bfac1c7f7b701c2c93834c5761b9c9ceeebcf ]
+
+Commit 8ee53820edfd ("thp: mmu_notifier_test_young") introduced
+mmu_notifier_test_young(), but we are passing the wrong address.
+In xxx_scan_pmd(), the actual iteration address is "_address", not
+"address"; the wrong variable appears to have been used from the very
+beginning.
+
+Change it to the right one.
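+
+For context, the scan loop keeps 'address' at the start of the PMD range
+and advances a per-PTE cursor; simplified from hpage_collapse_scan_pmd():
+
+for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
+     _pte++, _address += PAGE_SIZE) {
+        /*
+         * Per-PTE checks must use the cursor, so the call should be
+         * mmu_notifier_test_young(vma->vm_mm, _address).
+         */
+}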
+
+[akpm@linux-foundation.org fix whitespace, per everyone]
+Link: https://lkml.kernel.org/r/20250822063318.11644-1-richard.weiyang@gmail.com
+Fixes: 8ee53820edfd ("thp: mmu_notifier_test_young")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1276,8 +1276,8 @@ static int hpage_collapse_scan_pmd(struc
+ */
+ if (cc->is_khugepaged &&
+ (pte_young(pteval) || folio_test_young(folio) ||
+- folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
+- address)))
++ folio_test_referenced(folio) ||
++ mmu_notifier_test_young(vma->vm_mm, _address)))
+ referenced++;
+ }
+ if (!writable) {
--- /dev/null
+From stable+bounces-179493-greg=kroah.com@vger.kernel.org Sat Sep 13 17:08:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 11:08:28 -0400
+Subject: mtd: nand: raw: atmel: Fix comment in timings preparation
+To: stable@vger.kernel.org
+Cc: Alexander Dahl <ada@thorsis.com>, Nicolas Ferre <nicolas.ferre@microchip.com>, Miquel Raynal <miquel.raynal@bootlin.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250913150829.1407206-1-sashal@kernel.org>
+
+From: Alexander Dahl <ada@thorsis.com>
+
+[ Upstream commit 1c60e027ffdebd36f4da766d9c9abbd1ea4dd8f9 ]
+
+Looks like a copy'n'paste mistake introduced when initially adding the
+dynamic timings feature with commit f9ce2eddf176 ("mtd: nand: atmel: Add
+->setup_data_interface() hooks"). The context around this, and especially
+the code itself, suggests that 'read' is meant instead of 'write'.
+
+Signed-off-by: Alexander Dahl <ada@thorsis.com>
+Reviewed-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20240226122537.75097-1-ada@thorsis.com
+Stable-dep-of: fd779eac2d65 ("mtd: nand: raw: atmel: Respect tAR, tCLR in read setup timing")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/atmel/nand-controller.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,7 +1378,7 @@ static int atmel_smc_nand_prepare_smccon
+ return ret;
+
+ /*
+- * The write cycle timing is directly matching tWC, but is also
++ * The read cycle timing is directly matching tRC, but is also
+ * dependent on the setup and hold timings we calculated earlier,
+ * which gives:
+ *
--- /dev/null
+From stable+bounces-179494-greg=kroah.com@vger.kernel.org Sat Sep 13 17:08:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 11:08:29 -0400
+Subject: mtd: nand: raw: atmel: Respect tAR, tCLR in read setup timing
+To: stable@vger.kernel.org
+Cc: Alexander Sverdlin <alexander.sverdlin@siemens.com>, Alexander Dahl <ada@thorsis.com>, Miquel Raynal <miquel.raynal@bootlin.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250913150829.1407206-2-sashal@kernel.org>
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+[ Upstream commit fd779eac2d659668be4d3dbdac0710afd5d6db12 ]
+
+Having a setup time of 0 violates tAR and tCLR of some chips; for
+instance, a TOSHIBA TC58NVG2S3ETAI0 cannot be detected successfully (the
+first ID byte is read duplicated, i.e. 98 98 dc 90 15 76 14 03 instead of
+98 dc 90 15 76 ...).
+
+Atmel Application Notes postulated 1 cycle NRD_SETUP without explanation
+[1], but it looks more appropriate to just calculate setup time properly.
+
+[1] Link: https://ww1.microchip.com/downloads/aemDocuments/documents/MPU32/ApplicationNotes/ApplicationNotes/doc6255.pdf
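+
+The calculation added below turns the larger of tAR and tCLR into whole
+master-clock cycles and programs it as NRD_SETUP. A standalone sketch of
+the arithmetic (illustrative helper, picosecond inputs; the kernel
+already provides DIV_ROUND_UP()):
+
+#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
+
+/* NRD_SETUP = max(tAR, tCLR), rounded up to whole MCK cycles */
+static unsigned int nrd_setup_cycles(unsigned long tar_ps,
+                                     unsigned long tclr_ps,
+                                     unsigned long mckperiodps)
+{
+        unsigned long timeps = tar_ps > tclr_ps ? tar_ps : tclr_ps;
+
+        return DIV_ROUND_UP(timeps, mckperiodps);
+}
+
+With, say, tAR = tCLR = 10 ns and a 7.5 ns MCK period (example values,
+not chip data), this yields 2 setup cycles instead of the previous 0.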
+
+Cc: stable@vger.kernel.org
+Fixes: f9ce2eddf176 ("mtd: nand: atmel: Add ->setup_data_interface() hooks")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Tested-by: Alexander Dahl <ada@thorsis.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/atmel/nand-controller.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smccon
+ return ret;
+
+ /*
++ * Read setup timing depends on the operation done on the NAND:
++ *
++ * NRD_SETUP = max(tAR, tCLR)
++ */
++ timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++ totalcycles += ncycles;
++ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++ if (ret)
++ return ret;
++
++ /*
+ * The read cycle timing is directly matching tRC, but is also
+ * dependent on the setup and hold timings we calculated earlier,
+ * which gives:
+ *
+- * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+- *
+- * NRD_SETUP is always 0.
++ * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
fuse-check-if-copy_file_range-returns-larger-than-requested-size.patch
fuse-prevent-overflow-in-copy_file_range-return-value.patch
libceph-fix-invalid-accesses-to-ceph_connection_v1_info.patch
+mm-damon-sysfs-fix-use-after-free-in-state_show.patch
+mm-damon-reclaim-avoid-divide-by-zero-in-damon_reclaim_apply_parameters.patch
+mm-damon-lru_sort-avoid-divide-by-zero-in-damon_lru_sort_apply_parameters.patch
+mtd-nand-raw-atmel-fix-comment-in-timings-preparation.patch
+mtd-nand-raw-atmel-respect-tar-tclr-in-read-setup-timing.patch
+mm-khugepaged-convert-hpage_collapse_scan_pmd-to-use-folios.patch
+mm-khugepaged-fix-the-address-passed-to-notifier-on-testing-young.patch