mm/damon: move folio filtering from paddr to ops-common
author	Bijan Tabatabai <bijantabatab@micron.com>	Wed, 9 Jul 2025 00:59:42 +0000 (19:59 -0500)
committer	Andrew Morton <akpm@linux-foundation.org>	Sun, 20 Jul 2025 01:59:50 +0000 (18:59 -0700)
Move damos_pa_filter_match() and the functions it calls to ops-common,
renaming it to damos_folio_filter_match().  This allows the filtering
logic to be shared with the vaddr version of the migrate_{hot,cold}
schemes.
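
For illustration only, a vaddr-side caller could reuse the shared helper
the same way paddr's damos_pa_filter_out() does below; the function name
damos_va_filter_out() in this sketch is hypothetical and is not added by
this patch:

	/*
	 * Hypothetical vaddr-side user of the shared helper (not part of
	 * this patch): return true if the folio should be filtered out,
	 * mirroring the pattern used by paddr's damos_pa_filter_out().
	 */
	static bool damos_va_filter_out(struct damos *scheme, struct folio *folio)
	{
		struct damos_filter *filter;

		/* Walk the scheme's ops-layer filters; first match decides. */
		damos_for_each_ops_filter(filter, scheme) {
			if (damos_folio_filter_match(filter, folio))
				return !filter->allow;
		}
		/* No filter matched; fall back to the scheme's default. */
		return scheme->ops_filters_default_reject;
	}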

Link: https://lkml.kernel.org/r/20250709005952.17776-13-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/ops-common.c
mm/damon/ops-common.h
mm/damon/paddr.c

diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 918158ef3d99eefe4460fa9ac17a37e7bbcda0f1..6a9797d1d7ff71a2bba3305517a5c360259e28ed 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -141,6 +141,156 @@ int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
        return DAMOS_MAX_SCORE - hotness;
 }
 
+static bool damon_folio_mkold_one(struct folio *folio,
+               struct vm_area_struct *vma, unsigned long addr, void *arg)
+{
+       DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+
+       while (page_vma_mapped_walk(&pvmw)) {
+               addr = pvmw.address;
+               if (pvmw.pte)
+                       damon_ptep_mkold(pvmw.pte, vma, addr);
+               else
+                       damon_pmdp_mkold(pvmw.pmd, vma, addr);
+       }
+       return true;
+}
+
+void damon_folio_mkold(struct folio *folio)
+{
+       struct rmap_walk_control rwc = {
+               .rmap_one = damon_folio_mkold_one,
+               .anon_lock = folio_lock_anon_vma_read,
+       };
+       bool need_lock;
+
+       if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
+               folio_set_idle(folio);
+               return;
+       }
+
+       need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+       if (need_lock && !folio_trylock(folio))
+               return;
+
+       rmap_walk(folio, &rwc);
+
+       if (need_lock)
+               folio_unlock(folio);
+
+}
+
+static bool damon_folio_young_one(struct folio *folio,
+               struct vm_area_struct *vma, unsigned long addr, void *arg)
+{
+       bool *accessed = arg;
+       DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+       pte_t pte;
+
+       *accessed = false;
+       while (page_vma_mapped_walk(&pvmw)) {
+               addr = pvmw.address;
+               if (pvmw.pte) {
+                       pte = ptep_get(pvmw.pte);
+
+                       /*
+                        * PFN swap PTEs, such as device-exclusive ones, that
+                        * actually map pages are "old" from a CPU perspective.
+                        * The MMU notifier takes care of any device aspects.
+                        */
+                       *accessed = (pte_present(pte) && pte_young(pte)) ||
+                               !folio_test_idle(folio) ||
+                               mmu_notifier_test_young(vma->vm_mm, addr);
+               } else {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                       *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
+                               !folio_test_idle(folio) ||
+                               mmu_notifier_test_young(vma->vm_mm, addr);
+#else
+                       WARN_ON_ONCE(1);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+               }
+               if (*accessed) {
+                       page_vma_mapped_walk_done(&pvmw);
+                       break;
+               }
+       }
+
+       /* If accessed, stop walking */
+       return *accessed == false;
+}
+
+bool damon_folio_young(struct folio *folio)
+{
+       bool accessed = false;
+       struct rmap_walk_control rwc = {
+               .arg = &accessed,
+               .rmap_one = damon_folio_young_one,
+               .anon_lock = folio_lock_anon_vma_read,
+       };
+       bool need_lock;
+
+       if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
+               if (folio_test_idle(folio))
+                       return false;
+               else
+                       return true;
+       }
+
+       need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+       if (need_lock && !folio_trylock(folio))
+               return false;
+
+       rmap_walk(folio, &rwc);
+
+       if (need_lock)
+               folio_unlock(folio);
+
+       return accessed;
+}
+
+bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio)
+{
+       bool matched = false;
+       struct mem_cgroup *memcg;
+       size_t folio_sz;
+
+       switch (filter->type) {
+       case DAMOS_FILTER_TYPE_ANON:
+               matched = folio_test_anon(folio);
+               break;
+       case DAMOS_FILTER_TYPE_ACTIVE:
+               matched = folio_test_active(folio);
+               break;
+       case DAMOS_FILTER_TYPE_MEMCG:
+               rcu_read_lock();
+               memcg = folio_memcg_check(folio);
+               if (!memcg)
+                       matched = false;
+               else
+                       matched = filter->memcg_id == mem_cgroup_id(memcg);
+               rcu_read_unlock();
+               break;
+       case DAMOS_FILTER_TYPE_YOUNG:
+               matched = damon_folio_young(folio);
+               if (matched)
+                       damon_folio_mkold(folio);
+               break;
+       case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+               folio_sz = folio_size(folio);
+               matched = filter->sz_range.min <= folio_sz &&
+                         folio_sz <= filter->sz_range.max;
+               break;
+       case DAMOS_FILTER_TYPE_UNMAPPED:
+               matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
+               break;
+       default:
+               break;
+       }
+
+       return matched == filter->matching;
+}
+
 static unsigned int __damon_migrate_folio_list(
                struct list_head *migrate_folios, struct pglist_data *pgdat,
                int target_nid)
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 54209a7e70e67bc0d8163a8c8424e9b58374a9f3..61ad54aaf2568765dddb80c2c362a1b341be30c6 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -11,10 +11,13 @@ struct folio *damon_get_folio(unsigned long pfn);
 
 void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr);
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr);
+void damon_folio_mkold(struct folio *folio);
+bool damon_folio_young(struct folio *folio);
 
 int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
                        struct damos *s);
 int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
                        struct damos *s);
 
+bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio);
 unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 48e3e6fed63638c5aa43d463e825719751500d0a..53a55c5114fbef93c69de5a218014b050e2a1a27 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
 #include "../internal.h"
 #include "ops-common.h"
 
-static bool damon_folio_mkold_one(struct folio *folio,
-               struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-       DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-
-       while (page_vma_mapped_walk(&pvmw)) {
-               addr = pvmw.address;
-               if (pvmw.pte)
-                       damon_ptep_mkold(pvmw.pte, vma, addr);
-               else
-                       damon_pmdp_mkold(pvmw.pmd, vma, addr);
-       }
-       return true;
-}
-
-static void damon_folio_mkold(struct folio *folio)
-{
-       struct rmap_walk_control rwc = {
-               .rmap_one = damon_folio_mkold_one,
-               .anon_lock = folio_lock_anon_vma_read,
-       };
-       bool need_lock;
-
-       if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-               folio_set_idle(folio);
-               return;
-       }
-
-       need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-       if (need_lock && !folio_trylock(folio))
-               return;
-
-       rmap_walk(folio, &rwc);
-
-       if (need_lock)
-               folio_unlock(folio);
-
-}
-
 static void damon_pa_mkold(unsigned long paddr)
 {
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -86,75 +47,6 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
        }
 }
 
-static bool damon_folio_young_one(struct folio *folio,
-               struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-       bool *accessed = arg;
-       DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-       pte_t pte;
-
-       *accessed = false;
-       while (page_vma_mapped_walk(&pvmw)) {
-               addr = pvmw.address;
-               if (pvmw.pte) {
-                       pte = ptep_get(pvmw.pte);
-
-                       /*
-                        * PFN swap PTEs, such as device-exclusive ones, that
-                        * actually map pages are "old" from a CPU perspective.
-                        * The MMU notifier takes care of any device aspects.
-                        */
-                       *accessed = (pte_present(pte) && pte_young(pte)) ||
-                               !folio_test_idle(folio) ||
-                               mmu_notifier_test_young(vma->vm_mm, addr);
-               } else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
-                               !folio_test_idle(folio) ||
-                               mmu_notifier_test_young(vma->vm_mm, addr);
-#else
-                       WARN_ON_ONCE(1);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-               }
-               if (*accessed) {
-                       page_vma_mapped_walk_done(&pvmw);
-                       break;
-               }
-       }
-
-       /* If accessed, stop walking */
-       return *accessed == false;
-}
-
-static bool damon_folio_young(struct folio *folio)
-{
-       bool accessed = false;
-       struct rmap_walk_control rwc = {
-               .arg = &accessed,
-               .rmap_one = damon_folio_young_one,
-               .anon_lock = folio_lock_anon_vma_read,
-       };
-       bool need_lock;
-
-       if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-               if (folio_test_idle(folio))
-                       return false;
-               else
-                       return true;
-       }
-
-       need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-       if (need_lock && !folio_trylock(folio))
-               return false;
-
-       rmap_walk(folio, &rwc);
-
-       if (need_lock)
-               folio_unlock(folio);
-
-       return accessed;
-}
-
 static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -205,49 +97,6 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
        return max_nr_accesses;
 }
 
-static bool damos_pa_filter_match(struct damos_filter *filter,
-               struct folio *folio)
-{
-       bool matched = false;
-       struct mem_cgroup *memcg;
-       size_t folio_sz;
-
-       switch (filter->type) {
-       case DAMOS_FILTER_TYPE_ANON:
-               matched = folio_test_anon(folio);
-               break;
-       case DAMOS_FILTER_TYPE_ACTIVE:
-               matched = folio_test_active(folio);
-               break;
-       case DAMOS_FILTER_TYPE_MEMCG:
-               rcu_read_lock();
-               memcg = folio_memcg_check(folio);
-               if (!memcg)
-                       matched = false;
-               else
-                       matched = filter->memcg_id == mem_cgroup_id(memcg);
-               rcu_read_unlock();
-               break;
-       case DAMOS_FILTER_TYPE_YOUNG:
-               matched = damon_folio_young(folio);
-               if (matched)
-                       damon_folio_mkold(folio);
-               break;
-       case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
-               folio_sz = folio_size(folio);
-               matched = filter->sz_range.min <= folio_sz &&
-                         folio_sz <= filter->sz_range.max;
-               break;
-       case DAMOS_FILTER_TYPE_UNMAPPED:
-               matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
-               break;
-       default:
-               break;
-       }
-
-       return matched == filter->matching;
-}
-
 /*
  * damos_pa_filter_out - Return true if the page should be filtered out.
  */
@@ -259,7 +108,7 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
                return false;
 
        damos_for_each_ops_filter(filter, scheme) {
-               if (damos_pa_filter_match(filter, folio))
+               if (damos_folio_filter_match(filter, folio))
                        return !filter->allow;
        }
        return scheme->ops_filters_default_reject;