From: Bijan Tabatabai
Date: Wed, 9 Jul 2025 00:59:42 +0000 (-0500)
Subject: mm/damon: move folio filtering from paddr to ops-common
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0a707d6b04e01490f6c246fa4b6e643cc33b40a1;p=thirdparty%2Flinux.git

mm/damon: move folio filtering from paddr to ops-common

This patch moves damos_pa_filter_match and the functions it calls to
ops-common, renaming it to damos_folio_filter_match.  Doing so allows us
to share the filtering logic for the vaddr version of the
migrate_{hot,cold} schemes.

Link: https://lkml.kernel.org/r/20250709005952.17776-13-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda
Signed-off-by: Ravi Shankar Jonnalagadda
Signed-off-by: Bijan Tabatabai
Reviewed-by: SeongJae Park
Cc: Jonathan Corbet
Signed-off-by: Andrew Morton
---
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 918158ef3d99e..6a9797d1d7ff7 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -141,6 +141,156 @@ int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
 	return DAMOS_MAX_SCORE - hotness;
 }
 
+static bool damon_folio_mkold_one(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr, void *arg)
+{
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+
+	while (page_vma_mapped_walk(&pvmw)) {
+		addr = pvmw.address;
+		if (pvmw.pte)
+			damon_ptep_mkold(pvmw.pte, vma, addr);
+		else
+			damon_pmdp_mkold(pvmw.pmd, vma, addr);
+	}
+	return true;
+}
+
+void damon_folio_mkold(struct folio *folio)
+{
+	struct rmap_walk_control rwc = {
+		.rmap_one = damon_folio_mkold_one,
+		.anon_lock = folio_lock_anon_vma_read,
+	};
+	bool need_lock;
+
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
+		folio_set_idle(folio);
+		return;
+	}
+
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
+		return;
+
+	rmap_walk(folio, &rwc);
+
+	if (need_lock)
+		folio_unlock(folio);
+
+}
+
+static bool damon_folio_young_one(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr, void *arg)
+{
+	bool *accessed = arg;
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+	pte_t pte;
+
+	*accessed = false;
+	while (page_vma_mapped_walk(&pvmw)) {
+		addr = pvmw.address;
+		if (pvmw.pte) {
+			pte = ptep_get(pvmw.pte);
+
+			/*
+			 * PFN swap PTEs, such as device-exclusive ones, that
+			 * actually map pages are "old" from a CPU perspective.
+			 * The MMU notifier takes care of any device aspects.
+			 */
+			*accessed = (pte_present(pte) && pte_young(pte)) ||
+				!folio_test_idle(folio) ||
+				mmu_notifier_test_young(vma->vm_mm, addr);
+		} else {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
+				!folio_test_idle(folio) ||
+				mmu_notifier_test_young(vma->vm_mm, addr);
+#else
+			WARN_ON_ONCE(1);
+#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
+		}
+		if (*accessed) {
+			page_vma_mapped_walk_done(&pvmw);
+			break;
+		}
+	}
+
+	/* If accessed, stop walking */
+	return *accessed == false;
+}
+
+bool damon_folio_young(struct folio *folio)
+{
+	bool accessed = false;
+	struct rmap_walk_control rwc = {
+		.arg = &accessed,
+		.rmap_one = damon_folio_young_one,
+		.anon_lock = folio_lock_anon_vma_read,
+	};
+	bool need_lock;
+
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
+		if (folio_test_idle(folio))
+			return false;
+		else
+			return true;
+	}
+
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
+		return false;
+
+	rmap_walk(folio, &rwc);
+
+	if (need_lock)
+		folio_unlock(folio);
+
+	return accessed;
+}
+
+bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio)
+{
+	bool matched = false;
+	struct mem_cgroup *memcg;
+	size_t folio_sz;
+
+	switch (filter->type) {
+	case DAMOS_FILTER_TYPE_ANON:
+		matched = folio_test_anon(folio);
+		break;
+	case DAMOS_FILTER_TYPE_ACTIVE:
+		matched = folio_test_active(folio);
+		break;
+	case DAMOS_FILTER_TYPE_MEMCG:
+		rcu_read_lock();
+		memcg = folio_memcg_check(folio);
+		if (!memcg)
+			matched = false;
+		else
+			matched = filter->memcg_id == mem_cgroup_id(memcg);
+		rcu_read_unlock();
+		break;
+	case DAMOS_FILTER_TYPE_YOUNG:
+		matched = damon_folio_young(folio);
+		if (matched)
+			damon_folio_mkold(folio);
+		break;
+	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+		folio_sz = folio_size(folio);
+		matched = filter->sz_range.min <= folio_sz &&
+			folio_sz <= filter->sz_range.max;
+		break;
+	case DAMOS_FILTER_TYPE_UNMAPPED:
+		matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
+		break;
+	default:
+		break;
+	}
+
+	return matched == filter->matching;
+}
+
 static unsigned int __damon_migrate_folio_list(
 		struct list_head *migrate_folios, struct pglist_data *pgdat,
 		int target_nid)
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 54209a7e70e67..61ad54aaf2568 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -11,10 +11,13 @@ struct folio *damon_get_folio(unsigned long pfn);
 
 void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr);
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr);
+void damon_folio_mkold(struct folio *folio);
+bool damon_folio_young(struct folio *folio);
 
 int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
 			struct damos *s);
 int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
 			struct damos *s);
+bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio);
 
 unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 48e3e6fed6363..53a55c5114fbe 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -18,45 +18,6 @@
 #include "../internal.h"
 #include "ops-common.h"
 
-static bool damon_folio_mkold_one(struct folio *folio,
-		struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-
-	while (page_vma_mapped_walk(&pvmw)) {
-		addr = pvmw.address;
-		if (pvmw.pte)
-			damon_ptep_mkold(pvmw.pte, vma, addr);
-		else
-			damon_pmdp_mkold(pvmw.pmd, vma, addr);
-	}
-	return true;
-}
-
-static void damon_folio_mkold(struct folio *folio)
-{
-	struct rmap_walk_control rwc = {
-		.rmap_one = damon_folio_mkold_one,
-		.anon_lock = folio_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-		folio_set_idle(folio);
-		return;
-	}
-
-	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-	if (need_lock && !folio_trylock(folio))
-		return;
-
-	rmap_walk(folio, &rwc);
-
-	if (need_lock)
-		folio_unlock(folio);
-
-}
-
 static void damon_pa_mkold(unsigned long paddr)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -86,75 +47,6 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
-static bool damon_folio_young_one(struct folio *folio,
-		struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-	bool *accessed = arg;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-	pte_t pte;
-
-	*accessed = false;
-	while (page_vma_mapped_walk(&pvmw)) {
-		addr = pvmw.address;
-		if (pvmw.pte) {
-			pte = ptep_get(pvmw.pte);
-
-			/*
-			 * PFN swap PTEs, such as device-exclusive ones, that
-			 * actually map pages are "old" from a CPU perspective.
-			 * The MMU notifier takes care of any device aspects.
-			 */
-			*accessed = (pte_present(pte) && pte_young(pte)) ||
-				!folio_test_idle(folio) ||
-				mmu_notifier_test_young(vma->vm_mm, addr);
-		} else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
-				!folio_test_idle(folio) ||
-				mmu_notifier_test_young(vma->vm_mm, addr);
-#else
-			WARN_ON_ONCE(1);
-#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
-		}
-		if (*accessed) {
-			page_vma_mapped_walk_done(&pvmw);
-			break;
-		}
-	}
-
-	/* If accessed, stop walking */
-	return *accessed == false;
-}
-
-static bool damon_folio_young(struct folio *folio)
-{
-	bool accessed = false;
-	struct rmap_walk_control rwc = {
-		.arg = &accessed,
-		.rmap_one = damon_folio_young_one,
-		.anon_lock = folio_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-		if (folio_test_idle(folio))
-			return false;
-		else
-			return true;
-	}
-
-	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-	if (need_lock && !folio_trylock(folio))
-		return false;
-
-	rmap_walk(folio, &rwc);
-
-	if (need_lock)
-		folio_unlock(folio);
-
-	return accessed;
-}
-
 static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -205,49 +97,6 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
-static bool damos_pa_filter_match(struct damos_filter *filter,
-		struct folio *folio)
-{
-	bool matched = false;
-	struct mem_cgroup *memcg;
-	size_t folio_sz;
-
-	switch (filter->type) {
-	case DAMOS_FILTER_TYPE_ANON:
-		matched = folio_test_anon(folio);
-		break;
-	case DAMOS_FILTER_TYPE_ACTIVE:
-		matched = folio_test_active(folio);
-		break;
-	case DAMOS_FILTER_TYPE_MEMCG:
-		rcu_read_lock();
-		memcg = folio_memcg_check(folio);
-		if (!memcg)
-			matched = false;
-		else
-			matched = filter->memcg_id == mem_cgroup_id(memcg);
-		rcu_read_unlock();
-		break;
-	case DAMOS_FILTER_TYPE_YOUNG:
-		matched = damon_folio_young(folio);
-		if (matched)
-			damon_folio_mkold(folio);
-		break;
-	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
-		folio_sz = folio_size(folio);
-		matched = filter->sz_range.min <= folio_sz &&
-			folio_sz <= filter->sz_range.max;
-		break;
-	case DAMOS_FILTER_TYPE_UNMAPPED:
-		matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
-		break;
-	default:
-		break;
-	}
-
-	return matched == filter->matching;
-}
-
 /*
  * damos_pa_filter_out - Return true if the page should be filtered out.
  */
@@ -259,7 +108,7 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 		return false;
 
 	damos_for_each_ops_filter(filter, scheme) {
-		if (damos_pa_filter_match(filter, folio))
+		if (damos_folio_filter_match(filter, folio))
 			return !filter->allow;
 	}
 	return scheme->ops_filters_default_reject;
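
As an illustration of the sharing this enables: once damos_folio_filter_match()
is declared in ops-common.h, a vaddr-based scheme can filter folios with a
helper that mirrors damos_pa_filter_out() above.  The sketch below is only an
illustration, not part of this patch: damos_va_filter_out() is a hypothetical
name (the actual vaddr users are outside this patch), while the
damos_for_each_ops_filter() / damos_folio_filter_match() calls and the
ops_filters_default_reject fallback are exactly what this patch makes
reachable from outside paddr.c.

	#include "ops-common.h"

	/* Hypothetical vaddr-side counterpart of damos_pa_filter_out(). */
	static bool damos_va_filter_out(struct damos *scheme, struct folio *folio)
	{
		struct damos_filter *filter;

		damos_for_each_ops_filter(filter, scheme) {
			/* per-folio matching now shared via ops-common */
			if (damos_folio_filter_match(filter, folio))
				return !filter->allow;
		}
		return scheme->ops_filters_default_reject;
	}

A caller would skip any folio for which this returns true, just as the paddr
scheme actions do with damos_pa_filter_out().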