From 09efc56a3b1cfda995586ef27ed8d6f8f92ed917 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 12 Nov 2025 07:41:08 -0800 Subject: [PATCH] mm/damon/vaddr: consistently use only pmd_entry for damos_migrate For page table walks, it is usual [1] to have only one pmd entry function. The vaddr.c code for DAMOS_MIGRATE_{HOT,COLD} is not following the pattern. Instead, it uses both pmd and pte entry functions without a special reason. Refactor it to use only the pmd entry function, to make the code under mm/ more consistent. Link: https://lkml.kernel.org/r/20251112154114.66053-6-sj@kernel.org Signed-off-by: SeongJae Park Suggested-by: David Hildenbrand Cc: Bill Wendling Cc: Brendan Higgins Cc: David Gow Cc: Hugh Dickins Cc: Jonathan Corbet Cc: Justin Stitt Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Michal Hocko Cc: Miguel Ojeda Cc: Mike Rapoport Cc: Nathan Chancellor Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/damon/vaddr.c | 84 +++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 47 deletions(-) diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index b9f0c9e3f6842..2750c88e72252 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -697,7 +697,6 @@ isolate: list_add(&folio->lru, &migration_lists[i]); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { @@ -707,58 +706,49 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr, struct damos_migrate_dests *dests = &s->migrate_dests; struct folio *folio; spinlock_t *ptl; - pmd_t pmde; - - ptl = pmd_lock(walk->mm, pmd); - pmde = pmdp_get(pmd); - - if (!pmd_present(pmde) || !pmd_trans_huge(pmde)) - goto unlock; - - /* Tell page walk code to not split the PMD */ - walk->action = ACTION_CONTINUE; - - folio = vm_normal_folio_pmd(walk->vma, addr, pmde); - if (!folio) - goto unlock; - - if (damos_va_filter_out(s, folio, walk->vma, 
addr, NULL, pmd)) - goto unlock; - - damos_va_migrate_dests_add(folio, walk->vma, addr, dests, - migration_lists); - -unlock: - spin_unlock(ptl); - return 0; -} -#else -#define damos_va_migrate_pmd_entry NULL -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + pte_t *start_pte, *pte, ptent; + int nr; -static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr, - unsigned long next, struct mm_walk *walk) -{ - struct damos_va_migrate_private *priv = walk->private; - struct list_head *migration_lists = priv->migration_lists; - struct damos *s = priv->scheme; - struct damos_migrate_dests *dests = &s->migrate_dests; - struct folio *folio; - pte_t ptent; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ptl = pmd_trans_huge_lock(pmd, walk->vma); + if (ptl) { + pmd_t pmde = pmdp_get(pmd); - ptent = ptep_get(pte); - if (pte_none(ptent) || !pte_present(ptent)) + if (!pmd_present(pmde)) + goto huge_out; + folio = vm_normal_folio_pmd(walk->vma, addr, pmde); + if (!folio) + goto huge_out; + if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd)) + goto huge_out; + damos_va_migrate_dests_add(folio, walk->vma, addr, dests, + migration_lists); +huge_out: + spin_unlock(ptl); return 0; + } +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - folio = vm_normal_folio(walk->vma, addr, ptent); - if (!folio) + start_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) return 0; - if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL)) - return 0; + for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) { + nr = 1; + ptent = ptep_get(pte); - damos_va_migrate_dests_add(folio, walk->vma, addr, dests, - migration_lists); + if (pte_none(ptent) || !pte_present(ptent)) + continue; + folio = vm_normal_folio(walk->vma, addr, ptent); + if (!folio) + continue; + if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL)) + continue; + damos_va_migrate_dests_add(folio, walk->vma, addr, dests, + migration_lists); + nr = folio_nr_pages(folio); + } + pte_unmap_unlock(start_pte, ptl); 
return 0; } @@ -824,7 +814,7 @@ static unsigned long damos_va_migrate(struct damon_target *target, struct damos_migrate_dests *dests = &s->migrate_dests; struct mm_walk_ops walk_ops = { .pmd_entry = damos_va_migrate_pmd_entry, - .pte_entry = damos_va_migrate_pte_entry, + .pte_entry = NULL, .walk_lock = PGWALK_RDLOCK, }; -- 2.47.3