From: Bijan Tabatabai <bijan311@gmail.com>
Date: Wed, 9 Jul 2025 00:59:38 +0000 (-0500)
Subject: mm/damon: move migration helpers from paddr to ops-common
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=13dde31db71f013257e2f9363edb76f683ea5728;p=thirdparty%2Flinux.git

mm/damon: move migration helpers from paddr to ops-common

This patch moves the damon_pa_migrate_pages() function, along with its
helper functions, from paddr to ops-common. The "damon_pa_" function
prefix is changed to just "damon_" accordingly.

This patch will allow page migration to be available to vaddr schemes as
well as paddr schemes.

Link: https://lkml.kernel.org/r/20250709005952.17776-9-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda
Signed-off-by: Ravi Shankar Jonnalagadda
Signed-off-by: Bijan Tabatabai <bijan311@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
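
For illustration, the caller-side pattern this move enables. The sketch
below is hypothetical (the function name is made up; only
damon_migrate_pages() comes from this patch) and mirrors what
damon_pa_migrate() does in the diff below: gather folios on a list, then
hand the list and a target node to the shared helper.

static unsigned long damos_apply_migrate_example(struct list_head *folio_list,
		struct damos *s)
{
	unsigned long applied;

	/* folios were collected beforehand via list_add(&folio->lru, ...) */
	applied = damon_migrate_pages(folio_list, s->target_nid);
	cond_resched();

	/* report bytes, as DAMOS scheme-apply callbacks do */
	return applied * PAGE_SIZE;
}
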
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index b43620fee6bb2..918158ef3d99e 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -5,6 +5,7 @@
  * Author: SeongJae Park <sj@kernel.org>
  */
 
+#include <linux/migrate.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagemap.h>
@@ -12,6 +13,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
+#include "../internal.h"
 #include "ops-common.h"
 
 /*
@@ -138,3 +140,121 @@ int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
 	/* Return coldness of the region */
 	return DAMOS_MAX_SCORE - hotness;
 }
+
+static unsigned int __damon_migrate_folio_list(
+		struct list_head *migrate_folios, struct pglist_data *pgdat,
+		int target_nid)
+{
+	unsigned int nr_succeeded = 0;
+	struct migration_target_control mtc = {
+		/*
+		 * Allocate from 'node', or fail quickly and quietly.
+		 * When this happens, 'page' will likely just be discarded
+		 * instead of migrated.
+		 */
+		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
+			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+		.nid = target_nid,
+	};
+
+	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
+		return 0;
+
+	if (list_empty(migrate_folios))
+		return 0;
+
+	/* Migration ignores all cpuset and mempolicy settings */
+	migrate_pages(migrate_folios, alloc_migration_target, NULL,
+		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
+		      &nr_succeeded);
+
+	return nr_succeeded;
+}
+
+static unsigned int damon_migrate_folio_list(struct list_head *folio_list,
+					     struct pglist_data *pgdat,
+					     int target_nid)
+{
+	unsigned int nr_migrated = 0;
+	struct folio *folio;
+	LIST_HEAD(ret_folios);
+	LIST_HEAD(migrate_folios);
+
+	while (!list_empty(folio_list)) {
+		struct folio *folio;
+
+		cond_resched();
+
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+
+		if (!folio_trylock(folio))
+			goto keep;
+
+		/* Relocate its contents to another node. */
+		list_add(&folio->lru, &migrate_folios);
+		folio_unlock(folio);
+		continue;
+keep:
+		list_add(&folio->lru, &ret_folios);
+	}
+	/* 'folio_list' is always empty here */
+
+	/* Migrate folios selected for migration */
+	nr_migrated += __damon_migrate_folio_list(
+			&migrate_folios, pgdat, target_nid);
+	/*
+	 * Folios that could not be migrated are still in @migrate_folios. Add
+	 * those back on @folio_list
+	 */
+	if (!list_empty(&migrate_folios))
+		list_splice_init(&migrate_folios, folio_list);
+
+	try_to_unmap_flush();
+
+	list_splice(&ret_folios, folio_list);
+
+	while (!list_empty(folio_list)) {
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+		folio_putback_lru(folio);
+	}
+
+	return nr_migrated;
+}
+
+unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid)
+{
+	int nid;
+	unsigned long nr_migrated = 0;
+	LIST_HEAD(node_folio_list);
+	unsigned int noreclaim_flag;
+
+	if (list_empty(folio_list))
+		return nr_migrated;
+
+	noreclaim_flag = memalloc_noreclaim_save();
+
+	nid = folio_nid(lru_to_folio(folio_list));
+	do {
+		struct folio *folio = lru_to_folio(folio_list);
+
+		if (nid == folio_nid(folio)) {
+			list_move(&folio->lru, &node_folio_list);
+			continue;
+		}
+
+		nr_migrated += damon_migrate_folio_list(&node_folio_list,
+							NODE_DATA(nid),
+							target_nid);
+		nid = folio_nid(lru_to_folio(folio_list));
+	} while (!list_empty(folio_list));
+
+	nr_migrated += damon_migrate_folio_list(&node_folio_list,
+						NODE_DATA(nid),
+						target_nid);
+
+	memalloc_noreclaim_restore(noreclaim_flag);
+
+	return nr_migrated;
+}
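
A note on the allocation policy used by __damon_migrate_folio_list()
above: the gfp_mask makes target-node allocation strictly opportunistic,
so a busy target node leaves the folio where it is instead of stalling
the scheme. Restated with per-flag annotations (exposition only, not part
of the patch):

	gfp_t gfp = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			/* movable user memory, reclaim flags cleared */
		__GFP_NOWARN |		/* no allocation-failure warnings */
		__GFP_NOMEMALLOC |	/* never dip into emergency reserves */
		GFP_NOWAIT;		/* may wake kswapd, but never block */

Folios whose target allocation fails are returned on @migrate_folios and
put back on the LRU by damon_migrate_folio_list().
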
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index cc9f5da9c0127..54209a7e70e67 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -16,3 +16,5 @@ int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
 			struct damos *s);
 int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
 			struct damos *s);
+
+unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index fcab148e68652..48e3e6fed6363 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -13,7 +13,6 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/memory-tiers.h>
-#include <linux/migrate.h>
 #include <linux/mm_inline.h>
 
 #include "../internal.h"
@@ -381,125 +380,6 @@ static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
 			sz_filter_passed);
 }
 
-static unsigned int __damon_pa_migrate_folio_list(
-		struct list_head *migrate_folios, struct pglist_data *pgdat,
-		int target_nid)
-{
-	unsigned int nr_succeeded = 0;
-	struct migration_target_control mtc = {
-		/*
-		 * Allocate from 'node', or fail quickly and quietly.
-		 * When this happens, 'page' will likely just be discarded
-		 * instead of migrated.
-		 */
-		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
-		.nid = target_nid,
-	};
-
-	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
-		return 0;
-
-	if (list_empty(migrate_folios))
-		return 0;
-
-	/* Migration ignores all cpuset and mempolicy settings */
-	migrate_pages(migrate_folios, alloc_migration_target, NULL,
-		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
-		      &nr_succeeded);
-
-	return nr_succeeded;
-}
-
-static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
-						struct pglist_data *pgdat,
-						int target_nid)
-{
-	unsigned int nr_migrated = 0;
-	struct folio *folio;
-	LIST_HEAD(ret_folios);
-	LIST_HEAD(migrate_folios);
-
-	while (!list_empty(folio_list)) {
-		struct folio *folio;
-
-		cond_resched();
-
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-
-		if (!folio_trylock(folio))
-			goto keep;
-
-		/* Relocate its contents to another node. */
-		list_add(&folio->lru, &migrate_folios);
-		folio_unlock(folio);
-		continue;
-keep:
-		list_add(&folio->lru, &ret_folios);
-	}
-	/* 'folio_list' is always empty here */
-
-	/* Migrate folios selected for migration */
-	nr_migrated += __damon_pa_migrate_folio_list(
-			&migrate_folios, pgdat, target_nid);
-	/*
-	 * Folios that could not be migrated are still in @migrate_folios. Add
-	 * those back on @folio_list
-	 */
-	if (!list_empty(&migrate_folios))
-		list_splice_init(&migrate_folios, folio_list);
-
-	try_to_unmap_flush();
-
-	list_splice(&ret_folios, folio_list);
-
-	while (!list_empty(folio_list)) {
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-		folio_putback_lru(folio);
-	}
-
-	return nr_migrated;
-}
-
-static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
-					    int target_nid)
-{
-	int nid;
-	unsigned long nr_migrated = 0;
-	LIST_HEAD(node_folio_list);
-	unsigned int noreclaim_flag;
-
-	if (list_empty(folio_list))
-		return nr_migrated;
-
-	noreclaim_flag = memalloc_noreclaim_save();
-
-	nid = folio_nid(lru_to_folio(folio_list));
-	do {
-		struct folio *folio = lru_to_folio(folio_list);
-
-		if (nid == folio_nid(folio)) {
-			list_move(&folio->lru, &node_folio_list);
-			continue;
-		}
-
-		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-							   NODE_DATA(nid),
-							   target_nid);
-		nid = folio_nid(lru_to_folio(folio_list));
-	} while (!list_empty(folio_list));
-
-	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-						   NODE_DATA(nid),
-						   target_nid);
-
-	memalloc_noreclaim_restore(noreclaim_flag);
-
-	return nr_migrated;
-}
-
 static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
@@ -527,7 +407,7 @@ put_folio:
 		addr += folio_size(folio);
 		folio_put(folio);
 	}
-	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
+	applied = damon_migrate_pages(&folio_list, s->target_nid);
 	cond_resched();
 	s->last_applied = folio;
 	return applied * PAGE_SIZE;
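
Finally, a behavioral note on damon_migrate_pages(), unchanged by the
move: the input list may hold folios from several source nodes, and the
helper migrates consecutive same-node runs (consumed from the list tail)
as separate batches. A hypothetical trace, for exposition only (folio
gathering is elided):

	unsigned long nr;
	LIST_HEAD(folio_list);

	/*
	 * Suppose folios were added so that tail-to-head order is
	 * A (node 0), B (node 0), C (node 1), D (node 0).
	 */
	nr = damon_migrate_pages(&folio_list, 2);

	/*
	 * Internally this issues three batch calls:
	 * damon_migrate_folio_list({A, B}, NODE_DATA(0), 2),
	 * then ({C}, NODE_DATA(1), 2), then ({D}, NODE_DATA(0), 2).
	 * @nr is the total number of base pages successfully migrated
	 * to node 2.
	 */

Callers therefore do not need to sort the list by node before calling.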