From 22d103aef090dc688a88881fb955376dec1228d5 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Fri, 4 Jul 2025 12:25:09 +0200
Subject: [PATCH] mm/migration: remove PageMovable()
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Previously, if __ClearPageMovable() were invoked on a page, this would
cause __PageMovable() to return false, but due to the continued
existence of page movable ops, PageMovable() would have returned true.

With __ClearPageMovable() gone, the two are exactly equivalent.

So we can replace PageMovable() checks by __PageMovable(). In fact,
__PageMovable() cannot change until a page is freed, so we can turn
some PageMovable() into sanity checks for __PageMovable().

Link: https://lkml.kernel.org/r/20250704102524.326966-16-david@redhat.com
Signed-off-by: David Hildenbrand
Reviewed-by: Zi Yan
Reviewed-by: Lorenzo Stoakes
Reviewed-by: Harry Yoo
Cc: Alistair Popple
Cc: Al Viro
Cc: Arnd Bergmann
Cc: Brendan Jackman
Cc: Byungchul Park
Cc: Chengming Zhou
Cc: Christian Brauner
Cc: Christophe Leroy
Cc: Eugenio Pérez
Cc: Greg Kroah-Hartman
Cc: Gregory Price
Cc: "Huang, Ying"
Cc: Jan Kara
Cc: Jason Gunthorpe
Cc: Jason Wang
Cc: Jerrin Shaji George
Cc: Johannes Weiner
Cc: John Hubbard
Cc: Jonathan Corbet
Cc: Joshua Hahn
Cc: Liam Howlett
Cc: Madhavan Srinivasan
Cc: Mathew Brost
Cc: Matthew Wilcox (Oracle)
Cc: Miaohe Lin
Cc: Michael Ellerman
Cc: "Michael S. Tsirkin"
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Minchan Kim
Cc: Naoya Horiguchi
Cc: Nicholas Piggin
Cc: Oscar Salvador
Cc: Peter Xu
Cc: Qi Zheng
Cc: Rakie Kim
Cc: Rik van Riel
Cc: Sergey Senozhatsky
Cc: Shakeel Butt
Cc: Suren Baghdasaryan
Cc: Vlastimil Babka
Cc: Xuan Zhuo
Cc: xu xin
Signed-off-by: Andrew Morton
---
 include/linux/migrate.h |  2 --
 mm/compaction.c         | 15 ---------------
 mm/migrate.c            | 18 ++++++++++--------
 3 files changed, 10 insertions(+), 25 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6eeda8eb1e0d8..25659a685e2aa 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -104,10 +104,8 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_COMPACTION
-bool PageMovable(struct page *page);
 void __SetPageMovable(struct page *page, const struct movable_operations *ops);
 #else
-static inline bool PageMovable(struct page *page) { return false; }
 static inline void __SetPageMovable(struct page *page,
 		const struct movable_operations *ops)
 {
diff --git a/mm/compaction.c b/mm/compaction.c
index 889ec696ba96a..5c37373017014 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -114,21 +114,6 @@ static unsigned long release_free_list(struct list_head *freepages)
 }
 
 #ifdef CONFIG_COMPACTION
-bool PageMovable(struct page *page)
-{
-	const struct movable_operations *mops;
-
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	if (!__PageMovable(page))
-		return false;
-
-	mops = page_movable_ops(page);
-	if (mops)
-		return true;
-
-	return false;
-}
-
 void __SetPageMovable(struct page *page, const struct movable_operations *mops)
 {
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 61e98ed46f134..1f07c8f1fb744 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -87,9 +87,12 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 		goto out;
 
 	/*
-	 * Check movable flag before taking the page lock because
+	 * Check for movable_ops pages before taking the page lock because
 	 * we use non-atomic bitops on newly allocated page flags so
 	 * unconditionally grabbing the lock ruins page's owner side.
+	 *
+	 * Note that once a page has movable_ops, it will stay that way
+	 * until the page was freed.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putfolio;
@@ -108,7 +111,8 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!folio_trylock(folio)))
 		goto out_putfolio;
 
-	if (!PageMovable(page) || PageIsolated(page))
+	VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
+	if (PageIsolated(page))
 		goto out_no_isolated;
 
 	mops = page_movable_ops(page);
@@ -149,11 +153,10 @@ static void putback_movable_ops_page(struct page *page)
 	 */
 	struct folio *folio = page_folio(page);
 
+	VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
 	VM_WARN_ON_ONCE_PAGE(!PageIsolated(page), page);
 	folio_lock(folio);
-	/* If the page was released by it's owner, there is nothing to do. */
-	if (PageMovable(page))
-		page_movable_ops(page)->putback_page(page);
+	page_movable_ops(page)->putback_page(page);
 	ClearPageIsolated(page);
 	folio_unlock(folio);
 	folio_put(folio);
@@ -191,10 +194,9 @@ static int migrate_movable_ops_page(struct page *dst, struct page *src,
 {
 	int rc = MIGRATEPAGE_SUCCESS;
 
+	VM_WARN_ON_ONCE_PAGE(!__PageMovable(src), src);
 	VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
-	/* If the page was released by it's owner, there is nothing to do. */
-	if (PageMovable(src))
-		rc = page_movable_ops(src)->migrate_page(dst, src, mode);
+	rc = page_movable_ops(src)->migrate_page(dst, src, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		ClearPageIsolated(src);
 	return rc;
-- 
2.47.2
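
For reference, the removed mm/compaction.c hunk shows that PageMovable() was
just __PageMovable() plus a NULL check on the page_movable_ops() pointer.
Below is a minimal, standalone sketch (plain userspace C, not kernel code) of
the tagged-pointer idea behind that pair of helpers, under the assumption that
the movable_operations pointer lives in page->mapping with a low tag bit set,
as __SetPageMovable() arranges. All names in the sketch (MAPPING_MOVABLE,
fake_page, set_movable, is_movable, movable_ops) are invented for the demo and
merely stand in for PAGE_MAPPING_MOVABLE, struct page, __SetPageMovable(),
__PageMovable() and page_movable_ops().

	/*
	 * Demo of a tagged-pointer encoding: store an ops pointer together
	 * with a low "movable" tag bit in a single pointer-sized field.
	 * Once the tag is set, the tag test alone identifies the page and
	 * the recovered ops pointer cannot be NULL.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAPPING_MOVABLE 0x2UL	/* stand-in for PAGE_MAPPING_MOVABLE */

	struct movable_ops { int (*migrate)(void); };
	struct fake_page { void *mapping; };	/* stand-in for struct page */

	static void set_movable(struct fake_page *p, const struct movable_ops *ops)
	{
		/* ops must be aligned so the tag bit is free */
		assert(((uintptr_t)ops & MAPPING_MOVABLE) == 0);
		p->mapping = (void *)((uintptr_t)ops | MAPPING_MOVABLE);
	}

	static int is_movable(const struct fake_page *p)	/* ~__PageMovable() */
	{
		return ((uintptr_t)p->mapping & MAPPING_MOVABLE) == MAPPING_MOVABLE;
	}

	static const struct movable_ops *movable_ops(const struct fake_page *p)
	{
		/* only valid if is_movable(); result is necessarily non-NULL */
		return (const struct movable_ops *)
			((uintptr_t)p->mapping - MAPPING_MOVABLE);
	}

	int main(void)
	{
		static const struct movable_ops ops = { 0 };
		struct fake_page page = { 0 };

		printf("movable before: %d\n", is_movable(&page));		/* 0 */
		set_movable(&page, &ops);
		printf("movable after:  %d\n", is_movable(&page));		/* 1 */
		printf("ops non-NULL:   %d\n", movable_ops(&page) != NULL);	/* 1 */
		return 0;
	}

In this simplified model, once set_movable() has stored a tagged ops pointer,
checking the tag is sufficient and the extra non-NULL test is redundant, which
mirrors why the patch can rely on __PageMovable() sanity checks alone now that
__ClearPageMovable() is gone.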