git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: rename __PageMovable() to page_has_movable_ops()
author David Hildenbrand <david@redhat.com>
Fri, 4 Jul 2025 10:25:10 +0000 (12:25 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 13 Jul 2025 23:38:29 +0000 (16:38 -0700)
Let's make it clearer that we are talking about movable_ops pages.

While at it, convert a VM_BUG_ON to a VM_WARN_ON_ONCE_PAGE.

Link: https://lkml.kernel.org/r/20250704102524.326966-17-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
include/linux/page-flags.h
mm/compaction.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/page_alloc.c
mm/page_isolation.c

index 25659a685e2aa244338912f39f493a7a5860dd07..e04035f70e36faf1752065f856b79fd3f36a3bf1 100644 (file)
@@ -115,7 +115,7 @@ static inline void __SetPageMovable(struct page *page,
 static inline
 const struct movable_operations *page_movable_ops(struct page *page)
 {
-       VM_BUG_ON(!__PageMovable(page));
+       VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
 
        return (const struct movable_operations *)
                ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
index 4fe5ee67535b2a0fc77d2f19628af02f19e74a38..c67163b73c5ecd2da79728e635a6b7ada216409f 100644 (file)
@@ -750,7 +750,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
                        PAGE_MAPPING_MOVABLE;
 }
 
-static __always_inline bool __PageMovable(const struct page *page)
+static __always_inline bool page_has_movable_ops(const struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
index 5c37373017014d42c0972bdd6c57fe5160e16e70..41fd6a1fe9a3371df6e5653e2ae12f8953a12ac7 100644 (file)
@@ -1056,11 +1056,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 * Skip any other type of page
                 */
                if (!PageLRU(page)) {
-                       /*
-                        * __PageMovable can return false positive so we need
-                        * to verify it under page_lock.
-                        */
-                       if (unlikely(__PageMovable(page)) &&
+                       /* Isolation code will deal with any races. */
+                       if (unlikely(page_has_movable_ops(page)) &&
                                        !PageIsolated(page)) {
                                if (locked) {
                                        unlock_page_lruvec_irqrestore(locked, flags);
index b91a33fb6c694fb830e73661ec0dfb55d8eea62f..9e2cff1999347af43f4fd49ce77b38a5667a6551 100644 (file)
@@ -1388,8 +1388,8 @@ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
        if (PageSlab(page))
                return false;
 
-       /* Soft offline could migrate non-LRU movable pages */
-       if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
+       /* Soft offline could migrate movable_ops pages */
+       if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
                return true;
 
        return PageLRU(page) || is_free_buddy_page(page);
index e4009a44f883e5ee4ca4789f96b6fcc41ff9a1ae..1f15af712bc3465df3c732299716ab0d6289b9a1 100644 (file)
@@ -1739,8 +1739,8 @@ bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
- * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
- * non-lru movable pages and hugepages). Will skip over most unmovable
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU and
+ * hugetlb folio, movable_ops pages). Will skip over most unmovable
  * pages (esp., pages that can be skipped when offlining), but bail out on
  * definitely unmovable pages.
  *
@@ -1759,13 +1759,11 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
                struct folio *folio;
 
                page = pfn_to_page(pfn);
-               if (PageLRU(page))
-                       goto found;
-               if (__PageMovable(page))
+               if (PageLRU(page) || page_has_movable_ops(page))
                        goto found;
 
                /*
-                * PageOffline() pages that are not marked __PageMovable() and
+                * PageOffline() pages that do not have movable_ops and
                 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
                 * definitely unmovable. If their reference count would be 0,
                 * they could at least be skipped when offlining memory.
index 1f07c8f1fb74485ac5cb30851ac6780ebee9eb03..bf9cfdafc54cafee8a041ae508c330c15b423f6c 100644 (file)
@@ -94,7 +94,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
         * Note that once a page has movable_ops, it will stay that way
         * until the page was freed.
         */
-       if (unlikely(!__PageMovable(page)))
+       if (unlikely(!page_has_movable_ops(page)))
                goto out_putfolio;
 
        /*
@@ -111,7 +111,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
        if (unlikely(!folio_trylock(folio)))
                goto out_putfolio;
 
-       VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
+       VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
        if (PageIsolated(page))
                goto out_no_isolated;
 
@@ -153,7 +153,7 @@ static void putback_movable_ops_page(struct page *page)
         */
        struct folio *folio = page_folio(page);
 
-       VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
+       VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
        VM_WARN_ON_ONCE_PAGE(!PageIsolated(page), page);
        folio_lock(folio);
        page_movable_ops(page)->putback_page(page);
@@ -194,7 +194,7 @@ static int migrate_movable_ops_page(struct page *dst, struct page *src,
 {
        int rc = MIGRATEPAGE_SUCCESS;
 
-       VM_WARN_ON_ONCE_PAGE(!__PageMovable(src), src);
+       VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
        VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
        rc = page_movable_ops(src)->migrate_page(dst, src, mode);
        if (rc == MIGRATEPAGE_SUCCESS)
index 6318c85d678ebefd3830f8dbcbc1ed06da76de6e..036d9b7b01c018fef8c7eb2ce3ca11f3a865a853 100644 (file)
@@ -2006,7 +2006,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
                         * migration are movable. But we don't actually try
                         * isolating, as that would be expensive.
                         */
-                       if (PageLRU(page) || __PageMovable(page))
+                       if (PageLRU(page) || page_has_movable_ops(page))
                                (*num_movable)++;
                        pfn++;
                }
index ece3bfc56bcd52c35cf36c1e6a89347bb60053c2..b97b965b3ed01f8e9ff96752ee51a4f1ad5d2e50 100644 (file)
@@ -21,9 +21,9 @@
  * consequently belong to a single zone.
  *
  * PageLRU check without isolation or lru_lock could race so that
- * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
- * check without lock_page also may miss some movable non-lru pages at
- * race condition. So you can't expect this function should be exact.
+ * MIGRATE_MOVABLE block might include unmovable pages. Similarly, pages
+ * with movable_ops can only be identified some time after they were
+ * allocated. So you can't expect this function should be exact.
  *
  * Returns a page without holding a reference. If the caller wants to
  * dereference that page (e.g., dumping), it has to make sure that it
@@ -133,7 +133,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
                if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
                        continue;
 
-               if (__PageMovable(page) || PageLRU(page))
+               if (PageLRU(page) || page_has_movable_ops(page))
                        continue;
 
                /*
@@ -421,7 +421,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn,
                         * proper free and split handling for them.
                         */
                        VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
-                       VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
+                       VM_WARN_ON_ONCE_PAGE(page_has_movable_ops(page), page);
 
                        goto failed;
                }