mm/migrate: rename isolate_movable_page() to isolate_movable_ops_page()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Fri, 4 Jul 2025 10:25:01 +0000 (12:25 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sun, 13 Jul 2025 23:38:26 +0000 (16:38 -0700)
...  and start moving back to per-page things that will absolutely not be
folio things in the future.  Add documentation and a comment that the
remaining folio stuff (lock, refcount) will have to be reworked as well.

While at it, convert the VM_BUG_ON() into a WARN_ON_ONCE() and handle it
gracefully (relevant with further changes), and convert a WARN_ON_ONCE()
into a VM_WARN_ON_ONCE_PAGE().
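
In pattern form, the two conversions look like this (sketch only; the
real hunks follow below):

	/* Before: BUG under CONFIG_DEBUG_VM, compiled out otherwise. */
	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	/* After: warn once, then fail the isolation gracefully. */
	mops = page_movable_ops(page);
	if (WARN_ON_ONCE(!mops))
		goto out_no_isolated;

	/*
	 * Conversely, the always-on check of the isolated flag becomes
	 * a debug-only one that also dumps the offending page:
	 */
	VM_WARN_ON_ONCE_PAGE(PageIsolated(page), page);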

Note that we will leave anything that needs a rework (lock, refcount,
->lru) using folios for now: that perfectly highlights the problematic
bits.
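
For reference, the shape of isolate_movable_ops_page() after this patch,
with the remaining folio dependencies marked (sketch; the out_putfolio
path is not visible in the hunks below, it drops the reference via
folio_put()):

	bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
	{
		/* TODO: refcount is still taken on the folio */
		struct folio *folio = folio_get_nontail_page(page);
		...
		/* TODO: lock is still taken on the folio */
		if (unlikely(!folio_trylock(folio)))
			goto out_putfolio;
		...
		folio_unlock(folio);
		return true;
	out_putfolio:
		/* TODO: reference is still dropped via the folio */
		folio_put(folio);
		return false;
	}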

Link: https://lkml.kernel.org/r/20250704102524.326966-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
mm/compaction.c
mm/migrate.c

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index aaa2114498d6de12db84154ac6a8f22343404729..c0ec7422837bda287d0571595a5f947d5c39d989 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
                  unsigned long private, enum migrate_mode mode, int reason,
                  unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
 bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
 static inline struct folio *alloc_migration_target(struct folio *src,
                unsigned long private)
        { return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
        { return false; }
 static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
        { return false; }
diff --git a/mm/compaction.c b/mm/compaction.c
index 3925cb61dbb8f56fe4ee9128a81f9e389252718f..17455c5a4be052991de3fa6a05c04195f9f99192 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1093,7 +1093,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                        locked = NULL;
                                }
 
-                               if (isolate_movable_page(page, mode)) {
+                               if (isolate_movable_ops_page(page, mode)) {
                                        folio = page_folio(page);
                                        goto isolate_success;
                                }
diff --git a/mm/migrate.c b/mm/migrate.c
index 208d2d4a2f8d4274b5b3e5aa6ba54594ec05e89f..2e648d75248e462624801934399b4db5d2e871c5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
 #include "internal.h"
 #include "swap.h"
 
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+/**
+ * isolate_movable_ops_page - isolate a movable_ops page for migration
+ * @page: The page.
+ * @mode: The isolation mode.
+ *
+ * Try to isolate a movable_ops page for migration. Will fail if the page is
+ * not a movable_ops page, if the page is already isolated for migration
+ * or if the page was just released by its owner.
+ *
+ * Once isolated, the page cannot get freed until it is either putback
+ * or migrated.
+ *
+ * Returns true if isolation succeeded, otherwise false.
+ */
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 {
+       /*
+        * TODO: these pages will not be folios in the future. All
+        * folio dependencies will have to be removed.
+        */
        struct folio *folio = folio_get_nontail_page(page);
        const struct movable_operations *mops;
 
@@ -73,7 +91,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
         * we use non-atomic bitops on newly allocated page flags so
         * unconditionally grabbing the lock ruins page's owner side.
         */
-       if (unlikely(!__folio_test_movable(folio)))
+       if (unlikely(!__PageMovable(page)))
                goto out_putfolio;
 
        /*
@@ -90,18 +108,19 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
        if (unlikely(!folio_trylock(folio)))
                goto out_putfolio;
 
-       if (!folio_test_movable(folio) || folio_test_isolated(folio))
+       if (!PageMovable(page) || PageIsolated(page))
                goto out_no_isolated;
 
-       mops = folio_movable_ops(folio);
-       VM_BUG_ON_FOLIO(!mops, folio);
+       mops = page_movable_ops(page);
+       if (WARN_ON_ONCE(!mops))
+               goto out_no_isolated;
 
-       if (!mops->isolate_page(&folio->page, mode))
+       if (!mops->isolate_page(page, mode))
                goto out_no_isolated;
 
        /* Driver shouldn't use the isolated flag */
-       WARN_ON_ONCE(folio_test_isolated(folio));
-       folio_set_isolated(folio);
+       VM_WARN_ON_ONCE_PAGE(PageIsolated(page), page);
+       SetPageIsolated(page);
        folio_unlock(folio);
 
        return true;
@@ -175,8 +194,8 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
        if (lru)
                isolated = folio_isolate_lru(folio);
        else
-               isolated = isolate_movable_page(&folio->page,
-                                               ISOLATE_UNEVICTABLE);
+               isolated = isolate_movable_ops_page(&folio->page,
+                                                   ISOLATE_UNEVICTABLE);
 
        if (!isolated)
                return false;
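
As a closing usage sketch (hypothetical caller; the real call sites are
the compaction and isolate_folio_to_list() hunks above): on success the
page is marked isolated and cannot get freed until it is migrated or put
back; on failure it was not a movable_ops page, was already isolated, or
was already released by its owner.

	if (isolate_movable_ops_page(page, mode)) {
		/* Isolated: the page cannot get freed from under us. */
		folio = page_folio(page);	/* still folio-based for now */
		goto isolate_success;
	}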