mm/migrate: factor out movable_ops page handling into migrate_movable_ops_page()
author    David Hildenbrand <david@redhat.com>
          Fri, 4 Jul 2025 10:25:03 +0000 (12:25 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Sun, 13 Jul 2025 23:38:27 +0000 (16:38 -0700)
Let's factor it out, simplifying the calling code.

Before this change, we would have called flush_dcache_folio() also on
movable_ops pages.  As documented in Documentation/core-api/cachetlb.rst:

"This routine need only be called for page cache pages which can
 potentially ever be mapped into the address space of a user
 process."

So don't do it for movable_ops pages.  Should there ever be such a
movable_ops page user, it would have to do the flushing itself after
performing the copy.
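
As a rough, hedged illustration (no such user exists today, and the
name example_mops_migrate_page() below is made up), a hypothetical
movable_ops user that did care about dcache aliasing would be expected
to handle it in its own migrate_page callback, roughly like so:

#include <linux/cacheflush.h>   /* flush_dcache_page() */
#include <linux/highmem.h>      /* copy_highpage() */
#include <linux/migrate.h>      /* enum migrate_mode, MIGRATEPAGE_SUCCESS */

/*
 * Hypothetical movable_ops callback: copy the contents itself and flush
 * the dcache for the destination page, as the migration core no longer
 * does that for movable_ops pages.
 */
static int example_mops_migrate_page(struct page *dst, struct page *src,
                enum migrate_mode mode)
{
        copy_highpage(dst, src);        /* copy src's contents into dst */
        flush_dcache_page(dst);         /* the callback flushes, not the core */
        /* ... transfer the owner's metadata from src to dst ... */
        return MIGRATEPAGE_SUCCESS;
}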

Note that we can now change folio_mapping_flags() to folio_test_anon() to
make it clearer, because movable_ops pages will never take that path.
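
For reference, the two checks differ only in which mapping-pointer tag
bits they test.  The helpers below are a simplified paraphrase (with
illustrative names, not the in-tree definitions) of the page-flags
helpers involved:

#include <linux/page-flags.h>   /* PAGE_MAPPING_FLAGS, PAGE_MAPPING_ANON */

/* Like folio_mapping_flags(): true if any mapping tag bit is set. */
static inline bool example_mapping_flags(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

/* Like folio_test_anon(): true only if the anon tag bit is set. */
static inline bool example_test_anon(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

Since movable_ops pages never take this path anymore, any folio with a
tagged mapping here is anonymous, so both checks behave the same and
folio_test_anon() states the intent directly.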

[akpm@linux-foundation.org: fix kerneldoc]
Link: https://lkml.kernel.org/r/20250704102524.326966-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/migrate.c b/mm/migrate.c
index c3cd66b05fe2fb9c84816d91b1187b3b38a3c55f..8c4d5837db53ce2b3479923e622523d7536bcbfb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -159,6 +159,47 @@ static void putback_movable_ops_page(struct page *page)
        folio_put(folio);
 }
 
+/**
+ * migrate_movable_ops_page - migrate an isolated movable_ops page
+ * @dst: The destination page.
+ * @src: The source page.
+ * @mode: The migration mode.
+ *
+ * Migrate an isolated movable_ops page.
+ *
+ * If the src page was already released by its owner, the src page is
+ * un-isolated (putback) and migration succeeds; the migration core will be the
+ * owner of both pages.
+ *
+ * If the src page was not released by its owner and the migration was
+ * successful, the owner of the src page and the dst page are swapped and
+ * the src page is un-isolated.
+ *
+ * If migration fails, the ownership stays unmodified and the src page
+ * remains isolated: migration may be retried later or the page can be putback.
+ *
+ * TODO: migration core will treat both pages as folios and lock them before
+ * this call to unlock them after this call. Further, the folio refcounts on
+ * src and dst are also released by migration core. These pages will not be
+ * folios in the future, so that must be reworked.
+ *
+ * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
+ * code.
+ */
+static int migrate_movable_ops_page(struct page *dst, struct page *src,
+               enum migrate_mode mode)
+{
+       int rc = MIGRATEPAGE_SUCCESS;
+
+       VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
+       /* If the page was released by its owner, there is nothing to do. */
+       if (PageMovable(src))
+               rc = page_movable_ops(src)->migrate_page(dst, src, mode);
+       if (rc == MIGRATEPAGE_SUCCESS)
+               ClearPageIsolated(src);
+       return rc;
+}
+
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
@@ -1023,51 +1064,20 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
                                                                mode);
                else
                        rc = fallback_migrate_folio(mapping, dst, src, mode);
-       } else {
-               const struct movable_operations *mops;
 
-               /*
-                * In case of non-lru page, it could be released after
-                * isolation step. In that case, we shouldn't try migration.
-                */
-               VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
-               if (!folio_test_movable(src)) {
-                       rc = MIGRATEPAGE_SUCCESS;
-                       folio_clear_isolated(src);
+               if (rc != MIGRATEPAGE_SUCCESS)
                        goto out;
-               }
-
-               mops = folio_movable_ops(src);
-               rc = mops->migrate_page(&dst->page, &src->page, mode);
-               WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
-                               !folio_test_isolated(src));
-       }
-
-       /*
-        * When successful, old pagecache src->mapping must be cleared before
-        * src is freed; but stats require that PageAnon be left as PageAnon.
-        */
-       if (rc == MIGRATEPAGE_SUCCESS) {
-               if (__folio_test_movable(src)) {
-                       VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
-
-                       /*
-                        * We clear PG_movable under page_lock so any compactor
-                        * cannot try to migrate this page.
-                        */
-                       folio_clear_isolated(src);
-               }
-
                /*
-                * Anonymous and movable src->mapping will be cleared by
-                * free_pages_prepare so don't reset it here for keeping
-                * the type to work PageAnon, for example.
+                * For pagecache folios, src->mapping must be cleared before src
+                * is freed. Anonymous folios must stay anonymous until freed.
                 */
-               if (!folio_mapping_flags(src))
+               if (!folio_test_anon(src))
                        src->mapping = NULL;
 
                if (likely(!folio_is_zone_device(dst)))
                        flush_dcache_folio(dst);
+       } else {
+               rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
        }
 out:
        return rc;
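
To make the return-value contract of the new helper concrete, here is a
deliberately simplified, hypothetical caller.  It is not the real
migration-core loop and, per the TODO in the kerneldoc above, it ignores
the folio locking and refcount handling that the migration core
performs:

/* Hypothetical caller, illustrating only the documented return semantics. */
static void example_migrate_one(struct page *dst, struct page *src)
{
        int rc = migrate_movable_ops_page(dst, src, MIGRATE_SYNC);

        if (rc == MIGRATEPAGE_SUCCESS) {
                /* src was un-isolated; the caller now owns both pages. */
                return;
        }

        /*
         * Failure: ownership is unchanged and src remains isolated, so the
         * caller may retry the migration later or hand the page back via
         * putback_movable_ops_page(src).
         */
}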