]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: remove __folio_test_movable()
authorDavid Hildenbrand <david@redhat.com>
Fri, 4 Jul 2025 10:25:12 +0000 (12:25 +0200)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 13 Jul 2025 23:38:29 +0000 (16:38 -0700)
Convert to page_has_movable_ops().  While at it, cleanup relevant code a
bit.

The data_race() in migrate_folio_unmap() is questionable: we already hold
a page reference, and concurrent modifications can no longer happen (iow:
__ClearPageMovable() no longer exists).  Drop it for now, we'll rework
page_has_movable_ops() soon either way to no longer rely on page->mapping.

Wherever we cast from folio to page now is a clear sign that this code has
to be decoupled.

Link: https://lkml.kernel.org/r/20250704102524.326966-19-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/migrate.c
mm/vmscan.c

index c67163b73c5ecd2da79728e635a6b7ada216409f..4c27ebb689e3cf24598c42e16abdb08763c9affa 100644 (file)
@@ -744,12 +744,6 @@ static __always_inline bool PageAnon(const struct page *page)
        return folio_test_anon(page_folio(page));
 }
 
-static __always_inline bool __folio_test_movable(const struct folio *folio)
-{
-       return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
-                       PAGE_MAPPING_MOVABLE;
-}
-
 static __always_inline bool page_has_movable_ops(const struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
index bf9cfdafc54cafee8a041ae508c330c15b423f6c..aec0774d3da32c2ef51b542d0cdf9cf2b1dec2c4 100644 (file)
@@ -221,12 +221,7 @@ void putback_movable_pages(struct list_head *l)
                        continue;
                }
                list_del(&folio->lru);
-               /*
-                * We isolated non-lru movable folio so here we can use
-                * __folio_test_movable because LRU folio's mapping cannot
-                * have PAGE_MAPPING_MOVABLE.
-                */
-               if (unlikely(__folio_test_movable(folio))) {
+               if (unlikely(page_has_movable_ops(&folio->page))) {
                        putback_movable_ops_page(&folio->page);
                } else {
                        node_stat_mod_folio(folio, NR_ISOLATED_ANON +
@@ -239,26 +234,20 @@ void putback_movable_pages(struct list_head *l)
 /* Must be called with an elevated refcount on the non-hugetlb folio */
 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 {
-       bool isolated, lru;
-
        if (folio_test_hugetlb(folio))
                return folio_isolate_hugetlb(folio, list);
 
-       lru = !__folio_test_movable(folio);
-       if (lru)
-               isolated = folio_isolate_lru(folio);
-       else
-               isolated = isolate_movable_ops_page(&folio->page,
-                                                   ISOLATE_UNEVICTABLE);
-
-       if (!isolated)
-               return false;
-
-       list_add(&folio->lru, list);
-       if (lru)
+       if (page_has_movable_ops(&folio->page)) {
+               if (!isolate_movable_ops_page(&folio->page,
+                                             ISOLATE_UNEVICTABLE))
+                       return false;
+       } else {
+               if (!folio_isolate_lru(folio))
+                       return false;
                node_stat_add_folio(folio, NR_ISOLATED_ANON +
                                    folio_is_file_lru(folio));
-
+       }
+       list_add(&folio->lru, list);
        return true;
 }
 
@@ -1142,12 +1131,7 @@ static void migrate_folio_undo_dst(struct folio *dst, bool locked,
 static void migrate_folio_done(struct folio *src,
                               enum migrate_reason reason)
 {
-       /*
-        * Compaction can migrate also non-LRU pages which are
-        * not accounted to NR_ISOLATED_*. They can be recognized
-        * as __folio_test_movable
-        */
-       if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
+       if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
                mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
                                    folio_is_file_lru(src), -folio_nr_pages(src));
 
@@ -1166,7 +1150,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
        int rc = -EAGAIN;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
-       bool is_lru = data_race(!__folio_test_movable(src));
        bool locked = false;
        bool dst_locked = false;
 
@@ -1267,7 +1250,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                goto out;
        dst_locked = true;
 
-       if (unlikely(!is_lru)) {
+       if (unlikely(page_has_movable_ops(&src->page))) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
@@ -1332,7 +1315,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
        prev = dst->lru.prev;
        list_del(&dst->lru);
 
-       if (unlikely(__folio_test_movable(src))) {
+       if (unlikely(page_has_movable_ops(&src->page))) {
                rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
                if (rc)
                        goto out;
index c86a2495138a83976e2041d6771541645b216d3e..b1b999734ee4dc19d9029ff0d7ac7750644d71ea 100644 (file)
@@ -1651,9 +1651,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
        unsigned int noreclaim_flag;
 
        list_for_each_entry_safe(folio, next, folio_list, lru) {
+               /* TODO: these pages should not even appear in this list. */
+               if (page_has_movable_ops(&folio->page))
+                       continue;
                if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
-                   !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
-                   !folio_test_unevictable(folio)) {
+                   !folio_test_dirty(folio) && !folio_test_unevictable(folio)) {
                        folio_clear_active(folio);
                        list_move(&folio->lru, &clean_folios);
                }