git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: remove folio_prep_large_rmappable()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 21 Mar 2024 14:24:41 +0000 (14:24 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:00 +0000 (20:56 -0700)
Now that prep_compound_page() initialises folio->_deferred_list,
folio_prep_large_rmappable()'s only purpose is to set the large_rmappable
flag, so inline it into the two callers.  Take the opportunity to convert
the large_rmappable definition from PAGEFLAG to FOLIO_FLAG and remove the
existence of PageTestLargeRmappable and friends.

Link: https://lkml.kernel.org/r/20240321142448.1645400-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
include/linux/page-flags.h
mm/huge_memory.c
mm/internal.h

index de0c89105076907bf906ae855e17b6f4ff8e3858..0e16451adaba3aed829706c5e25cbc45829885b3 100644 (file)
@@ -263,7 +263,6 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
@@ -411,8 +410,6 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
        return 0;
 }
 
-static inline void folio_prep_large_rmappable(struct folio *folio) {}
-
 #define transparent_hugepage_flags 0UL
 
 #define thp_get_unmapped_area  NULL
index 4bf1c25fd1dc566cccf51742286243bace192dad..6fb3cd42ee596a5cb27893c7ae25a21c2b1b1553 100644 (file)
@@ -868,9 +868,9 @@ static inline void ClearPageCompound(struct page *page)
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
 }
-PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
+FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
 #else
-TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
+FOLIO_FLAG_FALSE(large_rmappable)
 #endif
 
 #define PG_head_mask ((1UL << PG_head))
index 4cc7133aaa4be2f001c379832e9bc68f7e686f40..16b2c5622fb1b0020c6f21ca03596b612f09f7d2 100644 (file)
@@ -789,13 +789,6 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void folio_prep_large_rmappable(struct folio *folio)
-{
-       if (!folio || !folio_test_large(folio))
-               return;
-       folio_set_large_rmappable(folio);
-}
-
 static inline bool is_transparent_hugepage(struct folio *folio)
 {
        if (!folio_test_large(folio))
@@ -2862,7 +2855,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
        clear_compound_head(page_tail);
        if (new_order) {
                prep_compound_page(page_tail, new_order);
-               folio_prep_large_rmappable(new_folio);
+               folio_set_large_rmappable(new_folio);
        }
 
        /* Finally unfreeze refcount. Additional reference from page cache. */
index 5c0c57c9cd196483e026d605d36f0a909d488f31..ab8250d8a5911acb62d016ecdb52692e308e5629 100644 (file)
@@ -513,7 +513,8 @@ static inline struct folio *page_rmappable_folio(struct page *page)
 {
        struct folio *folio = (struct folio *)page;
 
-       folio_prep_large_rmappable(folio);
+       if (folio && folio_test_large(folio))
+               folio_set_large_rmappable(folio);
        return folio;
 }