git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: Turn can_split_huge_page() into can_split_folio()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 4 Feb 2022 19:13:31 +0000 (14:13 -0500)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 21 Mar 2022 17:01:35 +0000 (13:01 -0400)
This function already required a head page to be passed, so this
just adds type-safety and removes a few implicit calls to
compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/vmscan.c

index 4368b314d9c856f60e2875c3d90c8bae41ae2c6c..e0348bca3d666008aa6744ed2426efc80f1589d1 100644 (file)
@@ -185,7 +185,7 @@ void prep_transhuge_page(struct page *page);
 void free_transhuge_page(struct page *page);
 bool is_transparent_hugepage(struct page *page);
 
-bool can_split_huge_page(struct page *page, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
@@ -387,7 +387,7 @@ static inline bool is_transparent_hugepage(struct page *page)
 #define thp_get_unmapped_area  NULL
 
 static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int *pextra_pins)
 {
        BUILD_BUG();
        return false;
index d874d50e703baee69a2fd189da60c4fd89ac4fb2..38e233a7d9776e80d43f33ab6a27112d9ddac511 100644 (file)
@@ -2516,18 +2516,19 @@ int page_trans_huge_mapcount(struct page *page)
 }
 
 /* Racy check whether the huge page can be split */
-bool can_split_huge_page(struct page *page, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int *pextra_pins)
 {
        int extra_pins;
 
        /* Additional pins from page cache */
-       if (PageAnon(page))
-               extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0;
+       if (folio_test_anon(folio))
+               extra_pins = folio_test_swapcache(folio) ?
+                               folio_nr_pages(folio) : 0;
        else
-               extra_pins = thp_nr_pages(page);
+               extra_pins = folio_nr_pages(folio);
        if (pextra_pins)
                *pextra_pins = extra_pins;
-       return total_mapcount(page) == page_count(page) - extra_pins - 1;
+       return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
 }
 
 /*
@@ -2619,7 +2620,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         * Racy check if we can split the page, before unmap_page() will
         * split PMDs
         */
-       if (!can_split_huge_page(head, &extra_pins)) {
+       if (!can_split_folio(folio, &extra_pins)) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -2928,7 +2929,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                        goto next;
 
                total++;
-               if (!can_split_huge_page(compound_head(page), NULL))
+               if (!can_split_folio(page_folio(page), NULL))
                        goto next;
 
                if (!trylock_page(page))
index 32473e069f68d3d6b3020ad8795f1613ccc0ac3a..7db5d0237333b96970c543387e862ca9f153ec15 100644 (file)
@@ -1703,18 +1703,18 @@ retry:
                        if (!PageSwapCache(page)) {
                                if (!(sc->gfp_mask & __GFP_IO))
                                        goto keep_locked;
-                               if (page_maybe_dma_pinned(page))
+                               if (folio_maybe_dma_pinned(folio))
                                        goto keep_locked;
                                if (PageTransHuge(page)) {
                                        /* cannot split THP, skip it */
-                                       if (!can_split_huge_page(page, NULL))
+                                       if (!can_split_folio(folio, NULL))
                                                goto activate_locked;
                                        /*
                                         * Split pages without a PMD map right
                                         * away. Chances are some or all of the
                                         * tail pages can be freed without IO.
                                         */
-                                       if (!compound_mapcount(page) &&
+                                       if (!folio_entire_mapcount(folio) &&
                                            split_folio_to_list(folio,
                                                                page_list))
                                                goto activate_locked;