git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: combine free_the_page() and free_unref_page()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 5 Apr 2024 15:32:24 +0000 (16:32 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:43 +0000 (20:56 -0700)
The pcp_allowed_order() check in free_the_page() was only being skipped by
__folio_put_small() which is about to be rearranged.

Link: https://lkml.kernel.org/r/20240405153228.2563754-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index cbe3f695eda9b15f13ad2da50524b8340f1a25da..c01757251de29d32f84cc5d5cba5632bde886cee 100644 (file)
@@ -541,14 +541,6 @@ static inline bool pcp_allowed_order(unsigned int order)
        return false;
 }
 
-static inline void free_the_page(struct page *page, unsigned int order)
-{
-       if (pcp_allowed_order(order))           /* Via pcp? */
-               free_unref_page(page, order);
-       else
-               __free_pages_ok(page, order, FPI_NONE);
-}
-
 /*
  * Higher-order pages are called "compound pages".  They are structured thusly:
  *
@@ -584,7 +576,7 @@ void destroy_large_folio(struct folio *folio)
                folio_undo_large_rmappable(folio);
 
        mem_cgroup_uncharge(folio);
-       free_the_page(&folio->page, folio_order(folio));
+       free_unref_page(&folio->page, folio_order(folio));
 }
 
 static inline void set_buddy_order(struct page *page, unsigned int order)
@@ -2573,6 +2565,11 @@ void free_unref_page(struct page *page, unsigned int order)
        unsigned long pfn = page_to_pfn(page);
        int migratetype;
 
+       if (!pcp_allowed_order(order)) {
+               __free_pages_ok(page, order, FPI_NONE);
+               return;
+       }
+
        if (!free_pages_prepare(page, order))
                return;
 
@@ -4755,11 +4752,11 @@ void __free_pages(struct page *page, unsigned int order)
        struct alloc_tag *tag = pgalloc_tag_get(page);
 
        if (put_page_testzero(page))
-               free_the_page(page, order);
+               free_unref_page(page, order);
        else if (!head) {
                pgalloc_tag_sub_pages(tag, (1 << order) - 1);
                while (order-- > 0)
-                       free_the_page(page + (1 << order), order);
+                       free_unref_page(page + (1 << order), order);
        }
 }
 EXPORT_SYMBOL(__free_pages);
@@ -4821,7 +4818,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
        if (page_ref_sub_and_test(page, count))
-               free_the_page(page, compound_order(page));
+               free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -4862,7 +4859,7 @@ refill:
                        goto refill;
 
                if (unlikely(nc->pfmemalloc)) {
-                       free_the_page(page, compound_order(page));
+                       free_unref_page(page, compound_order(page));
                        goto refill;
                }
 
@@ -4906,7 +4903,7 @@ void page_frag_free(void *addr)
        struct page *page = virt_to_head_page(addr);
 
        if (unlikely(put_page_testzero(page)))
-               free_the_page(page, compound_order(page));
+               free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);