mm/page_alloc: move set_page_refcounted() to end of __alloc_pages()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 25 Nov 2024 21:01:44 +0000 (21:01 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
	Tue, 14 Jan 2025 06:40:33 +0000 (22:40 -0800)
Remove some code duplication by calling set_page_refcounted() at the end
of __alloc_pages() instead of after each call that can allocate a page.
This means we now free a frozen page when the memcg charge fails, i.e. when
we've exceeded the allowed memcg memory.
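
Concretely, the tail of __alloc_pages_noprof() after this patch reads
roughly like the condensed sketch below (reconstructed from the diff; the
slow-path setup between the two allocation attempts is elided):

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (likely(page))
		goto out;
	...
	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
out:
	/* Charge failure: drop the page while it is still frozen. */
	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
		free_frozen_pages(page, order);
		page = NULL;
	}
	/* Every successful path now sets the refcount in one place. */
	if (page)
		set_page_refcounted(page);

Since both allocation paths converge at "out" before the refcount is set,
the memcg-failure path can use free_frozen_pages() directly and never has
to undo a reference.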

Link: https://lkml.kernel.org/r/20241125210149.2976098-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index e2e5cd899abd109536efab7a827cafb5e3aefea4..df5b61592792f45f6c828eb17ccdf070b9428758 100644
@@ -4750,10 +4750,8 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 
        /* First allocation attempt */
        page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-       if (likely(page)) {
-               set_page_refcounted(page);
+       if (likely(page))
                goto out;
-       }
 
        alloc_gfp = gfp;
        ac.spread_dirty_pages = false;
@@ -4765,15 +4763,15 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
        ac.nodemask = nodemask;
 
        page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
-       if (page)
-               set_page_refcounted(page);
 
 out:
        if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
            unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
-               __free_pages(page, order);
+               free_frozen_pages(page, order);
                page = NULL;
        }
+       if (page)
+               set_page_refcounted(page);
 
        trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
        kmsan_alloc_page(page, order, alloc_gfp);