mm/page_alloc: cache page_zone() result in free_unref_page()
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Mon, 25 Nov 2024 21:01:33 +0000 (21:01 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 14 Jan 2025 06:40:30 +0000 (22:40 -0800)
Patch series "Allocate and free frozen pages", v3.

Slab does not need to use the page refcount at all, and it can avoid an
atomic operation on page free.  Hugetlb wants to delay setting the
refcount until it has assembled a complete gigantic page.  We already have
the ability to freeze a page (safely reduce its reference count to 0), so
this patchset adds APIs to allocate and free pages which are in a frozen
state.
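
To make the freezing primitive concrete, here is a minimal sketch built on
the existing page_ref_freeze()/page_ref_unfreeze() helpers from
include/linux/page_ref.h; the demo_freeze() function and its structure are
illustrative only and not part of this series:

	#include <linux/gfp.h>
	#include <linux/page_ref.h>

	/*
	 * Freezing atomically drops a page's refcount from an expected
	 * value (here 1) to 0, failing if any other reference exists,
	 * which gives the caller brief exclusive ownership of the page.
	 */
	static int demo_freeze(void)
	{
		struct page *page = alloc_page(GFP_KERNEL); /* refcount == 1 */

		if (!page)
			return -ENOMEM;

		if (page_ref_freeze(page, 1)) {
			/* refcount is 0: get_page_unless_zero() now fails */
			/* ... operate without atomics on the refcount ... */
			page_ref_unfreeze(page, 1); /* refcount back to 1 */
		}

		__free_page(page);
		return 0;
	}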

This patchset is also a step towards the Glorious Future in which struct
page doesn't have a refcount; the users which need a refcount will have
one in their per-allocation memdesc.

This patch (of 15):

Save 17 bytes of text by calculating page_zone() once instead of twice.
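
For context, page_zone() is not a trivial accessor: each call decodes the
node and zone indices out of page->flags and indexes into that node's zone
array, so the compiler emits the decoding sequence at every call site.  Its
definition in include/linux/mm.h is approximately:

	static inline struct zone *page_zone(const struct page *page)
	{
		return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
	}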

Link: https://lkml.kernel.org/r/20241125210149.2976098-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20241125210149.2976098-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index cae7b93864c23e50d9c89b9f579ac5bb6143a3a7..a44f9ff04b1a23c32253831d497ebf50c7565bbb 100644
@@ -2666,16 +2666,16 @@ void free_unref_page(struct page *page, unsigned int order)
         * get those areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
+       zone = page_zone(page);
        migratetype = get_pfnblock_migratetype(page, pfn);
        if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
                if (unlikely(is_migrate_isolate(migratetype))) {
-                       free_one_page(page_zone(page), page, pfn, order, FPI_NONE);
+                       free_one_page(zone, page, pfn, order, FPI_NONE);
                        return;
                }
                migratetype = MIGRATE_MOVABLE;
        }
 
-       zone = page_zone(page);
        pcp_trylock_prepare(UP_flags);
        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
        if (pcp) {