]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: rework compound_head() for power-of-2 sizeof(struct page)
authorKiryl Shutsemau <kas@kernel.org>
Fri, 27 Feb 2026 19:42:45 +0000 (19:42 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:08 +0000 (13:53 -0700)
For tail pages, the kernel uses the 'compound_info' field to get to the
head page.  The bit 0 of the field indicates whether the page is a tail
page, and if set, the remaining bits represent a pointer to the head page.

For cases when size of struct page is power-of-2, change the encoding of
compound_info to store a mask that can be applied to the virtual address
of the tail page in order to access the head page.  This is possible because
the struct page of the head page is naturally aligned with regard to the
order of the page.

The significant impact of this modification is that all tail pages of the
same order will now have identical 'compound_info', regardless of the
compound page they are associated with.  This paves the way for
eliminating fake heads.

The HugeTLB Vmemmap Optimization (HVO) creates fake heads and it is only
applied when the sizeof(struct page) is power-of-2.  Having identical tail
pages allows the same page to be mapped into the vmemmap of all pages,
maintaining memory savings without fake heads.

If sizeof(struct page) is not a power-of-2, there are no functional changes.

Limit mask usage to HugeTLB vmemmap optimization (HVO) where it makes a
difference.  The approach with mask would work in the wider set of
conditions, but it requires validating that struct pages are naturally
aligned for all orders up to the MAX_FOLIO_ORDER, which can be tricky.

Link: https://lkml.kernel.org/r/20260227194302.274384-8-kas@kernel.org
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Acked-by: Usama Arif <usamaarif642@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christoph Lameter <cl@gentwo.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Frank van der Linden <fvdl@google.com>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: WANG Xuerui <kernel@xen0n.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/slab.h
mm/util.c

index 5c469d38dd6993823db947288136785136145ec7..43876b108f0a4aafb24af9d14674933b5815f08d 100644 (file)
@@ -198,6 +198,29 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
+/*
+ * For tail pages, if the size of struct page is power-of-2 ->compound_info
+ * encodes the mask that converts the address of the tail page to the
+ * address of the head page.
+ *
+ * Otherwise, ->compound_info holds a direct pointer to the head page.
+ */
+static __always_inline bool compound_info_has_mask(void)
+{
+       /*
+        * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
+        * makes a difference.
+        *
+        * The approach with mask would work in the wider set of conditions,
+        * but it requires validating that struct pages are naturally aligned
+        * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
+        */
+       if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
+               return false;
+
+       return is_power_of_2(sizeof(struct page));
+}
+
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 
@@ -207,6 +230,10 @@ DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
  */
 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
 {
+       /* Fake heads only exist if compound_info_has_mask() is true */
+       if (!compound_info_has_mask())
+               return page;
+
        if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
                return page;
 
@@ -223,10 +250,14 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
                 * because the @page is a compound page composed with at least
                 * two contiguous pages.
                 */
-               unsigned long head = READ_ONCE(page[1].compound_info);
+               unsigned long info = READ_ONCE(page[1].compound_info);
+
+               /* See set_compound_head() */
+               if (likely(info & 1)) {
+                       unsigned long p = (unsigned long)page;
 
-               if (likely(head & 1))
-                       return (const struct page *)(head - 1);
+                       return (const struct page *)(p & info);
+               }
        }
        return page;
 }
@@ -281,11 +312,26 @@ static __always_inline int page_is_fake_head(const struct page *page)
 
 static __always_inline unsigned long _compound_head(const struct page *page)
 {
-       unsigned long head = READ_ONCE(page->compound_info);
+       unsigned long info = READ_ONCE(page->compound_info);
 
-       if (unlikely(head & 1))
-               return head - 1;
-       return (unsigned long)page_fixed_fake_head(page);
+       /* Bit 0 encodes PageTail() */
+       if (!(info & 1))
+               return (unsigned long)page_fixed_fake_head(page);
+
+       /*
+        * If compound_info_has_mask() is false, the rest of compound_info is
+        * the pointer to the head page.
+        */
+       if (!compound_info_has_mask())
+               return info - 1;
+
+       /*
+        * If compound_info_has_mask() is true the rest of the info encodes
+        * the mask that converts the address of the tail page to the head page.
+        *
+        * No need to clear bit 0 in the mask as 'page' always has it clear.
+        */
+       return (unsigned long)page & info;
 }
 
 #define compound_head(page)    ((typeof(page))_compound_head(page))
@@ -293,7 +339,26 @@ static __always_inline unsigned long _compound_head(const struct page *page)
 static __always_inline void set_compound_head(struct page *tail,
                const struct page *head, unsigned int order)
 {
-       WRITE_ONCE(tail->compound_info, (unsigned long)head + 1);
+       unsigned int shift;
+       unsigned long mask;
+
+       if (!compound_info_has_mask()) {
+               WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
+               return;
+       }
+
+       /*
+        * If the size of struct page is power-of-2, the low 'shift' bits of
+        * the virtual address of the compound head are zero.
+        *
+        * Calculate mask that can be applied to the virtual address of
+        * the tail page to get address of the head page.
+        */
+       shift = order + order_base_2(sizeof(struct page));
+       mask = GENMASK(BITS_PER_LONG - 1, shift);
+
+       /* Bit 0 encodes PageTail() */
+       WRITE_ONCE(tail->compound_info, mask | 1);
 }
 
 static __always_inline void clear_compound_head(struct page *page)
index 0653cf5fd93a2424c42cb9323347ceabcb358017..ccbdbed18c052b0d76f8b713465cd30b9367a109 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -131,11 +131,19 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist
  */
 static inline struct slab *page_slab(const struct page *page)
 {
-       unsigned long head;
+       unsigned long info;
+
+       info = READ_ONCE(page->compound_info);
+       if (info & 1) {
+               /* See compound_head() */
+               if (compound_info_has_mask()) {
+                       unsigned long p = (unsigned long)page;
+                       page = (struct page *)(p & info);
+               } else {
+                       page = (struct page *)(info - 1);
+               }
+       }
 
-       head = READ_ONCE(page->compound_head);
-       if (head & 1)
-               page = (struct page *)(head - 1);
        if (data_race(page->page_type >> 24) != PGTY_slab)
                page = NULL;
 
index 52400a3c5eb4182434ffb2561ab693e515484755..ce7ae80047cf4276407c2aec27750a0b3b16a0b4 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -1266,7 +1266,7 @@ static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
  */
 void snapshot_page(struct page_snapshot *ps, const struct page *page)
 {
-       unsigned long head, nr_pages = 1;
+       unsigned long info, nr_pages = 1;
        struct folio *foliop;
        int loops = 5;
 
@@ -1276,8 +1276,8 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
 again:
        memset(&ps->folio_snapshot, 0, sizeof(struct folio));
        memcpy(&ps->page_snapshot, page, sizeof(*page));
-       head = ps->page_snapshot.compound_info;
-       if ((head & 1) == 0) {
+       info = ps->page_snapshot.compound_info;
+       if (!(info & 1)) {
                ps->idx = 0;
                foliop = (struct folio *)&ps->page_snapshot;
                if (!folio_test_large(foliop)) {
@@ -1288,7 +1288,15 @@ again:
                }
                foliop = (struct folio *)page;
        } else {
-               foliop = (struct folio *)(head - 1);
+               /* See compound_head() */
+               if (compound_info_has_mask()) {
+                       unsigned long p = (unsigned long)page;
+
+                       foliop = (struct folio *)(p & info);
+               } else {
+                       foliop = (struct folio *)(info - 1);
+               }
+
                ps->idx = folio_page_idx(foliop, page);
        }