]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/memory: introduce is_huge_zero_pfn() and use it in vm_normal_page_pmd()
authorDavid Hildenbrand <david@redhat.com>
Mon, 14 Jul 2025 13:16:51 +0000 (09:16 -0400)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 25 Jul 2025 02:12:35 +0000 (19:12 -0700)
Patch series "mm: introduce snapshot_page()", v3.

This series introduces snapshot_page(), a helper function that can be used
to create a snapshot of a struct page and its associated struct folio.

This function is intended to help callers with a consistent view of a
folio while reducing the chance of encountering partially updated or
inconsistent state, such as during folio splitting which could lead to
crashes and BUG_ON()s being triggered.

This patch (of 4):

Let's avoid working with the PMD when not required.  If
vm_normal_page_pmd() were called on something that is not a present
pmd, it would already be a bug (pfn possibly garbage).

While at it, let's support passing in any pfn covered by the huge zero
folio by masking off PFN bits -- which should be rather cheap.

Link: https://lkml.kernel.org/r/cover.1752499009.git.luizcap@redhat.com
Link: https://lkml.kernel.org/r/4940826e99f0c709a7cf7beb94f53288320aea5a.1752499009.git.luizcap@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
Reviewed-by: Shivank Garg <shivankg@amd.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/memory.c

index 4d5bb67dc4eca8fabad70b47aee02d132a54e208..7748489fde1b7af59b3f926233c98e27d26dbac6 100644 (file)
@@ -482,9 +482,14 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
        return READ_ONCE(huge_zero_folio) == folio;
 }
 
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+       return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-       return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
+       return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
 }
 
 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
@@ -632,6 +637,11 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
        return false;
 }
 
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+       return false;
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
        return false;
index b4fb559dd0c6223d9baff87be57e36eff3592c81..92fd18a5d8d1f8878ff20f678eb51c0d7d2cbd72 100644 (file)
@@ -668,7 +668,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       if (is_huge_zero_pmd(pmd))
+       if (is_huge_zero_pfn(pfn))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
                return NULL;