git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/huge_memory: initialise the tags of the huge zero folio
authorCatalin Marinas <catalin.marinas@arm.com>
Fri, 31 Oct 2025 16:57:50 +0000 (16:57 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 10 Nov 2025 05:19:46 +0000 (21:19 -0800)
On arm64 with MTE enabled, a page mapped as Normal Tagged (PROT_MTE) in
user space will need to have its allocation tags initialised.  This is
normally done in the arm64 set_pte_at() after checking the memory
attributes.  Such a page is also marked with the PG_mte_tagged flag to avoid
subsequent clearing.  Since this relies on having a struct page,
pte_special() mappings are ignored.

Commit d82d09e48219 ("mm/huge_memory: mark PMD mappings of the huge zero
folio special") maps the huge zero folio special and the arm64
set_pmd_at() will no longer zero the tags.  There is no guarantee that the
tags are zero, especially if parts of this huge page have been previously
tagged.

It's fairly easy to detect this by regularly dropping the caches to
force the reallocation of the huge zero folio.

Allocate the huge zero folio with the __GFP_ZEROTAGS flag.  In addition,
do not warn in the arm64 __access_remote_tags() when reading tags from the
huge zero page.

I bundled the arm64 change in here as well since they are both related to
the commit mapping the huge zero folio as special.

[catalin.marinas@arm.com: handle arch mte_zero_clear_page_tags() code issuing MTE instructions]
Link: https://lkml.kernel.org/r/aQi8dA_QpXM8XqrE@arm.com
Link: https://lkml.kernel.org/r/20251031170133.280742-1-catalin.marinas@arm.com
Fixes: d82d09e48219 ("mm/huge_memory: mark PMD mappings of the huge zero folio special")
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Tested-by: Beleswar Padhi <b-padhi@ti.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/kernel/mte.c
arch/arm64/mm/fault.c
mm/huge_memory.c

index 43f7a2f394036ba0ad140e2625268f78b967af40..32148bf09c1dc057c9b39e34e19d14dd49c7904a 100644 (file)
@@ -476,7 +476,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 
                folio = page_folio(page);
                if (folio_test_hugetlb(folio))
-                       WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
+                       WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio) &&
+                                    !is_huge_zero_folio(folio));
                else
                        WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
 
index d816ff44faff940ca1e306172fc6a025d81239d4..125dfa6c613b222c828c8b4745af615546e9fc98 100644 (file)
@@ -969,6 +969,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 
 void tag_clear_highpage(struct page *page)
 {
+       /*
+        * Check if MTE is supported and fall back to clear_highpage().
+        * get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
+        * post_alloc_hook() will invoke tag_clear_highpage().
+        */
+       if (!system_supports_mte()) {
+               clear_highpage(page);
+               return;
+       }
+
        /* Newly allocated page, shouldn't have been tagged yet */
        WARN_ON_ONCE(!try_page_mte_tagging(page));
        mte_zero_clear_page_tags(page_address(page));
index b4ff49d9650138ec37e94a9d24412e571747bef4..323654fb4f8cf285efa8b50cdedcd07ccb79d682 100644 (file)
@@ -214,7 +214,8 @@ retry:
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
                return true;
 
-       zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+       zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
+                                ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
        if (!zero_folio) {
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);