git.ipfire.org Git - thirdparty/linux.git/commitdiff
hugetlb: arm64: add mte support
authorYang Shi <yang@os.amperecomputing.com>
Tue, 1 Oct 2024 22:52:19 +0000 (15:52 -0700)
committerCatalin Marinas <catalin.marinas@arm.com>
Wed, 16 Oct 2024 13:50:47 +0000 (14:50 +0100)
Enable MTE support for hugetlb.

The MTE page flags will be set on the folio only.  When copying a
hugetlb folio (for example, on CoW), the tags for all subpages will be
copied when the first subpage is copied.

When freeing a hugetlb folio, the MTE flags will be cleared.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
Link: https://lore.kernel.org/r/20241001225220.271178-1-yang@os.amperecomputing.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/mman.h
arch/arm64/include/asm/mte.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/mte.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/mmu.c
arch/arm64/mm/copypage.c
fs/hugetlbfs/inode.c

index 293f880865e8d0a27b4251fe50fb0784240ebbcf..c6dff3e69539b4ff6573db3edd28359d49529a1d 100644 (file)
@@ -11,6 +11,7 @@
 #define __ASM_HUGETLB_H
 
 #include <asm/cacheflush.h>
+#include <asm/mte.h>
 #include <asm/page.h>
 
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -21,6 +22,13 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
        clear_bit(PG_dcache_clean, &folio->flags);
+
+#ifdef CONFIG_ARM64_MTE
+       if (system_supports_mte()) {
+               clear_bit(PG_mte_tagged, &folio->flags);
+               clear_bit(PG_mte_lock, &folio->flags);
+       }
+#endif
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
index 9e39217b4afbbbd41610fda26c04158111a9f23b..65bc2b07f66638955d32f95976a6cb38f1541c50 100644 (file)
@@ -38,7 +38,8 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
         * backed by tags-capable memory. The vm_flags may be overridden by a
         * filesystem supporting MTE (RAM-based).
         */
-       if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+       if (system_supports_mte() &&
+           (flags & (MAP_ANONYMOUS | MAP_HUGETLB)))
                return VM_MTE_ALLOWED;
 
        return 0;
index 0f84518632b4a629c9e0110384c4b0839f8bd443..6567df8ec8ca8180f3b7748b57819986cf2e1a9e 100644 (file)
@@ -41,6 +41,8 @@ void mte_free_tag_storage(char *storage);
 
 static inline void set_page_mte_tagged(struct page *page)
 {
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        /*
         * Ensure that the tags written prior to this function are visible
         * before the page flags update.
@@ -53,6 +55,8 @@ static inline bool page_mte_tagged(struct page *page)
 {
        bool ret = test_bit(PG_mte_tagged, &page->flags);
 
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        /*
         * If the page is tagged, ensure ordering with a likely subsequent
         * read of the tags.
@@ -76,6 +80,8 @@ static inline bool page_mte_tagged(struct page *page)
  */
 static inline bool try_page_mte_tagging(struct page *page)
 {
+       VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
        if (!test_and_set_bit(PG_mte_lock, &page->flags))
                return true;
 
@@ -157,6 +163,67 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
 
 #endif /* CONFIG_ARM64_MTE */
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       /*
+        * Ensure that the tags written prior to this function are visible
+        * before the folio flags update.
+        */
+       smp_wmb();
+       set_bit(PG_mte_tagged, &folio->flags);
+
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+       bool ret = test_bit(PG_mte_tagged, &folio->flags);
+
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       /*
+        * If the folio is tagged, ensure ordering with a likely subsequent
+        * read of the tags.
+        */
+       if (ret)
+               smp_rmb();
+       return ret;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+       VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+       if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+               return true;
+
+       /*
+        * The tags are either being initialised or may have been initialised
+        * already. Check if the PG_mte_tagged flag has been set or wait
+        * otherwise.
+        */
+       smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+
+       return false;
+}
+#else
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+       return false;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+       return false;
+}
+#endif
+
 static inline void mte_disable_tco_entry(struct task_struct *task)
 {
        if (!system_supports_mte())
index 7b11d84f533c9bf0d65ae3a33402aa38d8c8906d..18749e9a6c2da4bb7c808a362bf4aa181014fd59 100644 (file)
@@ -266,9 +266,15 @@ static int swsusp_mte_save_tags(void)
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);
+                       struct folio *folio;
 
                        if (!page)
                                continue;
+                       folio = page_folio(page);
+
+                       if (folio_test_hugetlb(folio) &&
+                           !folio_test_hugetlb_mte_tagged(folio))
+                               continue;
 
                        if (!page_mte_tagged(page))
                                continue;
index 6174671be7c18d5b71f0fbf4aee467b1235080a0..2fbfd27ff5f293e1597e9de7967e22fccbc034b6 100644 (file)
@@ -38,7 +38,24 @@ EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 void mte_sync_tags(pte_t pte, unsigned int nr_pages)
 {
        struct page *page = pte_page(pte);
-       unsigned int i;
+       struct folio *folio = page_folio(page);
+       unsigned long i;
+
+       if (folio_test_hugetlb(folio)) {
+               unsigned long nr = folio_nr_pages(folio);
+
+               /* Hugetlb MTE flags are set for head page only */
+               if (folio_try_hugetlb_mte_tagging(folio)) {
+                       for (i = 0; i < nr; i++, page++)
+                               mte_clear_page_tags(page_address(page));
+                       folio_set_hugetlb_mte_tagged(folio);
+               }
+
+               /* ensure the tags are visible before the PTE is set */
+               smp_wmb();
+
+               return;
+       }
 
        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
@@ -410,6 +427,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                void *maddr;
                struct page *page = get_user_page_vma_remote(mm, addr,
                                                             gup_flags, &vma);
+               struct folio *folio;
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
@@ -428,7 +446,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                        put_page(page);
                        break;
                }
-               WARN_ON_ONCE(!page_mte_tagged(page));
+
+               folio = page_folio(page);
+               if (folio_test_hugetlb(folio))
+                       WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
+               else
+                       WARN_ON_ONCE(!page_mte_tagged(page));
 
                /* limit access to the end of the page */
                offset = offset_in_page(addr);
index 962f985977c2d76506f2eeadc687b73625c03130..e738a353b20e43c192ac2b15688e720851077ba8 100644 (file)
@@ -1055,6 +1055,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                void *maddr;
                unsigned long num_tags;
                struct page *page;
+               struct folio *folio;
 
                if (is_error_noslot_pfn(pfn)) {
                        ret = -EFAULT;
@@ -1068,10 +1069,13 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                        ret = -EFAULT;
                        goto out;
                }
+               folio = page_folio(page);
                maddr = page_address(page);
 
                if (!write) {
-                       if (page_mte_tagged(page))
+                       if ((folio_test_hugetlb(folio) &&
+                            folio_test_hugetlb_mte_tagged(folio)) ||
+                            page_mte_tagged(page))
                                num_tags = mte_copy_tags_to_user(tags, maddr,
                                                        MTE_GRANULES_PER_PAGE);
                        else
@@ -1085,14 +1089,20 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                         * __set_ptes() in the VMM but still overriding the
                         * tags, hence ignoring the return value.
                         */
-                       try_page_mte_tagging(page);
+                       if (folio_test_hugetlb(folio))
+                               folio_try_hugetlb_mte_tagging(folio);
+                       else
+                               try_page_mte_tagging(page);
                        num_tags = mte_copy_tags_from_user(maddr, tags,
                                                        MTE_GRANULES_PER_PAGE);
 
                        /* uaccess failed, don't leave stale tags */
                        if (num_tags != MTE_GRANULES_PER_PAGE)
                                mte_clear_page_tags(maddr);
-                       set_page_mte_tagged(page);
+                       if (folio_test_hugetlb(folio))
+                               folio_set_hugetlb_mte_tagged(folio);
+                       else
+                               set_page_mte_tagged(page);
 
                        kvm_release_pfn_dirty(pfn);
                }
index a509b63bd4dd50d462e779f5511e6b06687cf73a..962449f9ac2f94055104fc061fa3d66ccd361bf5 100644 (file)
@@ -1401,10 +1401,21 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 {
        unsigned long i, nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(pfn);
+       struct folio *folio = page_folio(page);
 
        if (!kvm_has_mte(kvm))
                return;
 
+       if (folio_test_hugetlb(folio)) {
+               /* Hugetlb has MTE flags set on head page only */
+               if (folio_try_hugetlb_mte_tagging(folio)) {
+                       for (i = 0; i < nr_pages; i++, page++)
+                               mte_clear_page_tags(page_address(page));
+                       folio_set_hugetlb_mte_tagged(folio);
+               }
+               return;
+       }
+
        for (i = 0; i < nr_pages; i++, page++) {
                if (try_page_mte_tagging(page)) {
                        mte_clear_page_tags(page_address(page));
index a7bb20055ce0948a6f586e29fd1a05bbbc1cc75f..87b3f1a2553564cbeada996d583c1038bb7e7176 100644 (file)
@@ -18,15 +18,40 @@ void copy_highpage(struct page *to, struct page *from)
 {
        void *kto = page_address(to);
        void *kfrom = page_address(from);
+       struct folio *src = page_folio(from);
+       struct folio *dst = page_folio(to);
+       unsigned int i, nr_pages;
 
        copy_page(kto, kfrom);
 
        if (kasan_hw_tags_enabled())
                page_kasan_tag_reset(to);
 
-       if (system_supports_mte() && page_mte_tagged(from)) {
+       if (!system_supports_mte())
+               return;
+
+       if (folio_test_hugetlb(src) &&
+           folio_test_hugetlb_mte_tagged(src)) {
+               if (!folio_try_hugetlb_mte_tagging(dst))
+                       return;
+
+               /*
+                * Populate tags for all subpages.
+                *
+                * Don't assume the first page is head page since
+                * huge page copy may start from any subpage.
+                */
+               nr_pages = folio_nr_pages(src);
+               for (i = 0; i < nr_pages; i++) {
+                       kfrom = page_address(folio_page(src, i));
+                       kto = page_address(folio_page(dst, i));
+                       mte_copy_page_tags(kto, kfrom);
+               }
+               folio_set_hugetlb_mte_tagged(dst);
+       } else if (page_mte_tagged(from)) {
                /* It's a new page, shouldn't have been tagged yet */
                WARN_ON_ONCE(!try_page_mte_tagging(to));
+
                mte_copy_page_tags(kto, kfrom);
                set_page_mte_tagged(to);
        }
index 5cf327337e2276e283f49cd9898216a5f721b76d..f26b3b53d7de44b0a1e4cebbbabd84daf8fd3601 100644 (file)
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
-       vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
+       vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
        vma->vm_ops = &hugetlb_vm_ops;
 
        ret = seal_check_write(info->seals, vma);