git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: remove is_hugetlb_entry_[migration, hwpoisoned]()
author: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Mon, 10 Nov 2025 22:21:32 +0000 (22:21 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 24 Nov 2025 23:08:51 +0000 (15:08 -0800)
We do not need to have explicit helper functions for these, it adds a
level of confusion and indirection when we can simply use software leaf
entry logic here instead and spell out the special huge_pte_none() case we
must consider.

No functional change intended.

Link: https://lkml.kernel.org/r/0e92d6924d3de88cd014ce1c53e20edc08fc152e.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c
include/linux/hugetlb.h
mm/hugetlb.c
mm/mempolicy.c
mm/migrate.c

index 1f49c81b359100cdc55b7f688c4294258e3c8891..92ada14eabc09cf437dfc11dca4b334ea3dcf7a9 100644 (file)
@@ -2500,22 +2500,25 @@ static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t *ptep,
                                  pte_t ptent)
 {
-       unsigned long psize;
+       const unsigned long psize = huge_page_size(hstate_vma(vma));
+       softleaf_t entry;
 
-       if (is_hugetlb_entry_hwpoisoned(ptent) || pte_is_marker(ptent))
-               return;
+       if (huge_pte_none(ptent)) {
+               set_huge_pte_at(vma->vm_mm, addr, ptep,
+                               make_pte_marker(PTE_MARKER_UFFD_WP), psize);
+               return;
+       }
 
-       psize = huge_page_size(hstate_vma(vma));
+       entry = softleaf_from_pte(ptent);
+       if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
+               return;
 
-       if (is_hugetlb_entry_migration(ptent))
+       if (softleaf_is_migration(entry))
                set_huge_pte_at(vma->vm_mm, addr, ptep,
                                pte_swp_mkuffd_wp(ptent), psize);
-       else if (!huge_pte_none(ptent))
+       else
                huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
                                             huge_pte_mkuffd_wp(ptent));
-       else
-               set_huge_pte_at(vma->vm_mm, addr, ptep,
-                               make_pte_marker(PTE_MARKER_UFFD_WP), psize);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
index 2387513d6ae539a0305bdc37fa639c0ee0c5c5e1..457d48ac7bcd668270964dffe95b173567f45688 100644 (file)
@@ -274,8 +274,6 @@ void hugetlb_vma_lock_release(struct kref *kref);
 long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags);
-bool is_hugetlb_entry_migration(pte_t pte);
-bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 void fixup_hugetlb_reservations(struct vm_area_struct *vma);
 void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
index 59d91c36770c19a3f6d881723a4c64ea65d2ce70..311c5d60131049a8c85ade0f7702b3ac3a969702 100644 (file)
@@ -4846,32 +4846,6 @@ static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
                set_huge_ptep_writable(vma, address, ptep);
 }
 
-bool is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return false;
-       swp = pte_to_swp_entry(pte);
-       if (is_migration_entry(swp))
-               return true;
-       else
-               return false;
-}
-
-bool is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return false;
-       swp = pte_to_swp_entry(pte);
-       if (is_hwpoison_entry(swp))
-               return true;
-       else
-               return false;
-}
-
 static void
 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
                      struct folio *new_folio, pte_t old, unsigned long sz)
@@ -4900,6 +4874,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        unsigned long npages = pages_per_huge_page(h);
        struct mmu_notifier_range range;
        unsigned long last_addr_mask;
+       softleaf_t softleaf;
        int ret = 0;
 
        if (cow) {
@@ -4947,16 +4922,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
 again:
                if (huge_pte_none(entry)) {
-                       /*
-                        * Skip if src entry none.
-                        */
-                       ;
-               } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+                       /* Skip if src entry none. */
+                       goto next;
+               }
+
+               softleaf = softleaf_from_pte(entry);
+               if (unlikely(softleaf_is_hwpoison(softleaf))) {
                        if (!userfaultfd_wp(dst_vma))
                                entry = huge_pte_clear_uffd_wp(entry);
                        set_huge_pte_at(dst, addr, dst_pte, entry, sz);
-               } else if (unlikely(is_hugetlb_entry_migration(entry))) {
-                       softleaf_t softleaf = softleaf_from_pte(entry);
+               } else if (unlikely(softleaf_is_migration(softleaf))) {
                        bool uffd_wp = pte_swp_uffd_wp(entry);
 
                        if (!is_readable_migration_entry(softleaf) && cow) {
@@ -4975,7 +4950,6 @@ again:
                                entry = huge_pte_clear_uffd_wp(entry);
                        set_huge_pte_at(dst, addr, dst_pte, entry, sz);
                } else if (unlikely(pte_is_marker(entry))) {
-                       const softleaf_t softleaf = softleaf_from_pte(entry);
                        const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
 
                        if (marker)
@@ -5033,9 +5007,7 @@ again:
                                }
                                hugetlb_install_folio(dst_vma, dst_pte, addr,
                                                      new_folio, src_pte_old, sz);
-                               spin_unlock(src_ptl);
-                               spin_unlock(dst_ptl);
-                               continue;
+                               goto next;
                        }
 
                        if (cow) {
@@ -5056,6 +5028,8 @@ again:
                        set_huge_pte_at(dst, addr, dst_pte, entry, sz);
                        hugetlb_count_add(npages, dst);
                }
+
+next:
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
        }
@@ -6064,8 +6038,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        ret = 0;
 
        /* Not present, either a migration or a hwpoisoned entry */
-       if (!pte_present(vmf.orig_pte)) {
-               if (is_hugetlb_entry_migration(vmf.orig_pte)) {
+       if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
+               const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
+
+               if (softleaf_is_migration(softleaf)) {
                        /*
                         * Release the hugetlb fault lock now, but retain
                         * the vma lock, because it is needed to guard the
@@ -6076,9 +6052,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        migration_entry_wait_huge(vma, vmf.address, vmf.pte);
                        return 0;
-               } else if (is_hugetlb_entry_hwpoisoned(vmf.orig_pte))
+               }
+               if (softleaf_is_hwpoison(softleaf)) {
                        ret = VM_FAULT_HWPOISON_LARGE |
                            VM_FAULT_SET_HINDEX(hstate_index(h));
+               }
+
                goto out_mutex;
        }
 
@@ -6460,7 +6439,9 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
        i_mmap_lock_write(vma->vm_file->f_mapping);
        last_addr_mask = hugetlb_mask_last_page(h);
        for (; address < end; address += psize) {
+               softleaf_t entry;
                spinlock_t *ptl;
+
                ptep = hugetlb_walk(vma, address, psize);
                if (!ptep) {
                        if (!uffd_wp) {
@@ -6492,15 +6473,23 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                }
                pte = huge_ptep_get(mm, address, ptep);
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-                       /* Nothing to do. */
-               } else if (unlikely(is_hugetlb_entry_migration(pte))) {
-                       softleaf_t entry = softleaf_from_pte(pte);
+               if (huge_pte_none(pte)) {
+                       if (unlikely(uffd_wp))
+                               /* Safe to modify directly (none->non-present). */
+                               set_huge_pte_at(mm, address, ptep,
+                                               make_pte_marker(PTE_MARKER_UFFD_WP),
+                                               psize);
+                       goto next;
+               }
 
+               entry = softleaf_from_pte(pte);
+               if (unlikely(softleaf_is_hwpoison(entry))) {
+                       /* Nothing to do. */
+               } else if (unlikely(softleaf_is_migration(entry))) {
                        struct folio *folio = softleaf_to_folio(entry);
                        pte_t newpte = pte;
 
-                       if (is_writable_migration_entry(entry)) {
+                       if (softleaf_is_migration_write(entry)) {
                                if (folio_test_anon(folio))
                                        entry = make_readable_exclusive_migration_entry(
                                                                swp_offset(entry));
@@ -6527,7 +6516,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                        if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
                                /* Safe to modify directly (non-present->none). */
                                huge_pte_clear(mm, address, ptep, psize);
-               } else if (!huge_pte_none(pte)) {
+               } else {
                        pte_t old_pte;
                        unsigned int shift = huge_page_shift(hstate_vma(vma));
 
@@ -6540,16 +6529,10 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                                pte = huge_pte_clear_uffd_wp(pte);
                        huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
                        pages++;
-               } else {
-                       /* None pte */
-                       if (unlikely(uffd_wp))
-                               /* Safe to modify directly (none->non-present). */
-                               set_huge_pte_at(mm, address, ptep,
-                                               make_pte_marker(PTE_MARKER_UFFD_WP),
-                                               psize);
                }
-               spin_unlock(ptl);
 
+next:
+               spin_unlock(ptl);
                cond_resched();
        }
        /*
index 01c3b98f87a6b11f2484f04843ed86942d2131f0..dee95d5ecfd4e8f6c2adda90db73127e8cd98fac 100644 (file)
@@ -768,16 +768,21 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
        unsigned long flags = qp->flags;
        struct folio *folio;
        spinlock_t *ptl;
-       pte_t entry;
+       pte_t ptep;
 
        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
-       entry = huge_ptep_get(walk->mm, addr, pte);
-       if (!pte_present(entry)) {
-               if (unlikely(is_hugetlb_entry_migration(entry)))
-                       qp->nr_failed++;
+       ptep = huge_ptep_get(walk->mm, addr, pte);
+       if (!pte_present(ptep)) {
+               if (!huge_pte_none(ptep)) {
+                       const softleaf_t entry = softleaf_from_pte(ptep);
+
+                       if (unlikely(softleaf_is_migration(entry)))
+                               qp->nr_failed++;
+               }
+
                goto unlock;
        }
-       folio = pfn_folio(pte_pfn(entry));
+       folio = pfn_folio(pte_pfn(ptep));
        if (!queue_folio_required(folio, qp))
                goto unlock;
        if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
index ca4ec170a89b77aecd2821e32592383a05eb6001..5edfd0b2f63d311fb2641eada54e7a87de0ce029 100644 (file)
@@ -515,16 +515,18 @@ out:
 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
+       softleaf_t entry;
        pte_t pte;
 
        hugetlb_vma_assert_locked(vma);
        spin_lock(ptl);
        pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 
-       if (unlikely(!is_hugetlb_entry_migration(pte))) {
-               spin_unlock(ptl);
-               hugetlb_vma_unlock_read(vma);
-       } else {
+       if (huge_pte_none(pte))
+               goto fail;
+
+       entry = softleaf_from_pte(pte);
+       if (softleaf_is_migration(entry)) {
                /*
                 * If migration entry existed, safe to release vma lock
                 * here because the pgtable page won't be freed without the
@@ -533,7 +535,12 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
                 */
                hugetlb_vma_unlock_read(vma);
                migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+               return;
        }
+
+fail:
+       spin_unlock(ptl);
+       hugetlb_vma_unlock_read(vma);
 }
 #endif