mm/huge_memory: add a common exit path to zap_huge_pmd()
author		Lorenzo Stoakes (Oracle) <ljs@kernel.org>	Fri, 20 Mar 2026 18:07:22 +0000 (18:07 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>	Sun, 5 Apr 2026 20:53:45 +0000 (13:53 -0700)
Other than when we fail to acquire the PTL, we always need to unlock the
PTL on exit, and sometimes also need to flush the TLB.

The code currently duplicates this logic across every exit path, so
default flush_needed to false, set it to true in the one case where a
flush is required, then share the same exit logic for all paths.

This also makes flush_needed more meaningful as a function-scope value (we
don't need to flush for the PFN map/mixed map, huge zero page, or error
cases, for instance).
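For illustration only, a minimal standalone C sketch of the pattern the
patch applies; all names here (zap, lock, unlock, flush, enum kind) are
hypothetical stand-ins for zap_huge_pmd(), spin_unlock() and
tlb_remove_page_size(), not kernel code:

/* Sketch: default the flush flag to false, set it only on the one
 * path that needs a flush, and funnel every exit through a single
 * label that performs the unlock. */
#include <stdbool.h>
#include <stdio.h>

static void lock(void)   { puts("lock");   }
static void unlock(void) { puts("unlock"); }
static void flush(void)  { puts("flush");  }

enum kind { SPECIAL, HUGE_ZERO, PRESENT, BAD };

static bool zap(enum kind k)
{
	bool flush_needed = false;	/* default: no flush on exit */

	lock();

	switch (k) {
	case SPECIAL:			/* PFN map/mixed map: no flush */
	case HUGE_ZERO:			/* huge zero page: no flush */
	case BAD:			/* error case: no flush */
		goto out;
	case PRESENT:			/* present page: flush needed */
		flush_needed = true;
		break;
	}

out:
	unlock();			/* every path unlocks here */
	if (flush_needed)
		flush();
	return true;
}

int main(void)
{
	zap(SPECIAL);			/* lock, unlock */
	zap(PRESENT);			/* lock, unlock, flush */
	return 0;
}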

Link: https://lkml.kernel.org/r/6b281d8ed972dff0e89bdcbdd810c96c7ae8c9dc.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 2f9aec7d4952ce8ad5561e85f272d4364fb6831f..283685dd6d9fe335b7a5e96dda344f90a49d5b44 100644 (file)
@@ -2415,7 +2415,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
        struct folio *folio = NULL;
-       bool flush_needed = true;
+       bool flush_needed = false;
        spinlock_t *ptl;
        pmd_t orig_pmd;
 
@@ -2437,19 +2437,18 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (vma_is_special_huge(vma)) {
                if (arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
-               spin_unlock(ptl);
-               return true;
+               goto out;
        }
        if (is_huge_zero_pmd(orig_pmd)) {
                if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
-               spin_unlock(ptl);
-               return true;
+               goto out;
        }
 
        if (pmd_present(orig_pmd)) {
                struct page *page = pmd_page(orig_pmd);
 
+               flush_needed = true;
                folio = page_folio(page);
                folio_remove_rmap_pmd(folio, page, vma);
                WARN_ON_ONCE(folio_mapcount(folio) < 0);
@@ -2458,14 +2457,12 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
                folio = softleaf_to_folio(entry);
-               flush_needed = false;
 
                if (!thp_migration_supported())
                        WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
        } else {
                WARN_ON_ONCE(true);
-               spin_unlock(ptl);
-               return true;
+               goto out;
        }
 
        if (folio_test_anon(folio)) {
@@ -2492,10 +2489,10 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                folio_put(folio);
        }
 
+out:
        spin_unlock(ptl);
        if (flush_needed)
                tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
-
        return true;
 }