From: Lorenzo Stoakes (Oracle)
Date: Fri, 20 Mar 2026 18:07:22 +0000 (+0000)
Subject: mm/huge_memory: add a common exit path to zap_huge_pmd()
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=7217744e0aa373dd6f0a62b0db610ff085e50153;p=thirdparty%2Fkernel%2Flinux.git

mm/huge_memory: add a common exit path to zap_huge_pmd()

Other than when we fail to acquire the PTL, we always need to unlock the
PTL, and optionally need to flush on exit.

The code currently duplicates this unlock/return logic at every exit, so
default flush_needed to false, set it to true in the one case that
requires it, then share the same logic across all exit paths.

This also makes flush_needed make more sense as a function-scope value
(we don't need to flush in the PFN map/mixed map, huge zero, or error
cases, for instance).

Link: https://lkml.kernel.org/r/6b281d8ed972dff0e89bdcbdd810c96c7ae8c9dc.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle)
Reviewed-by: Baolin Wang
Reviewed-by: Suren Baghdasaryan
Cc: Barry Song
Cc: David Hildenbrand
Cc: Dev Jain
Cc: Lance Yang
Cc: Liam Howlett
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Nico Pache
Cc: Qi Zheng
Cc: Ryan Roberts
Cc: Zi Yan
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f9aec7d4952c..283685dd6d9fe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2415,7 +2415,7 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
 	struct folio *folio = NULL;
-	bool flush_needed = true;
+	bool flush_needed = false;
 	spinlock_t *ptl;
 	pmd_t orig_pmd;
 
@@ -2437,19 +2437,18 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (vma_is_special_huge(vma)) {
 		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 
 	if (is_huge_zero_pmd(orig_pmd)) {
 		if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 
 	if (pmd_present(orig_pmd)) {
 		struct page *page = pmd_page(orig_pmd);
+		flush_needed = true;
 		folio = page_folio(page);
 		folio_remove_rmap_pmd(folio, page, vma);
 		WARN_ON_ONCE(folio_mapcount(folio) < 0);
@@ -2458,14 +2457,12 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
 		folio = softleaf_to_folio(entry);
-		flush_needed = false;
 
 		if (!thp_migration_supported())
 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
 	} else {
 		WARN_ON_ONCE(true);
-		spin_unlock(ptl);
-		return true;
+		goto out;
 	}
 
 	if (folio_test_anon(folio)) {
@@ -2492,10 +2489,10 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		folio_put(folio);
 	}
 
+out:
 	spin_unlock(ptl);
 	if (flush_needed)
 		tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
-
 	return true;
 }
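
[Editor's note: the shape this patch creates is the classic single-exit
"goto out" pattern: take the lock once, default the optional-work flag to
false, set it only on the path that needs it, and funnel every return
through one label that drops the lock and then does the conditional work.
Below is a minimal, self-contained user-space sketch of that structure,
under stated assumptions: all names here (do_zap, entry_kind, needs of
each case) are illustrative stand-ins, not kernel APIs.]

/*
 * Minimal sketch of the exit-path shape used by zap_huge_pmd() after
 * this patch.  Hypothetical names throughout; only the control flow
 * mirrors the patch.  Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

enum entry_kind { ENTRY_SPECIAL, ENTRY_ZERO, ENTRY_PRESENT, ENTRY_BAD };

static bool do_zap(enum entry_kind kind)
{
	bool flush_needed = false;	/* default: no flush on exit */

	pthread_mutex_lock(&lock);

	switch (kind) {
	case ENTRY_SPECIAL:
	case ENTRY_ZERO:
		/* Nothing to flush for these cases; just exit. */
		goto out;
	case ENTRY_PRESENT:
		flush_needed = true;	/* the one case that must flush */
		break;
	case ENTRY_BAD:
		goto out;		/* error path shares the exit too */
	}

	/* ... work that only the "present" case performs ... */

out:
	pthread_mutex_unlock(&lock);	/* every path unlocks exactly once */
	if (flush_needed)
		printf("flush\n");	/* stands in for tlb_remove_page_size() */
	return true;
}

int main(void)
{
	do_zap(ENTRY_PRESENT);	/* unlocks, then flushes */
	do_zap(ENTRY_ZERO);	/* unlocks only */
	return 0;
}

The payoff, as in the patch itself, is that the unlock can never be
forgotten on a new early return, and flush_needed reads as a property of
the whole function rather than of one branch.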