git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/huge_memory: deduplicate zap deposited table call
Author: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Date: Fri, 20 Mar 2026 18:07:24 +0000 (18:07 +0000)
Committer: Andrew Morton <akpm@linux-foundation.org>
Date: Sun, 5 Apr 2026 20:53:46 +0000 (13:53 -0700)
Rather than having separate logic for each case determining whether to zap
the deposited table, simply track this via a boolean.

We default this to whether the architecture requires it, and update it as
required elsewhere.

Link: https://lkml.kernel.org/r/71f576a1fbcd27a86322d12caa937bcdacf75407.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index f072acd5b2799728f1f3ab51120970a321b91148..41506f376f4beb63a5bacb57d1d3f1af03fc0e60 100644 (file)
@@ -2414,6 +2414,7 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
 bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
+       bool has_deposit = arch_needs_pgtable_deposit();
        struct folio *folio = NULL;
        bool flush_needed = false;
        spinlock_t *ptl;
@@ -2434,23 +2435,19 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                                tlb->fullmm);
        arch_check_zapped_pmd(vma, orig_pmd);
        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-       if (vma_is_special_huge(vma)) {
-               if (arch_needs_pgtable_deposit())
-                       zap_deposited_table(tlb->mm, pmd);
+       if (vma_is_special_huge(vma))
                goto out;
-       }
        if (is_huge_zero_pmd(orig_pmd)) {
-               if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
-                       zap_deposited_table(tlb->mm, pmd);
+               if (!vma_is_dax(vma))
+                       has_deposit = true;
                goto out;
        }
 
        if (pmd_present(orig_pmd)) {
-               struct page *page = pmd_page(orig_pmd);
+               folio = pmd_folio(orig_pmd);
 
                flush_needed = true;
-               folio = page_folio(page);
-               folio_remove_rmap_pmd(folio, page, vma);
+               folio_remove_rmap_pmd(folio, &folio->page, vma);
                WARN_ON_ONCE(folio_mapcount(folio) < 0);
        } else if (pmd_is_valid_softleaf(orig_pmd)) {
                const softleaf_t entry = softleaf_from_pmd(orig_pmd);
@@ -2465,11 +2462,9 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        }
 
        if (folio_test_anon(folio)) {
-               zap_deposited_table(tlb->mm, pmd);
+               has_deposit = true;
                add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
        } else {
-               if (arch_needs_pgtable_deposit())
-                       zap_deposited_table(tlb->mm, pmd);
                add_mm_counter(tlb->mm, mm_counter_file(folio),
                               -HPAGE_PMD_NR);
 
@@ -2489,6 +2484,9 @@ bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        }
 
 out:
+       if (has_deposit)
+               zap_deposited_table(tlb->mm, pmd);
+
        spin_unlock(ptl);
        if (flush_needed)
                tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);