git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/huge_memory: have zap_huge_pmd return a boolean, add kdoc
author: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Fri, 20 Mar 2026 18:07:20 +0000 (18:07 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:45 +0000 (13:53 -0700)
There's no need to use the ancient approach of returning an integer here;
just return a boolean.

Also update flush_needed to be a boolean, similarly.

Also add a kdoc comment describing the function.

No functional change intended.

Link: https://lkml.kernel.org/r/132274566cd49d2960a2294c36dd2450593dfc55.1774029655.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c

index 61fda1672b292d41dfe5ae136d9801ce1f490e6d..2949e5acff351bdb6b5747ad6e60459916dfd94a 100644 (file)
@@ -27,8 +27,8 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
-int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
-                unsigned long addr);
+bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+                 unsigned long addr);
 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
index 4dfffd6a1bbe1d25723286bd6b238247cd023a26..65e554afdf163e8c863c87865bc1487388dbeff0 100644 (file)
@@ -2402,11 +2402,20 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
        mm_dec_nr_ptes(mm);
 }
 
-int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+/**
+ * zap_huge_pmd - Zap a huge THP which is of PMD size.
+ * @tlb: The MMU gather TLB state associated with the operation.
+ * @vma: The VMA containing the range to zap.
+ * @pmd: A pointer to the leaf PMD entry.
+ * @addr: The virtual address for the range to zap.
+ *
+ * Returns: %true on success, %false otherwise.
+ */
+bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
        struct folio *folio = NULL;
-       int flush_needed = 1;
+       bool flush_needed = true;
        spinlock_t *ptl;
        pmd_t orig_pmd;
 
@@ -2414,7 +2423,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
        if (!ptl)
-               return 0;
+               return false;
        /*
         * For architectures like ppc64 we look at deposited pgtable
         * when calling pmdp_huge_get_and_clear. So do the
@@ -2429,13 +2438,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
-               return 1;
+               return true;
        }
        if (is_huge_zero_pmd(orig_pmd)) {
                if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
                        zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
-               return 1;
+               return true;
        }
 
        if (pmd_present(orig_pmd)) {
@@ -2449,7 +2458,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
                folio = softleaf_to_folio(entry);
-               flush_needed = 0;
+               flush_needed = false;
 
                if (!thp_migration_supported())
                        WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
@@ -2483,7 +2492,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (flush_needed)
                tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
 
-       return 1;
+       return true;
 }
 
 #ifndef pmd_move_must_withdraw