mm/hugetlb: refactor __unmap_hugepage_range() to take folio instead of page
author     Fan Ni <fan.ni@samsung.com>
           Mon, 5 May 2025 18:22:43 +0000 (11:22 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 28 May 2025 02:38:26 +0000 (19:38 -0700)
The function __unmap_hugepage_range() has two kinds of users:
1) unmap_hugepage_range(), which passes in the head page of a folio.
   Since unmap_hugepage_range() already takes a folio and makes no other
   use of the folio within the function, it is natural for
   __unmap_hugepage_range() to take a folio as well.
2) All other users, which pass in a NULL pointer.

In both cases, we can pass in a folio. Refactor __unmap_hugepage_range()
to take a folio.
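
For illustration, a reduced sketch of the per-PTE filtering that this
refactor produces (simplified from the hunks below; page-table locking,
TLB flushing and accounting are omitted, so this is not the kernel
function itself):

	for (address = start; address < end; address += sz) {
		...
		page = pte_page(pte);

		/*
		 * A non-NULL folio means one specific folio is being
		 * unmapped, not a range; skip pages from other folios.
		 */
		if (folio && page_folio(page) != folio)
			continue;

		/* ... clear the PTE and unmap the page ... */

		/* The requested folio has been unmapped; stop. */
		if (folio)
			break;
	}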

Link: https://lkml.kernel.org/r/20250505182345.506888-5-nifan.cxl@gmail.com
Signed-off-by: Fan Ni <fan.ni@samsung.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f6d5f24e793c2dc107c8a98009736361eeef8c96..eb21619206af0de154b178b896c1244027963963 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -134,7 +134,7 @@ void unmap_hugepage_range(struct vm_area_struct *,
 void __unmap_hugepage_range(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
-                         struct page *ref_page, zap_flags_t zap_flags);
+                         struct folio *, zap_flags_t zap_flags);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo_node(int nid);
@@ -455,7 +455,7 @@ static inline long hugetlb_change_protection(
 
 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
-                       unsigned long end, struct page *ref_page,
+                       unsigned long end, struct folio *folio,
                        zap_flags_t zap_flags)
 {
        BUG();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c339ffe055567bd55da1bb3b7c1238f6f759b61c..443b75e116cf4c41c52f3c8951c479d2b7cd5d83 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5840,7 +5840,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
-                           struct page *ref_page, zap_flags_t zap_flags)
+                           struct folio *folio, zap_flags_t zap_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5913,12 +5913,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
                page = pte_page(pte);
                /*
-                * If a reference page is supplied, it is because a specific
-                * page is being unmapped, not a range. Ensure the page we
-                * are about to unmap is the actual page of interest.
+                * If a folio is supplied, it is because a specific
+                * folio is being unmapped, not a range. Ensure the folio we
+                * are about to unmap is the actual folio of interest.
                 */
-               if (ref_page) {
-                       if (page != ref_page) {
+               if (folio) {
+                       if (page_folio(page) != folio) {
                                spin_unlock(ptl);
                                continue;
                        }
@@ -5982,9 +5982,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
                tlb_remove_page_size(tlb, page, huge_page_size(h));
                /*
-                * Bail out after unmapping reference page if supplied
+                * If we were instructed to unmap a specific folio, we're done.
                 */
-               if (ref_page)
+               if (folio)
                        break;
        }
        tlb_end_vma(tlb, vma);
@@ -6059,7 +6059,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        tlb_gather_mmu(&tlb, vma->vm_mm);
 
        __unmap_hugepage_range(&tlb, vma, start, end,
-                              &folio->page, zap_flags);
+                              folio, zap_flags);
 
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
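
At the one call site that passes a non-NULL pointer, the conversion is
the mechanical change visible in the last hunk; as a usage sketch
(identifiers as in that hunk):

	/* Before: the caller reached back from the folio to its head page. */
	__unmap_hugepage_range(&tlb, vma, start, end, &folio->page, zap_flags);

	/* After: the folio is passed through unchanged. */
	__unmap_hugepage_range(&tlb, vma, start, end, folio, zap_flags);

Because a hugetlb PTE maps the head page of its folio, the new
page_folio(page) != folio test skips exactly the pages the old
page != ref_page test skipped, so this is intended as a pure refactor
with no change in filtering behaviour.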