mm/hugetlb: pass folio instead of page to unmap_ref_private()
Author:     Fan Ni <fan.ni@samsung.com>
AuthorDate: Mon, 5 May 2025 18:22:41 +0000 (11:22 -0700)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 28 May 2025 02:38:26 +0000 (19:38 -0700)
Patch series "Let unmap_hugepage_range() and several related functions
take folio instead of page", v4.

This patch (of 4):

unmap_ref_private() has only a single user, which passes in &folio->page.
Let it take the folio directly.
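
As a rough sketch (the exact context is in the diff below), the prototype
becomes:

	static void unmap_ref_private(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      struct folio *folio,
				      unsigned long address);

Since a folio and its head page share the same address, the internal
unmap_hugepage_range() call can still be fed &folio->page for now; per the
series title, that function is converted to take a folio in a later patch.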

Link: https://lkml.kernel.org/r/20250505182345.506888-2-nifan.cxl@gmail.com
Link: https://lkml.kernel.org/r/20250505182345.506888-3-nifan.cxl@gmail.com
Signed-off-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0057d1f1dc9a703568e1741f2afa95ea49fb35e5..0c2b264a7ab8f56b18b37fc3e2be6587d798c179 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6071,7 +6071,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * same region.
  */
 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-                             struct page *page, unsigned long address)
+                             struct folio *folio, unsigned long address)
 {
        struct hstate *h = hstate_vma(vma);
        struct vm_area_struct *iter_vma;
@@ -6115,7 +6115,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma, address,
-                                            address + huge_page_size(h), page, 0);
+                                            address + huge_page_size(h),
+                                            &folio->page, 0);
        }
        i_mmap_unlock_write(mapping);
 }
@@ -6238,8 +6239,7 @@ retry_avoidcopy:
                        hugetlb_vma_unlock_read(vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-                       unmap_ref_private(mm, vma, &old_folio->page,
-                                       vmf->address);
+                       unmap_ref_private(mm, vma, old_folio, vmf->address);
 
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        hugetlb_vma_lock_read(vma);