From: Fan Ni
Date: Mon, 5 May 2025 18:22:42 +0000 (-0700)
Subject: mm/hugetlb: refactor unmap_hugepage_range() to take folio instead of page
X-Git-Tag: v6.16-rc1~92^2~3
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=81edb1ba3232afd45ae7f3f492a91019571b18c9;p=thirdparty%2Flinux.git

mm/hugetlb: refactor unmap_hugepage_range() to take folio instead of page

The function unmap_hugepage_range() has two kinds of users:
1) unmap_ref_private(), which passes in the head page of a folio.  Since
   unmap_ref_private() already takes a folio, and the folio is not used
   for anything else in that function, it is natural for
   unmap_hugepage_range() to take a folio as well.
2) All other users, which pass in a NULL pointer.

In both cases, we can pass in a folio.  Refactor unmap_hugepage_range()
to take a folio.

Link: https://lkml.kernel.org/r/20250505182345.506888-4-nifan.cxl@gmail.com
Signed-off-by: Fan Ni
Reviewed-by: Muchun Song
Reviewed-by: Sidhartha Kumar
Reviewed-by: Oscar Salvador
Acked-by: David Hildenbrand
Cc: Matthew Wilcox (Oracle)
Cc: "Vishal Moola (Oracle)"
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 23ebf49c5d6a4..f6d5f24e793c2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -129,8 +129,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 			    struct vm_area_struct *, struct vm_area_struct *);
 void unmap_hugepage_range(struct vm_area_struct *,
-			  unsigned long, unsigned long, struct page *,
-			  zap_flags_t);
+			  unsigned long start, unsigned long end,
+			  struct folio *, zap_flags_t);
 void __unmap_hugepage_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0c2b264a7ab8f..c339ffe055567 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6046,7 +6046,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end, struct page *ref_page,
+			  unsigned long end, struct folio *folio,
 			  zap_flags_t zap_flags)
 {
 	struct mmu_notifier_range range;
@@ -6058,7 +6058,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	mmu_notifier_invalidate_range_start(&range);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 
-	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+	__unmap_hugepage_range(&tlb, vma, start, end,
+			       &folio->page, zap_flags);
 
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
@@ -6116,7 +6117,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
 			unmap_hugepage_range(iter_vma, address,
 					     address + huge_page_size(h),
-					     folio, 0);
+					     folio, 0);
 	}
 	i_mmap_unlock_write(mapping);
 }
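
A note on the NULL case, with a small standalone sketch (not part of the
patch; the struct definitions below are simplified stand-ins, not the
kernel's real layouts): callers other than unmap_ref_private() pass a NULL
folio, and unmap_hugepage_range() forwards it to __unmap_hugepage_range()
as &folio->page.  Because struct page sits at offset 0 inside struct folio,
taking the address of that member performs no memory access, so a NULL
folio arrives as a NULL struct page *, exactly as the old NULL ref_page
did:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins: in the kernel, struct folio begins with a union
 * whose first member is struct page, so "page" is at offset 0. */
struct page {
	unsigned long flags;
};

struct folio {
	struct page page;	/* must remain the first member */
	unsigned long extra;
};

int main(void)
{
	struct folio *folio = NULL;	/* the "all other users" case */

	assert(offsetof(struct folio, page) == 0);

	/*
	 * Mirrors the new unmap_hugepage_range(): &folio->page is computed
	 * as folio plus the member offset, without loading from memory.
	 * (Strict ISO C calls this undefined for a NULL pointer; the kernel
	 * relies on the GNU C behavior, where the result is simply NULL.)
	 */
	struct page *ref_page = &folio->page;

	printf("ref_page = %p\n", (void *)ref_page);	/* "(nil)" on glibc */
	return 0;
}

The design choice is the usual one for folio conversions: the
folio-to-page step now happens once, at the single internal call into
__unmap_hugepage_range(), instead of at each external call site.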