From: Fan Ni
Date: Mon, 5 May 2025 18:22:43 +0000 (-0700)
Subject: mm/hugetlb: refactor __unmap_hugepage_range() to take folio instead of page
X-Git-Tag: v6.16-rc1~92^2~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7f4b6065d9a842721a04632fc219aa453d1b2f5c;p=thirdparty%2Flinux.git

mm/hugetlb: refactor __unmap_hugepage_range() to take folio instead of page

The function __unmap_hugepage_range() has two kinds of users:
1) unmap_hugepage_range(), which passes in the head page of a folio.  Since
   unmap_hugepage_range() already takes a folio and there are no other uses
   of the folio struct in that function, it is natural for
   __unmap_hugepage_range() to take a folio as well.
2) All other users, which pass in a NULL pointer.

In both cases, we can pass in a folio.  Refactor __unmap_hugepage_range() to
take a folio.

Link: https://lkml.kernel.org/r/20250505182345.506888-5-nifan.cxl@gmail.com
Signed-off-by: Fan Ni
Acked-by: David Hildenbrand
Reviewed-by: Oscar Salvador
Cc: Matthew Wilcox (Oracle)
Cc: Muchun Song
Cc: Sidhartha Kumar
Cc: "Vishal Moola (Oracle)"
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f6d5f24e793c2..eb21619206af0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -134,7 +134,7 @@ void unmap_hugepage_range(struct vm_area_struct *,
 void __unmap_hugepage_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long start, unsigned long end,
-                            struct page *ref_page, zap_flags_t zap_flags);
+                            struct folio *, zap_flags_t zap_flags);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo_node(int nid);
@@ -455,7 +455,7 @@ static inline long hugetlb_change_protection(
 
 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                         struct vm_area_struct *vma, unsigned long start,
-                        unsigned long end, struct page *ref_page,
+                        unsigned long end, struct folio *folio,
                         zap_flags_t zap_flags)
 {
         BUG();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c339ffe055567..443b75e116cf4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5840,7 +5840,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                             unsigned long start, unsigned long end,
-                            struct page *ref_page, zap_flags_t zap_flags)
+                            struct folio *folio, zap_flags_t zap_flags)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long address;
@@ -5913,12 +5913,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 page = pte_page(pte);
 
                 /*
-                 * If a reference page is supplied, it is because a specific
-                 * page is being unmapped, not a range. Ensure the page we
-                 * are about to unmap is the actual page of interest.
+                 * If a folio is supplied, it is because a specific
+                 * folio is being unmapped, not a range. Ensure the folio we
+                 * are about to unmap is the actual folio of interest.
                  */
-                if (ref_page) {
-                        if (page != ref_page) {
+                if (folio) {
+                        if (page_folio(page) != folio) {
                                 spin_unlock(ptl);
                                 continue;
                         }
@@ -5982,9 +5982,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 tlb_remove_page_size(tlb, page, huge_page_size(h));
 
                 /*
-                 * Bail out after unmapping reference page if supplied
+                 * If we were instructed to unmap a specific folio, we're done.
                  */
-                if (ref_page)
+                if (folio)
                         break;
         }
         tlb_end_vma(tlb, vma);
@@ -6059,7 +6059,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
         tlb_gather_mmu(&tlb, vma->vm_mm);
 
         __unmap_hugepage_range(&tlb, vma, start, end,
-                               &folio->page, zap_flags);
+                               folio, zap_flags);
 
         mmu_notifier_invalidate_range_end(&range);
         tlb_finish_mmu(&tlb);
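
For readers skimming the conversion above: the caller that used to pass &folio->page now
passes the folio itself, and the per-mapping check becomes page_folio(page) != folio
instead of page != ref_page.  Because unmap_hugepage_range() always passed the folio's
head page, the two checks agree for that caller.  The snippet below is a standalone,
illustrative-only sketch (not kernel code) that models this with mock page/folio types;
the mock page_folio() stand-in simply relies on the folio overlaying its head page,
which is an assumption of the sketch rather than the real helper's implementation.

/*
 * Illustrative-only sketch, NOT kernel code: mock types modelling the check
 * rewritten by this patch.  A "folio" here overlays its head "page", so for
 * a caller that used to pass &folio->page, the old head-page identity test
 * (page != ref_page) and the new membership test (page_folio(page) != folio)
 * reach the same decision.
 */
#include <stdio.h>

struct page_mock  { unsigned long flags; };
struct folio_mock { struct page_mock page; };   /* head page is the first member */

/* Stand-in for the kernel's page_folio(); valid only for head pages here. */
static struct folio_mock *page_folio_mock(struct page_mock *page)
{
        return (struct folio_mock *)page;
}

int main(void)
{
        struct folio_mock folio = { { 0 } };

        /* Old calling convention: unmap_hugepage_range() passed the head page. */
        struct page_mock *ref_page = &folio.page;

        /* What __unmap_hugepage_range() sees via pte_page() for this mapping. */
        struct page_mock *page = &folio.page;

        int old_skip = (page != ref_page);                /* pre-patch check  */
        int new_skip = (page_folio_mock(page) != &folio); /* post-patch check */

        printf("old check skips mapping: %d, new check skips mapping: %d\n",
               old_skip, new_skip);
        return 0;
}

Note that the real page_folio() also maps tail pages of a compound page to their folio;
the mock above deliberately ignores that case, since the hugetlb path here only ever
compares against the head page of a single huge mapping.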