From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Wed, 26 Feb 2025 16:31:29 +0000 (+0000)
Subject: hugetlb: convert hugetlb_vma_maps_page() to hugetlb_vma_maps_pfn()
X-Git-Tag: v6.15-rc1~81^2~66
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=fa17ad58f8328e5c089377fed55ca8ed62f7cd1d;p=thirdparty%2Flinux.git

hugetlb: convert hugetlb_vma_maps_page() to hugetlb_vma_maps_pfn()

pte_page() is more expensive than pte_pfn() (often it's defined as
pfn_to_page(pte_pfn())), so it makes sense to do the conversion to pfn
once (by calling folio_pfn()) rather than convert the pfn to a page each
time.

While this is a very small advantage, the main motivation is removing a
reference to folio->page.

Link: https://lkml.kernel.org/r/20250226163131.3795869-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0fc179a598300..a427d41fbca08 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -338,8 +338,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
  * mutex for the page in the mapping.  So, we can not race with page being
  * faulted into the vma.
  */
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
-				unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
 {
 	pte_t *ptep, pte;
 
@@ -351,7 +351,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
 	if (huge_pte_none(pte) || !pte_present(pte))
 		return false;
 
-	if (pte_page(pte) == page)
+	if (pte_pfn(pte) == pfn)
 		return true;
 
 	return false;
@@ -396,7 +396,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
 {
 	struct rb_root_cached *root = &mapping->i_mmap;
 	struct hugetlb_vma_lock *vma_lock;
-	struct page *page = &folio->page;
+	unsigned long pfn = folio_pfn(folio);
 	struct vm_area_struct *vma;
 	unsigned long v_start;
 	unsigned long v_end;
@@ -412,7 +412,7 @@ retry:
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
 
-		if (!hugetlb_vma_maps_page(vma, v_start, page))
+		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
 			continue;
 
 		if (!hugetlb_vma_trylock_write(vma)) {
@@ -462,7 +462,7 @@ retry:
 		 */
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
-		if (hugetlb_vma_maps_page(vma, v_start, page))
+		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
 			unmap_hugepage_range(vma, v_start, v_end, NULL,
 					     ZAP_FLAG_DROP_MARKER);
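
To make the cost asymmetry concrete, here is a minimal standalone C sketch,
separate from the patch above.  Every definition in it is a toy stand-in,
not the kernel's real code; the only assumption carried over is the one the
commit message states, that pte_page() is often defined as
pfn_to_page(pte_pfn()), so the old check pays a pfn-to-page conversion on
every comparison while the new one compares raw integers:

#include <stdio.h>

/* Toy stand-ins, not the kernel's real definitions: a pte encodes a
 * pfn in its high bits, and pages live in a flat memmap[] array. */
typedef unsigned long pte_t;
struct page { unsigned long flags; };

#define PAGE_SHIFT 12
static struct page memmap[1024];		/* stand-in for the vmemmap */

static unsigned long pte_pfn(pte_t pte)		/* cheap: a shift */
{
	return pte >> PAGE_SHIFT;
}

static struct page *pfn_to_page(unsigned long pfn)	/* extra array indexing */
{
	return &memmap[pfn];
}

/* The composition the commit message cites as a common definition. */
static struct page *pte_page(pte_t pte)
{
	return pfn_to_page(pte_pfn(pte));
}

int main(void)
{
	pte_t pte = 7UL << PAGE_SHIFT;	/* a pte mapping pfn 7 */
	struct page *page = &memmap[7];	/* old code: &folio->page */
	unsigned long pfn = 7;		/* new code: folio_pfn(folio) */

	/* Old check: a pfn_to_page() conversion on every comparison. */
	printf("pte_page(pte) == page: %d\n", pte_page(pte) == page);

	/* New check: a raw integer compare; the folio-to-pfn conversion
	 * happens once in the caller, not once per PTE examined. */
	printf("pte_pfn(pte) == pfn: %d\n", pte_pfn(pte) == pfn);
	return 0;
}

Both checks print 1: the two comparisons are equivalent, which is what makes
hoisting folio_pfn() into hugetlb_unmap_file_folio() safe, and skipping the
per-call pfn-to-page conversion in hugetlb_vma_maps_pfn() is the (small)
saving the commit message describes.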