From 357b92761d942432c90aeeb965f9eb0c94466921 Mon Sep 17 00:00:00 2001
From: Kiryl Shutsemau
Date: Tue, 23 Sep 2025 12:07:10 +0100
Subject: [PATCH] mm/filemap: map entire large folio faultaround

Currently, the kernel only maps the part of a large folio that fits into
the start_pgoff/end_pgoff range. Map the entire folio where possible.
This matches the finish_fault() behaviour that the user hits on a cold
page cache.

Mapping large folios at once will allow the rmap code to mlock them on
add, as it will recognize that they are fully mapped and mlocking is
safe.

Link: https://lkml.kernel.org/r/20250923110711.690639-6-kirill@shutemov.name
Signed-off-by: Kiryl Shutsemau
Cc: Baolin Wang
Cc: David Hildenbrand
Cc: Johannes Weiner
Cc: Lorenzo Stoakes
Cc: Shakeel Butt
Signed-off-by: Andrew Morton
---
 mm/filemap.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/mm/filemap.c b/mm/filemap.c
index 2a05b1fdd4452..a52dd38d2b4a4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3670,6 +3670,21 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	struct page *page = folio_page(folio, start);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
+	unsigned long addr0;
+
+	/*
+	 * Map the large folio fully where possible.
+	 *
+	 * The folio must not cross VMA or page table boundary.
+	 */
+	addr0 = addr - start * PAGE_SIZE;
+	if (folio_within_vma(folio, vmf->vma) &&
+	    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
+		vmf->pte -= start;
+		page -= start;
+		addr = addr0;
+		nr_pages = folio_nr_pages(folio);
+	}
 
 	do {
 		if (PageHWPoison(page + count))
--
2.47.3
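
The sketch below (not part of the patch) illustrates the alignment test the
hunk relies on: a folio-sized range stays within a single page table exactly
when its first and last byte share the same PMD-aligned prefix. It is a
standalone userspace illustration; the PMD_SHIFT value of 21 (2 MiB regions,
x86-64 with 4 KiB base pages) and the helper name fits_in_one_page_table()
are assumptions made for demonstration, not kernel API.

/*
 * Standalone sketch of the PMD boundary check, assuming x86-64 with
 * 4 KiB base pages (PMD_SHIFT = 21). The kernel derives these values
 * per architecture; this is only an illustration of the arithmetic.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SHIFT	21			/* assumed: x86-64, 4 KiB pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Same shape as the check added to filemap_map_folio_range(). */
static bool fits_in_one_page_table(unsigned long addr0, unsigned long size)
{
	return (addr0 & PMD_MASK) == ((addr0 + size - 1) & PMD_MASK);
}

int main(void)
{
	/* 2 MiB-aligned start, 64 KiB folio: entirely inside one PMD region. */
	printf("%d\n", fits_in_one_page_table(0x200000UL, 0x10000UL));	/* prints 1 */
	/* Start 4 KiB below a PMD boundary: the folio straddles two regions. */
	printf("%d\n", fits_in_one_page_table(0x3ff000UL, 0x10000UL));	/* prints 0 */
	return 0;
}

When the check fails (second case), the patch falls back to mapping only the
pages within the original start_pgoff/end_pgoff range, as before.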