mm/filemap: map entire large folio faultaround
author Kiryl Shutsemau <kas@kernel.org>
Tue, 23 Sep 2025 11:07:10 +0000 (12:07 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 28 Sep 2025 18:51:30 +0000 (11:51 -0700)
Currently, the kernel only maps the part of a large folio that fits into
the start_pgoff/end_pgoff range.

Map the entire folio where possible.  This matches the finish_fault()
behaviour that the user hits on a cold page cache.

Mapping the large folio at once will allow the rmap code to mlock it on
add, as it will recognize that the folio is fully mapped and that
mlocking is safe.
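For illustration, the standalone sketch below mimics the "does the whole
folio fit" condition in userspace: the folio can only be mapped in full if
its first and last byte fall inside the same PMD-sized page table.  It
omits the folio_within_vma() part of the real check, assumes x86-64 values
for PAGE_SIZE and PMD_SIZE, and the helper name folio_fits_one_pmd() is
hypothetical, not kernel code.

/*
 * Minimal userspace illustration of the PMD-boundary check; not the
 * kernel implementation.  x86-64 page sizes assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))

/*
 * addr  - faulting address, which maps page 'start' of the folio
 * start - index of the faulting page within the folio
 * nr    - number of pages in the folio
 */
static bool folio_fits_one_pmd(uintptr_t addr, unsigned long start,
			       unsigned long nr)
{
	/* Virtual address the folio's first page would map at. */
	uintptr_t addr0 = addr - start * PAGE_SIZE;

	/* First and last byte must share one PMD-aligned region. */
	return (addr0 & PMD_MASK) ==
	       ((addr0 + nr * PAGE_SIZE - 1) & PMD_MASK);
}

int main(void)
{
	/* Fault on page 1 of a 16-page (64 KiB) folio; fits in one PMD. */
	printf("fits: %d\n",
	       folio_fits_one_pmd(0x7f0000201000UL, 1, 16));
	/* Same folio, placed so it straddles a 2 MiB boundary. */
	printf("fits: %d\n",
	       folio_fits_one_pmd(0x7f00001ff000UL, 1, 16));
	return 0;
}

When the check fails, only the start_pgoff/end_pgoff portion is mapped, as
before; when it succeeds, the patch rewinds vmf->pte, page and addr to the
folio's first page so the whole folio is mapped in one pass.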

Link: https://lkml.kernel.org/r/20250923110711.690639-6-kirill@shutemov.name
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c

index 2a05b1fdd4452a476c9cf95ac6f4df27ce9f3a23..a52dd38d2b4a4188c0df28b1e1edd59cabbd13a4 100644
@@ -3670,6 +3670,21 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
        struct page *page = folio_page(folio, start);
        unsigned int count = 0;
        pte_t *old_ptep = vmf->pte;
+       unsigned long addr0;
+
+       /*
+        * Map the large folio fully where possible.
+        *
+        * The folio must not cross VMA or page table boundary.
+        */
+       addr0 = addr - start * PAGE_SIZE;
+       if (folio_within_vma(folio, vmf->vma) &&
+           (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
+               vmf->pte -= start;
+               page -= start;
+               addr = addr0;
+               nr_pages = folio_nr_pages(folio);
+       }
 
        do {
                if (PageHWPoison(page + count))