mm/filemap: fix logic around SIGBUS in filemap_map_pages()
author Kiryl Shutsemau <kas@kernel.org>
Thu, 20 Nov 2025 16:14:11 +0000 (16:14 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 24 Nov 2025 22:25:18 +0000 (14:25 -0800)
Chris noticed that filemap_map_pages() calculates can_map_large only once,
for the first page in the fault-around range.  The value is not valid for
the following pages in the range and must be recalculated.

Instead of recalculating can_map_large on each iteration, pass down
file_end to filemap_map_folio_range() and let it make the decision on what
can be mapped.
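
As an illustration (the helper name folio_may_map_large() is hypothetical,
not part of the patch), the per-folio decision that now lives inside
filemap_map_folio_range() boils down to the following sketch:

	/*
	 * Sketch of the check made for each folio: map the folio in
	 * full only if it is fully within the size of the file, or if
	 * the mapping is shmem/tmpfs, which has long been intentionally
	 * mapped across i_size.
	 */
	static bool folio_may_map_large(struct folio *folio,
					struct address_space *mapping,
					pgoff_t file_end)
	{
		return file_end >= folio_next_index(folio) ||
		       shmem_mapping(mapping);
	}

Because the check runs per folio, folios below EOF can still be mapped in
full, while the tail of the range falls back to individual PTEs, which
filemap_map_pages() already clamps to file_end.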

Link: https://lkml.kernel.org/r/20251120161411.859078-1-kirill@shutemov.name
Fixes: 74207de2ba10 ("mm/memory: do not populate page table entries beyond i_size")
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reported-by: Chris Mason <clm@meta.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chris Mason <clm@meta.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c

index 2f1e7e283a5102179e08d5d7cfd0624c924df47b..024b71da5224d7f7ddedb39fd73261f580bd4987 100644
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                        struct folio *folio, unsigned long start,
                        unsigned long addr, unsigned int nr_pages,
                        unsigned long *rss, unsigned short *mmap_miss,
-                       bool can_map_large)
+                       pgoff_t file_end)
 {
+       struct address_space *mapping = folio->mapping;
        unsigned int ref_from_caller = 1;
        vm_fault_t ret = 0;
        struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
        unsigned long addr0;
 
        /*
-        * Map the large folio fully where possible.
+        * Map the large folio fully where possible:
         *
-        * The folio must not cross VMA or page table boundary.
+        *  - The folio is fully within the size of the file or belongs
+        *    to shmem/tmpfs;
+        *  - The folio doesn't cross VMA boundary;
+        *  - The folio doesn't cross page table boundary;
         */
        addr0 = addr - start * PAGE_SIZE;
-       if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+       if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+           folio_within_vma(folio, vmf->vma) &&
            (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
                vmf->pte -= start;
                page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
        unsigned long rss = 0;
        unsigned int nr_pages = 0, folio_type;
        unsigned short mmap_miss = 0, mmap_miss_saved;
-       bool can_map_large;
 
        rcu_read_lock();
        folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
        end_pgoff = min(end_pgoff, file_end);
 
        /*
-        * Do not allow to map with PTEs beyond i_size and with PMD
-        * across i_size to preserve SIGBUS semantics.
+        * Do not allow to map with PMD across i_size to preserve
+        * SIGBUS semantics.
         *
         * Make an exception for shmem/tmpfs that for long time
         * intentionally mapped with PMDs across i_size.
         */
-       can_map_large = shmem_mapping(mapping) ||
-               file_end >= folio_next_index(folio);
-
-       if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+       if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+           filemap_map_pmd(vmf, folio, start_pgoff)) {
                ret = VM_FAULT_NOPAGE;
                goto out;
        }
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                else
                        ret |= filemap_map_folio_range(vmf, folio,
                                        xas.xa_index - folio->index, addr,
-                                       nr_pages, &rss, &mmap_miss,
-                                       can_map_large);
+                                       nr_pages, &rss, &mmap_miss, file_end);
 
                folio_unlock(folio);
        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);