From: Kiryl Shutsemau
Date: Thu, 20 Nov 2025 16:14:11 +0000 (+0000)
Subject: mm/filemap: fix logic around SIGBUS in filemap_map_pages()
X-Git-Tag: v6.18~23^2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7c9580f44f90f7a4c11fc7831efe323ebe446091;p=thirdparty%2Fkernel%2Fstable.git

mm/filemap: fix logic around SIGBUS in filemap_map_pages()

Chris noticed that filemap_map_pages() calculates can_map_large only
once, for the first page in the fault-around range. The value is not
valid for the following pages in the range and must be recalculated.

Instead of recalculating can_map_large on each iteration, pass file_end
down to filemap_map_folio_range() and let it decide what can be mapped.

Link: https://lkml.kernel.org/r/20251120161411.859078-1-kirill@shutemov.name
Fixes: 74207de2ba10 ("mm/memory: do not populate page table entries beyond i_size")
Signed-off-by: Kiryl Shutsemau
Reported-by: Chris Mason
Reviewed-by: Matthew Wilcox (Oracle)
Cc: Al Viro
Cc: Baolin Wang
Cc: Chris Mason
Cc: Christian Brauner
Cc: "Darrick J. Wong"
Cc: Dave Chinner
Cc: David Hildenbrand
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Rik van Riel
Cc: Shakeel Butt
Cc: Suren Baghdasaryan
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---

diff --git a/mm/filemap.c b/mm/filemap.c
index 2f1e7e283a510..024b71da5224d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
 			unsigned long *rss, unsigned short *mmap_miss,
-			bool can_map_large)
+			pgoff_t file_end)
 {
+	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	unsigned long addr0;
 
 	/*
-	 * Map the large folio fully where possible.
+	 * Map the large folio fully where possible:
 	 *
-	 * The folio must not cross VMA or page table boundary.
+	 *  - The folio is fully within size of the file or belong
+	 *    to shmem/tmpfs;
+	 *  - The folio doesn't cross VMA boundary;
+	 *  - The folio doesn't cross page table boundary;
 	 */
 	addr0 = addr - start * PAGE_SIZE;
-	if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    folio_within_vma(folio, vmf->vma) &&
 	    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
 		vmf->pte -= start;
 		page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, folio_type;
 	unsigned short mmap_miss = 0, mmap_miss_saved;
-	bool can_map_large;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	end_pgoff = min(end_pgoff, file_end);
 
 	/*
-	 * Do not allow to map with PTEs beyond i_size and with PMD
-	 * across i_size to preserve SIGBUS semantics.
+	 * Do not allow to map with PMD across i_size to preserve
+	 * SIGBUS semantics.
 	 *
 	 * Make an exception for shmem/tmpfs that for long time
 	 * intentionally mapped with PMDs across i_size.
 	 */
-	can_map_large = shmem_mapping(mapping) ||
-		file_end >= folio_next_index(folio);
-
-	if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss,
-					can_map_large);
+					nr_pages, &rss, &mmap_miss, file_end);
 
 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
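
For readers unfamiliar with the fault-around path, the sketch below is a
minimal userspace model of the bug, not kernel code: the struct, helper
names, and values are invented for illustration. It shows why a
"can map large" flag computed once for the first folio goes stale for
later folios in the range, while passing file_end down allows a correct
per-folio check.

/*
 * Minimal userspace model of the stale-flag problem.  Everything here
 * (struct folio_model, folio_next(), can_map_large()) is invented for
 * illustration and is not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio_model {
	unsigned long index;	/* offset of the folio's first page */
	unsigned long nr;	/* number of pages in the folio */
};

/* Analogue of folio_next_index(): first page offset past the folio. */
static unsigned long folio_next(const struct folio_model *f)
{
	return f->index + f->nr;
}

/* Per-folio check, mirroring the file_end >= folio_next_index() test. */
static bool can_map_large(const struct folio_model *f, unsigned long file_end)
{
	return file_end >= folio_next(f);
}

int main(void)
{
	/* File ends near page offset 6; two folios in the fault-around range. */
	const unsigned long file_end = 6;
	const struct folio_model range[] = {
		{ .index = 0, .nr = 4 },	/* fully below the end of file */
		{ .index = 4, .nr = 4 },	/* crosses the end of file */
	};

	/* Buggy pattern: compute the flag once and reuse it for every folio. */
	bool stale = can_map_large(&range[0], file_end);

	for (unsigned int i = 0; i < 2; i++)
		printf("folio %u: stale flag=%d, per-folio check=%d\n",
		       i, stale, can_map_large(&range[i], file_end));

	return 0;
}

Built with any C99 compiler, it prints matching answers for folio 0 but a
mismatch (stale 1 vs. per-folio 0) for folio 1, which mirrors the case the
patch addresses by moving the check into filemap_map_folio_range().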