git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: report success more often from filemap_map_folio_range()
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Wed, 20 Sep 2023 03:53:35 +0000 (04:53 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 30 Sep 2023 00:20:45 +0000 (17:20 -0700)
Even though we had successfully mapped the relevant page, we would rarely
return success from filemap_map_folio_range().  That leads to falling back
from the VMA lock path to the mmap_lock path, which is a speed &
scalability issue.  Found by inspection.

Link: https://lkml.kernel.org/r/20230920035336.854212-1-willy@infradead.org
Fixes: 617c28ecab22 ("filemap: batch PTE mappings")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c

index 4ea4387053e8ea9b64ae3a5a8935afd50b9ac1f5..f0a15ce1bd1ba1cca4856e05e70d9e44128709b4 100644 (file)
@@ -3503,7 +3503,7 @@ skip:
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
                        folio_ref_add(folio, count);
-                       if (in_range(vmf->address, addr, count))
+                       if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
                }
 
@@ -3517,7 +3517,7 @@ skip:
        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
                folio_ref_add(folio, count);
-               if (in_range(vmf->address, addr, count))
+               if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
        }