git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: refactor vm_map_pages to use vm_insert_pages
author Justin Green <greenjustin@chromium.org>
Wed, 28 Jan 2026 22:56:47 +0000 (17:56 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 6 Feb 2026 23:47:15 +0000 (15:47 -0800)
vm_map_pages currently calls vm_insert_page on each individual page in
the mapping, which creates significant overhead because we are repeatedly
taking the page-table spinlock.  Instead, we should batch-insert pages using
vm_insert_pages, which amortizes the cost of the spinlock.

Tested through watching hardware accelerated video on a MTK ChromeOS
device.  This particular path maps both a V4L2 buffer and a GEM allocated
buffer into userspace and converts the contents from one pixel format to
another.  Both vb2_mmap() and mtk_gem_object_mmap() exercise this pathway.

Link: https://lkml.kernel.org/r/20260128225648.2938636-1-greenjustin@chromium.org
Signed-off-by: Justin Green <greenjustin@chromium.org>
Acked-by: Brian Geffon <bgeffon@google.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Arjun Roy <arjunroy@google.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 187f16b7e9961f187d79fb8b16365349c5c35740..2a347e31a077a6707339e035965930f614126b9a 100644 (file)
@@ -2499,7 +2499,6 @@ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 {
        unsigned long count = vma_pages(vma);
        unsigned long uaddr = vma->vm_start;
-       int ret, i;
 
        /* Fail if the user requested offset is beyond the end of the object */
        if (offset >= num)
@@ -2509,14 +2508,7 @@ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
        if (count > num - offset)
                return -ENXIO;
 
-       for (i = 0; i < count; i++) {
-               ret = vm_insert_page(vma, uaddr, pages[offset + i]);
-               if (ret < 0)
-                       return ret;
-               uaddr += PAGE_SIZE;
-       }
-
-       return 0;
+       return vm_insert_pages(vma, uaddr, pages + offset, &count);
 }
 
 /**