git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/memory.c: use folios in __access_remote_vm()
Author: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Wed, 9 Jul 2025 19:40:17 +0000 (12:40 -0700)
Committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 20 Jul 2025 01:59:51 +0000 (18:59 -0700)
Use kmap_local_folio() instead of kmap_local_page().  Replaces 2 calls to
compound_head() with one.

This prepares us for the removal of unmap_and_put_page().

Link: https://lkml.kernel.org/r/20250709194017.927978-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Jordan Rome <linux@jordanrome.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index cb2f1296854a60dfd5c98887b041664aa5969050..bc27b1990fcba34a49f8ae9b556e86878cc9a06b 100644 (file)
@@ -6691,6 +6691,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
        while (len) {
                int bytes, offset;
                void *maddr;
+               struct folio *folio;
                struct vm_area_struct *vma = NULL;
                struct page *page = get_user_page_vma_remote(mm, addr,
                                                             gup_flags, &vma);
@@ -6722,21 +6723,22 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
                        if (bytes <= 0)
                                break;
                } else {
+                       folio = page_folio(page);
                        bytes = len;
                        offset = addr & (PAGE_SIZE-1);
                        if (bytes > PAGE_SIZE-offset)
                                bytes = PAGE_SIZE-offset;
 
-                       maddr = kmap_local_page(page);
+                       maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
                        if (write) {
                                copy_to_user_page(vma, page, addr,
                                                  maddr + offset, buf, bytes);
-                               set_page_dirty_lock(page);
+                               folio_mark_dirty_lock(folio);
                        } else {
                                copy_from_user_page(vma, page, addr,
                                                    buf, maddr + offset, bytes);
                        }
-                       unmap_and_put_page(page, maddr);
+                       folio_release_kmap(folio, maddr);
                }
                len -= bytes;
                buf += bytes;