mm: madvise: use walk_page_range_vma() instead of walk_page_range()
author	Barry Song <v-songbaohua@oppo.com>
	Thu, 5 Jun 2025 08:31:44 +0000 (20:31 +1200)
committer	Andrew Morton <akpm@linux-foundation.org>
	Thu, 10 Jul 2025 05:41:58 +0000 (22:41 -0700)
We've already found the VMA within madvise_walk_vmas() before calling
the behavior-specific functions such as madvise_free_single_vma(), so
calling walk_page_range(), which performs find_vma() all over again, is
unnecessary.  The redundant lookup also blocks potential optimizations
in those madvise callbacks, particularly the use of dedicated per-VMA
locking.
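
For reference, the two pagewalk entry points differ only in how the VMA
is obtained.  A simplified sketch of the interfaces declared in
include/linux/pagewalk.h (the signatures match the kernel tree; the
comments are explanatory, not the actual implementation):

	/*
	 * Walks [start, end) across the whole address space, so it
	 * must look up every VMA covering the range internally.
	 */
	int walk_page_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, const struct mm_walk_ops *ops,
			    void *private);

	/*
	 * Walks [start, end) within a single, already-resolved VMA:
	 * no repeated lookup, and the walk cannot cross VMA boundaries.
	 */
	int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
				unsigned long end, const struct mm_walk_ops *ops,
				void *private);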

[v-songbaohua@oppo.com: revert the walk_page_range_vma change for MADV_GUARD_INSTALL]
Link: https://lkml.kernel.org/r/20250609105513.10901-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20250605083144.43046-1-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Jann Horn <jannh@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Tangquan Zheng <zhengtangquan@oppo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/madvise.c

index 1d44a35ae85cf104c1c34fdd800ba6723491bda1..f543ef45f6a40999ec23189bb726ae4d187a8843 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -282,7 +282,7 @@ static long madvise_willneed(struct vm_area_struct *vma,
        *prev = vma;
 #ifdef CONFIG_SWAP
        if (!file) {
-               walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
+               walk_page_range_vma(vma, start, end, &swapin_walk_ops, vma);
                lru_add_drain(); /* Push any new pages onto the LRU now */
                return 0;
        }
@@ -582,7 +582,7 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
        };
 
        tlb_start_vma(tlb, vma);
-       walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+       walk_page_range_vma(vma, addr, end, &cold_walk_ops, &walk_private);
        tlb_end_vma(tlb, vma);
 }
 
@@ -620,7 +620,7 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
        };
 
        tlb_start_vma(tlb, vma);
-       walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+       walk_page_range_vma(vma, addr, end, &cold_walk_ops, &walk_private);
        tlb_end_vma(tlb, vma);
 }
 
@@ -827,7 +827,7 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior,
 
        mmu_notifier_invalidate_range_start(&range);
        tlb_start_vma(tlb, vma);
-       walk_page_range(vma->vm_mm, range.start, range.end,
+       walk_page_range_vma(vma, range.start, range.end,
                        &madvise_free_walk_ops, tlb);
        tlb_end_vma(tlb, vma);
        mmu_notifier_invalidate_range_end(&range);
@@ -1246,7 +1246,7 @@ static long madvise_guard_remove(struct vm_area_struct *vma,
        if (!is_valid_guard_vma(vma, /* allow_locked = */true))
                return -EINVAL;
 
-       return walk_page_range(vma->vm_mm, start, end,
+       return walk_page_range_vma(vma, start, end,
                               &guard_remove_walk_ops, NULL);
 }