mm/memory: inline unmap_mapping_range_vma() into unmap_mapping_range_tree()
author David Hildenbrand (Arm) <david@kernel.org>
Fri, 27 Feb 2026 20:08:34 +0000 (21:08 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:13 +0000 (13:53 -0700)
Let's reduce the number of unmap-related functions that cause confusion by
inlining unmap_mapping_range_vma() into its single caller.  The end result
looks pretty readable.
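
For reference, with the helper folded in, the interval-tree walk ends up looking roughly like this (a sketch assembled from the diff below, not a verbatim excerpt of the tree):

static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
                                            pgoff_t first_index,
                                            pgoff_t last_index,
                                            struct zap_details *details)
{
        struct vm_area_struct *vma;
        pgoff_t vba, vea, zba, zea;
        unsigned long start, size;
        struct mmu_gather tlb;

        vma_interval_tree_foreach(vma, root, first_index, last_index) {
                /* Clamp the VMA's file-page range to the requested range. */
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma) - 1;
                zba = max(first_index, vba);
                zea = min(last_index, vea);

                /* Translate the clamped page offsets into a virtual start + size. */
                start = ((zba - vba) << PAGE_SHIFT) + vma->vm_start;
                size = (zea - zba + 1) << PAGE_SHIFT;

                /* Formerly unmap_mapping_range_vma(): one mmu_gather per VMA. */
                tlb_gather_mmu(&tlb, vma->vm_mm);
                zap_page_range_single_batched(&tlb, vma, start, size, details);
                tlb_finish_mmu(&tlb);
        }
}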

Link: https://lkml.kernel.org/r/20260227200848.114019-4-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index fbd02d5bd520d283c4ca11d22a0830cbd43b81b1..f1c5d6b01a625a3335ff5e8119ad2504a23fc10c 100644
@@ -4221,18 +4221,6 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
        return wp_page_copy(vmf);
 }
 
-static void unmap_mapping_range_vma(struct vm_area_struct *vma,
-               unsigned long start_addr, unsigned long end_addr,
-               struct zap_details *details)
-{
-       struct mmu_gather tlb;
-
-       tlb_gather_mmu(&tlb, vma->vm_mm);
-       zap_page_range_single_batched(&tlb, vma, start_addr,
-                                     end_addr - start_addr, details);
-       tlb_finish_mmu(&tlb);
-}
-
 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
                                            pgoff_t first_index,
                                            pgoff_t last_index,
@@ -4240,17 +4228,20 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
                                             struct zap_details *details)
 {
        struct vm_area_struct *vma;
        pgoff_t vba, vea, zba, zea;
+       unsigned long start, size;
+       struct mmu_gather tlb;
 
        vma_interval_tree_foreach(vma, root, first_index, last_index) {
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma) - 1;
                zba = max(first_index, vba);
                zea = min(last_index, vea);
+               start = ((zba - vba) << PAGE_SHIFT) + vma->vm_start;
+               size = (zea - zba + 1) << PAGE_SHIFT;
 
-               unmap_mapping_range_vma(vma,
-                       ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
-                       ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
-                               details);
+               tlb_gather_mmu(&tlb, vma->vm_mm);
+               zap_page_range_single_batched(&tlb, vma, start, size, details);
+               tlb_finish_mmu(&tlb);
        }
 }
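
As a quick sanity check on the address arithmetic (not part of the patch itself): the new start/size pair covers exactly the range previously passed as start_addr/end_addr, since

        end_addr - start_addr
                = (((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start)
                - (((zba - vba) << PAGE_SHIFT) + vma->vm_start)
                = (zea - zba + 1) << PAGE_SHIFT

which is precisely the size handed to zap_page_range_single_batched().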