From: David Hildenbrand (Arm) Date: Fri, 27 Feb 2026 20:08:42 +0000 (+0100) Subject: mm/memory: inline unmap_page_range() into __zap_vma_range() X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=3a31d08d242aeb104814c93a1b93d09e483ddf8e;p=thirdparty%2Flinux.git mm/memory: inline unmap_page_range() into __zap_vma_range() Let's inline it into the single caller to reduce the number of confusing unmap/zap helpers. Get rid of the unnecessary BUG_ON(). [david@kernel.org: call the local variable simply "addr", per Lorenzo] Link: https://lkml.kernel.org/r/f7732d1c-0e85-4a14-948a-912c417018b5@kernel.org Link: https://lkml.kernel.org/r/20260227200848.114019-12-david@kernel.org Signed-off-by: David Hildenbrand (Arm) Reviewed-by: Lorenzo Stoakes (Oracle) Cc: Alexander Gordeev Cc: Alexei Starovoitov Cc: Alice Ryhl Cc: Andrii Nakryiko Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnd Bergmann Cc: Arve Hjønnevåg Cc: "Borislav Petkov (AMD)" Cc: Carlos Llamas Cc: Christian Borntraeger Cc: Christian Brauner Cc: Claudio Imbrenda Cc: Daniel Borkmann Cc: Dave Airlie Cc: David Ahern Cc: David Rientjes Cc: David S. 
Miller Cc: Dimitri Sivanich Cc: Eric Dumazet Cc: Gerald Schaefer Cc: Greg Kroah-Hartman Cc: Hartley Sweeten Cc: Heiko Carstens Cc: Ian Abbott Cc: Ingo Molnar Cc: Jakub Kicinski Cc: Jani Nikula Cc: Jann Horn Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Joonas Lahtinen Cc: Leon Romanovsky Cc: Liam Howlett Cc: Madhavan Srinivasan Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: Miguel Ojeda Cc: Mike Rapoport Cc: Namhyung Kim Cc: Neal Cardwell Cc: Paolo Abeni Cc: Pedro Falcato Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Shakeel Butt Cc: Suren Baghdasaryan Cc: Todd Kjos Cc: Tvrtko Ursulin Cc: Vasily Gorbik Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- diff --git a/mm/memory.c b/mm/memory.c index d1fd3cdd677a4..8c77a765036fd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2056,25 +2056,6 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, return addr; } -static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, - unsigned long addr, unsigned long end, - struct zap_details *details) -{ - pgd_t *pgd; - unsigned long next; - - BUG_ON(addr >= end); - tlb_start_vma(tlb, vma); - pgd = pgd_offset(vma->vm_mm, addr); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - next = zap_p4d_range(tlb, vma, pgd, addr, next, details); - } while (pgd++, addr = next, addr != end); - tlb_end_vma(tlb, vma); -} - static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct zap_details *details) @@ -2100,7 +2081,18 @@ static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma, return; __unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags); } else { - unmap_page_range(tlb, vma, start, end, details); + unsigned long next, addr = start; + pgd_t *pgd; + + tlb_start_vma(tlb, vma); + pgd = pgd_offset(vma->vm_mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + 
continue; + next = zap_p4d_range(tlb, vma, pgd, addr, next, details); + } while (pgd++, addr = next, addr != end); + tlb_end_vma(tlb, vma); } }