mm/memory: use __zap_vma_range() in zap_vma_for_reaping()
author David Hildenbrand (Arm) <david@kernel.org>
Fri, 27 Feb 2026 20:08:41 +0000 (21:08 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:14 +0000 (13:53 -0700)
Let's call __zap_vma_range() instead of unmap_page_range() to prepare for
further cleanups.

To keep the existing behavior of not calling uprobe_munmap(), which
could block, add a new "reaping" member to zap_details and use it.

Likely we should handle the possible blocking in uprobe_munmap()
differently, but for now keep it unchanged.
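
In other words, a condensed, illustrative sketch of the resulting flow
(the "..." elide code shown in full in the diff below):

    int zap_vma_for_reaping(struct vm_area_struct *vma)
    {
            struct zap_details details = {
                    .reaping = true,
            };
            ...
            __zap_vma_range(&tlb, vma, range.start, range.end, &details);
            ...
    }

    static void __zap_vma_range(struct mmu_gather *tlb,
                    struct vm_area_struct *vma, unsigned long start,
                    unsigned long end, struct zap_details *details)
    {
            const bool reaping = details && details->reaping;
            ...
            /* uprobe_munmap() might sleep, so skip it when reaping. */
            if (vma->vm_file && !reaping)
                    uprobe_munmap(vma, start, end);
            ...
    }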

Link: https://lkml.kernel.org/r/20260227200848.114019-11-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cb4f5fbccaf07309ca1e84880ab094829ee5c119..488a144c9161f8e7294ae4c4a8b0b887fadd9cf3 100644 (file)
@@ -2769,6 +2769,7 @@ struct zap_details {
        struct folio *single_folio;     /* Locked folio to be unmapped */
        bool skip_cows;                 /* Do not zap COWed private pages */
        bool reclaim_pt;                /* Need reclaim page tables? */
+       bool reaping;                   /* Reaping, do not block. */
        zap_flags_t zap_flags;          /* Extra flags for zapping */
 };
 
diff --git a/mm/memory.c b/mm/memory.c
index c66b7b8b47eb84d128fe5473b458dfb60afb97ce..d1fd3cdd677a48e3821ad015168177e864875148 100644 (file)
@@ -2079,14 +2079,18 @@ static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct zap_details *details)
 {
+       const bool reaping = details && details->reaping;
+
        VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end));
 
-       if (vma->vm_file)
+       /* uprobe_munmap() might sleep, so skip it when reaping. */
+       if (vma->vm_file && !reaping)
                uprobe_munmap(vma, start, end);
 
        if (unlikely(is_vm_hugetlb_page(vma))) {
                zap_flags_t zap_flags = details ? details->zap_flags : 0;
 
+               VM_WARN_ON_ONCE(reaping);
                /*
                 * vm_file will be NULL when we fail early while instantiating
                 * a new mapping. In this case, no pages were mapped yet and
@@ -2111,11 +2115,12 @@ static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  */
 int zap_vma_for_reaping(struct vm_area_struct *vma)
 {
+       struct zap_details details = {
+               .reaping = true,
+       };
        struct mmu_notifier_range range;
        struct mmu_gather tlb;
 
-       VM_WARN_ON_ONCE(is_vm_hugetlb_page(vma));
-
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                vma->vm_start, vma->vm_end);
        tlb_gather_mmu(&tlb, vma->vm_mm);
@@ -2123,7 +2128,7 @@ int zap_vma_for_reaping(struct vm_area_struct *vma)
                tlb_finish_mmu(&tlb);
                return -EBUSY;
        }
-       unmap_page_range(&tlb, vma, range.start, range.end, NULL);
+       __zap_vma_range(&tlb, vma, range.start, range.end, &details);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
        return 0;
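
A hypothetical caller sketch, for illustration only (not part of this
patch): reap_mm() is an invented name, modeled on how a reaper such as
the OOM reaper might drive zap_vma_for_reaping() and handle its -EBUSY
return.

    /* Illustrative only: walk a dying mm and reap what we can. */
    static bool reap_mm(struct mm_struct *mm)
    {
            VMA_ITERATOR(vmi, mm, 0);
            struct vm_area_struct *vma;
            bool complete = true;

            for_each_vma(vmi, vma) {
                    /* Hugetlb VMAs are not reaped; see VM_WARN_ON_ONCE(reaping). */
                    if (is_vm_hugetlb_page(vma))
                            continue;
                    /* -EBUSY: the zap could not proceed without blocking. */
                    if (zap_vma_for_reaping(vma) == -EBUSY)
                            complete = false;
            }
            return complete;
    }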