mm/oom_kill: factor out zapping of VMA into zap_vma_for_reaping()
author David Hildenbrand (Arm) <david@kernel.org>
Fri, 27 Feb 2026 20:08:37 +0000 (21:08 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:13 +0000 (13:53 -0700)
Let's factor out the zapping of a VMA so that unmap_page_range() can become
a static function, and so oom reaping has a clean interface to call.

Note that hugetlb is not supported, because it would require a bunch of
hugetlb-specific further actions (see zap_page_range_single_batched()).
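
For illustration, a minimal sketch of how a caller would drive the new
helper (not part of this patch; the VMA walk and eligibility checks here
are assumptions modeled on __oom_reap_task_mm() below):

    static bool reap_mm_sketch(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;
            VMA_ITERATOR(vmi, mm, 0);
            bool ret = true;

            for_each_vma(vmi, vma) {
                    /* hugetlb is not supported by zap_vma_for_reaping() */
                    if (is_vm_hugetlb_page(vma))
                            continue;
                    /* only reap private memory, as __oom_reap_task_mm() does */
                    if (!vma_is_anonymous(vma) && (vma->vm_flags & VM_SHARED))
                            continue;
                    /* returns -EBUSY if an mmu notifier would have to block */
                    if (zap_vma_for_reaping(vma))
                            ret = false;
            }
            return ret;
    }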

Link: https://lkml.kernel.org/r/20260227200848.114019-7-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/memory.c
mm/oom_kill.c

diff --git a/mm/internal.h b/mm/internal.h
index 84167b0570c963c52b092d074cba6dc7423fdf59..b0ac179d3a5ddee73eab88945740339fadd3fb58 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -536,13 +536,10 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
 }
 
 struct zap_details;
-void unmap_page_range(struct mmu_gather *tlb,
-                            struct vm_area_struct *vma,
-                            unsigned long addr, unsigned long end,
-                            struct zap_details *details);
 void zap_page_range_single_batched(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long addr,
                unsigned long size, struct zap_details *details);
+int zap_vma_for_reaping(struct vm_area_struct *vma);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
                           gfp_t gfp);
 
diff --git a/mm/memory.c b/mm/memory.c
index 24b768885379355288b47ebb65dd2b17061f1a87..dbc9a6d0074c148176f5b9d1c1cdc75196824088 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2054,10 +2054,9 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
        return addr;
 }
 
-void unmap_page_range(struct mmu_gather *tlb,
-                            struct vm_area_struct *vma,
-                            unsigned long addr, unsigned long end,
-                            struct zap_details *details)
+static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+               unsigned long addr, unsigned long end,
+               struct zap_details *details)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -2115,6 +2114,35 @@ static void unmap_single_vma(struct mmu_gather *tlb,
        }
 }
 
+/**
+ * zap_vma_for_reaping - zap all page table entries in the vma without blocking
+ * @vma: The vma to zap.
+ *
+ * Zap all page table entries in the vma without blocking for use by the oom
+ * killer. Hugetlb vmas are not supported.
+ *
+ * Returns: 0 on success, -EBUSY if we would have to block.
+ */
+int zap_vma_for_reaping(struct vm_area_struct *vma)
+{
+       struct mmu_notifier_range range;
+       struct mmu_gather tlb;
+
+       VM_WARN_ON_ONCE(is_vm_hugetlb_page(vma));
+
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
+                               vma->vm_start, vma->vm_end);
+       tlb_gather_mmu(&tlb, vma->vm_mm);
+       if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
+               tlb_finish_mmu(&tlb);
+               return -EBUSY;
+       }
+       unmap_page_range(&tlb, vma, range.start, range.end, NULL);
+       mmu_notifier_invalidate_range_end(&range);
+       tlb_finish_mmu(&tlb);
+       return 0;
+}
+
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0ba56fcd10d5ef582075bd4c8a525fbad67f9adb..54b7a8fe5136657ad92c46f87cdfcdf8eefafef0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -548,21 +548,8 @@ static bool __oom_reap_task_mm(struct mm_struct *mm)
                 * count elevated without a good reason.
                 */
                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
-                       struct mmu_notifier_range range;
-                       struct mmu_gather tlb;
-
-                       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0,
-                                               mm, vma->vm_start,
-                                               vma->vm_end);
-                       tlb_gather_mmu(&tlb, mm);
-                       if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
-                               tlb_finish_mmu(&tlb);
+                       if (zap_vma_for_reaping(vma))
                                ret = false;
-                               continue;
-                       }
-                       unmap_page_range(&tlb, vma, range.start, range.end, NULL);
-                       mmu_notifier_invalidate_range_end(&range);
-                       tlb_finish_mmu(&tlb);
                }
        }
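
Usage note: when zap_vma_for_reaping() returns -EBUSY, __oom_reap_task_mm()
reports failure and the oom reaper simply tries the whole mm again later.
A rough sketch of that retry pattern (simplified; the actual loop, retry
constant, and locking live in oom_reap_task() in mm/oom_kill.c, where
oom_reap_task_mm() is assumed to wrap __oom_reap_task_mm()):

    static void oom_reap_retry_sketch(struct task_struct *tsk,
                                      struct mm_struct *mm)
    {
            int attempts = 0;

            /* oom_reap_task_mm() returns false if any VMA hit -EBUSY */
            while (attempts++ < 10 && !oom_reap_task_mm(tsk, mm))
                    schedule_timeout_idle(HZ / 10);
    }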