From: David Hildenbrand (Arm) Date: Fri, 27 Feb 2026 20:08:37 +0000 (+0100) Subject: mm/oom_kill: factor out zapping of VMA into zap_vma_for_reaping() X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ba25127a8f0cb501c2d8e8879e38d203d044e50c;p=thirdparty%2Flinux.git mm/oom_kill: factor out zapping of VMA into zap_vma_for_reaping() Let's factor it out so we can turn unmap_page_range() into a static function instead, and so oom reaping has a clean interface to call. Note that hugetlb is not supported, because it would require a bunch of hugetlb-specific further actions (see zap_page_range_single_batched()). Link: https://lkml.kernel.org/r/20260227200848.114019-7-david@kernel.org Signed-off-by: David Hildenbrand (Arm) Reviewed-by: Lorenzo Stoakes (Oracle) Cc: Alexander Gordeev Cc: Alexei Starovoitov Cc: Alice Ryhl Cc: Andrii Nakryiko Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnd Bergmann Cc: Arve Hjønnevåg Cc: "Borislav Petkov (AMD)" Cc: Carlos Llamas Cc: Christian Borntraeger Cc: Christian Brauner Cc: Claudio Imbrenda Cc: Daniel Borkmann Cc: Dave Airlie Cc: David Ahern Cc: David Rientjes Cc: David S. 
Miller Cc: Dimitri Sivanich Cc: Eric Dumazet Cc: Gerald Schaefer Cc: Greg Kroah-Hartman Cc: Hartley Sweeten Cc: Heiko Carstens Cc: Ian Abbott Cc: Ingo Molnar Cc: Jakub Kicinski Cc: Jani Nikula Cc: Jann Horn Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Joonas Lahtinen Cc: Leon Romanovsky Cc: Liam Howlett Cc: Madhavan Srinivasan Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: Miguel Ojeda Cc: Mike Rapoport Cc: Namhyung Kim Cc: Neal Cardwell Cc: Paolo Abeni Cc: Pedro Falcato Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Shakeel Butt Cc: Suren Baghdasaryan Cc: Todd Kjos Cc: Tvrtko Ursulin Cc: Vasily Gorbik Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- diff --git a/mm/internal.h b/mm/internal.h index 84167b0570c96..b0ac179d3a5dd 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -536,13 +536,10 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp) } struct zap_details; -void unmap_page_range(struct mmu_gather *tlb, - struct vm_area_struct *vma, - unsigned long addr, unsigned long end, - struct zap_details *details); void zap_page_range_single_batched(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long size, struct zap_details *details); +int zap_vma_for_reaping(struct vm_area_struct *vma); int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, gfp_t gfp); diff --git a/mm/memory.c b/mm/memory.c index 24b7688853793..dbc9a6d0074c1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2054,10 +2054,9 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, return addr; } -void unmap_page_range(struct mmu_gather *tlb, - struct vm_area_struct *vma, - unsigned long addr, unsigned long end, - struct zap_details *details) +static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + struct zap_details *details) { pgd_t *pgd; unsigned long next; @@ -2115,6 +2114,35 @@ static void 
unmap_single_vma(struct mmu_gather *tlb, } } +/** + * zap_vma_for_reaping - zap all page table entries in the vma without blocking + * @vma: The vma to zap. + * + * Zap all page table entries in the vma without blocking for use by the oom + * killer. Hugetlb vmas are not supported. + * + * Returns: 0 on success, -EBUSY if we would have to block. + */ +int zap_vma_for_reaping(struct vm_area_struct *vma) +{ + struct mmu_notifier_range range; + struct mmu_gather tlb; + + VM_WARN_ON_ONCE(is_vm_hugetlb_page(vma)); + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, + vma->vm_start, vma->vm_end); + tlb_gather_mmu(&tlb, vma->vm_mm); + if (mmu_notifier_invalidate_range_start_nonblock(&range)) { + tlb_finish_mmu(&tlb); + return -EBUSY; + } + unmap_page_range(&tlb, vma, range.start, range.end, NULL); + mmu_notifier_invalidate_range_end(&range); + tlb_finish_mmu(&tlb); + return 0; +} + /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 0ba56fcd10d5e..54b7a8fe51366 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -548,21 +548,8 @@ static bool __oom_reap_task_mm(struct mm_struct *mm) * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { - struct mmu_notifier_range range; - struct mmu_gather tlb; - - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, - mm, vma->vm_start, - vma->vm_end); - tlb_gather_mmu(&tlb, mm); - if (mmu_notifier_invalidate_range_start_nonblock(&range)) { - tlb_finish_mmu(&tlb); + if (zap_vma_for_reaping(vma)) ret = false; - continue; - } - unmap_page_range(&tlb, vma, range.start, range.end, NULL); - mmu_notifier_invalidate_range_end(&range); - tlb_finish_mmu(&tlb); } }