mm/memory: remove "zap_details" parameter from zap_page_range_single()
author	David Hildenbrand (Arm) <david@kernel.org>
	Fri, 27 Feb 2026 20:08:33 +0000 (21:08 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
	Sun, 5 Apr 2026 20:53:13 +0000 (13:53 -0700)
Nobody except mm/memory.c should really set that parameter to non-NULL.  So
let's just drop it and make unmap_mapping_range_vma() use
zap_page_range_single_batched() instead.

[david@kernel.org: format on a single line]
Link: https://lkml.kernel.org/r/8a27e9ac-2025-4724-a46d-0a7c90894ba7@kernel.org
Link: https://lkml.kernel.org/r/20260227200848.114019-3-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Puranjay Mohan <puranjay@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/mm/gmap_helpers.c
drivers/android/binder_alloc.c
include/linux/mm.h
kernel/bpf/arena.c
kernel/events/core.c
mm/madvise.c
mm/memory.c
net/ipv4/tcp.c
rust/kernel/mm/virt.rs

diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index dea83e3103e5d8b7c727d07dec63158c4e941fe2..ae2d59a19313fe37d429b3d8cd4de60a69856eb8 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
                if (!vma)
                        return;
                if (!is_vm_hugetlb_page(vma))
-                       zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);
+                       zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
                vmaddr = vma->vm_end;
        }
 }
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 241f16a9b63d1215dc339345786b56fe86e0490a..dd2046bd5cdee7beba245087235c0b193153b6fd 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        if (vma) {
                trace_binder_unmap_user_start(alloc, index);
 
-               zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
+               zap_page_range_single(vma, page_addr, PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
        }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08b743aab92acbdc60008f121f0178c27ef4bd71..6512d70c585267af5da58dab86776b5291ea2f39 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2804,11 +2804,10 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                  unsigned long size);
 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
-                          unsigned long size, struct zap_details *details);
+                          unsigned long size);
 static inline void zap_vma_pages(struct vm_area_struct *vma)
 {
-       zap_page_range_single(vma, vma->vm_start,
-                             vma->vm_end - vma->vm_start, NULL);
+       zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 }
 struct mmu_notifier_range;
 
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index f355cf1c1a1693c5ace9e444069c421506b0cc16..19cca936eb9dd22f8a7869e9e3cbb0ca90b559a9 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -656,8 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
        guard(mutex)(&arena->lock);
        /* iterate link list under lock */
        list_for_each_entry(vml, &arena->vma_list, head)
-               zap_page_range_single(vml->vma, uaddr,
-                                     PAGE_SIZE * page_cnt, NULL);
+               zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
 }
 
 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 89b40e4397177310d2350c5fc5f941ec2b726712..2ecdaabf1b4ddcc4a1d986fb8b6b02724c22d648 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7213,7 +7213,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
 #ifdef CONFIG_MMU
        /* Clear any partial mappings on error. */
        if (err)
-               zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
+               zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
 #endif
 
        return err;
diff --git a/mm/madvise.c b/mm/madvise.c
index 1313166c5514403466cfa85ed80c9e6d0f545c3b..e4a2728593a8416b00f2292a53c6820efaaddf30 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1193,8 +1193,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
                 * OK some of the range have non-guard pages mapped, zap
                 * them. This leaves existing guard pages in place.
                 */
-               zap_page_range_single(vma, range->start,
-                               range->end - range->start, NULL);
+               zap_page_range_single(vma, range->start, range->end - range->start);
        }
 
        /*
diff --git a/mm/memory.c b/mm/memory.c
index f78ab3869f8db2c82d03b07d01064e75240a3812..fbd02d5bd520d283c4ca11d22a0830cbd43b81b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2203,17 +2203,16 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
-               unsigned long size, struct zap_details *details)
+               unsigned long size)
 {
        struct mmu_gather tlb;
 
        tlb_gather_mmu(&tlb, vma->vm_mm);
-       zap_page_range_single_batched(&tlb, vma, address, size, details);
+       zap_page_range_single_batched(&tlb, vma, address, size, NULL);
        tlb_finish_mmu(&tlb);
 }
 
@@ -2235,7 +2234,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                        !(vma->vm_flags & VM_PFNMAP))
                return;
 
-       zap_page_range_single(vma, address, size, NULL);
+       zap_page_range_single(vma, address, size);
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
@@ -3003,7 +3002,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
         * maintain page reference counts, and callers may free
         * pages due to the error. So zap it early.
         */
-       zap_page_range_single(vma, addr, size, NULL);
+       zap_page_range_single(vma, addr, size);
        return error;
 }
 
@@ -4226,7 +4225,12 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
                unsigned long start_addr, unsigned long end_addr,
                struct zap_details *details)
 {
-       zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
+       struct mmu_gather tlb;
+
+       tlb_gather_mmu(&tlb, vma->vm_mm);
+       zap_page_range_single_batched(&tlb, vma, start_addr,
+                                     end_addr - start_addr, details);
+       tlb_finish_mmu(&tlb);
 }
 
 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
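
unmap_mapping_range_vma() is now the only caller that passes a non-NULL
struct zap_details, so it sets up the mmu_gather itself and calls the
batched variant directly, as the hunk above shows. A minimal sketch of
that pattern (the wrapper name is illustrative, not part of this patch):

	static void zap_range_with_details(struct vm_area_struct *vma,
			unsigned long start, unsigned long end,
			struct zap_details *details)
	{
		struct mmu_gather tlb;

		/* Batch TLB invalidation and page freeing across the zap. */
		tlb_gather_mmu(&tlb, vma->vm_mm);
		zap_page_range_single_batched(&tlb, vma, start,
					      end - start, details);
		tlb_finish_mmu(&tlb);
	}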
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 202a4e57a218851ed5bfa433896913e010e03af9..89c962672e51725141068673c8f88f0428f54b7a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2105,7 +2105,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
                maybe_zap_len = total_bytes_to_map -  /* All bytes to map */
                                *length + /* Mapped or pending */
                                (pages_remaining * PAGE_SIZE); /* Failed map. */
-               zap_page_range_single(vma, *address, maybe_zap_len, NULL);
+               zap_page_range_single(vma, *address, maybe_zap_len);
                err = 0;
        }
 
@@ -2270,8 +2270,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
        total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
        if (total_bytes_to_map) {
                if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
-                       zap_page_range_single(vma, address, total_bytes_to_map,
-                                             NULL);
+                       zap_page_range_single(vma, address, total_bytes_to_map);
                zc->length = total_bytes_to_map;
                zc->recv_skip_hint = 0;
        } else {
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index da21d65ccd203f3a741b2bf17113930d7ccdd8d4..6bfd91cfa1f4694663782cc5fd1d42b558f326c6 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -123,9 +123,7 @@ impl VmaRef {
         // SAFETY: By the type invariants, the caller has read access to this VMA, which is
         // sufficient for this method call. This method has no requirements on the vma flags. The
         // address range is checked to be within the vma.
-        unsafe {
-            bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
-        };
+        unsafe { bindings::zap_page_range_single(self.as_ptr(), address, size) };
     }
 
     /// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise