git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: rename zap_vma_ptes() to zap_special_vma_range()
authorDavid Hildenbrand (Arm) <david@kernel.org>
Fri, 27 Feb 2026 20:08:46 +0000 (21:08 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:15 +0000 (13:53 -0700)
zap_vma_ptes() is the only zapping function we export to modules.

It's essentially a wrapper around zap_vma_range(), however, with some
safety checks:
* That the passed range fits fully into the VMA
* That it's only used for VM_PFNMAP

We will add support for VM_MIXEDMAP next, so use the more-generic term
"special vma", although "special" is a bit overloaded.  Maybe we'll later
just support any VM_SPECIAL flag.

While at it, improve the kerneldoc.

Link: https://lkml.kernel.org/r/20260227200848.114019-16-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Acked-by: Leon Romanovsky <leon@kernel.org> [drivers/infiniband]
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/kernel/cpu/sgx/encl.c
drivers/comedi/comedi_fops.c
drivers/gpu/drm/i915/i915_mm.c
drivers/infiniband/core/uverbs_main.c
drivers/misc/sgi-gru/grumain.c
include/linux/mm.h
mm/memory.c

index ac60ebde5d9bcbdf756cbd14b34f06e936e7d140..3f0222d10f6e606719b3969663fb68fa6cbce9de 100644 (file)
@@ -1220,7 +1220,7 @@ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
 
                        ret = sgx_encl_find(encl_mm->mm, addr, &vma);
                        if (!ret && encl == vma->vm_private_data)
-                               zap_vma_ptes(vma, addr, PAGE_SIZE);
+                               zap_special_vma_range(vma, addr, PAGE_SIZE);
 
                        mmap_read_unlock(encl_mm->mm);
 
index 48a8a607a84c266fbb173a1eddedb30efb787296..b91e0b5ac394deed55f76e4dcde71bdc2999bd5c 100644 (file)
@@ -2588,7 +2588,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
         * remap_pfn_range() because we call remap_pfn_range() in a loop.
         */
        if (retval)
-               zap_vma_ptes(vma, vma->vm_start, size);
+               zap_special_vma_range(vma, vma->vm_start, size);
 #endif
 
        if (retval == 0) {
index c33bd3d830699928397c4a0f369b9137c59b1f6b..fd89e7c7d8d6f7501a7c5d5f9da1e1f276b1fdd0 100644 (file)
@@ -108,7 +108,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
 
        err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
        if (unlikely(err)) {
-               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               zap_special_vma_range(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
                return err;
        }
 
@@ -156,7 +156,7 @@ int remap_io_sg(struct vm_area_struct *vma,
 
        err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
        if (unlikely(err)) {
-               zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+               zap_special_vma_range(vma, addr, r.pfn << PAGE_SHIFT);
                return err;
        }
 
index 7b68967a6301e4832140c16a13a33db1e47be5e3..f5837da47299c1294c0bfc94ddbf0768a2bc75cb 100644 (file)
@@ -756,7 +756,7 @@ out_zap:
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
-       zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+       zap_special_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 }
 
 static void rdma_umap_close(struct vm_area_struct *vma)
@@ -782,7 +782,7 @@ static void rdma_umap_close(struct vm_area_struct *vma)
 }
 
 /*
- * Once the zap_vma_ptes has been called touches to the VMA will come here and
+ * Once the zap_special_vma_range has been called touches to the VMA will come here and
  * we return a dummy writable zero page for all the pfns.
  */
 static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
@@ -878,7 +878,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                                continue;
                        list_del_init(&priv->list);
 
-                       zap_vma_ptes(vma, vma->vm_start,
+                       zap_special_vma_range(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
 
                        if (priv->entry) {
index 8d749f345246865cf189a112596d2fc6ce3fc26f..278b76cbd2814883c506ec2e291361163b7f159d 100644 (file)
@@ -542,7 +542,7 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
        int ctxnum = gts->ts_ctxnum;
 
        if (!is_kernel_context(gts))
-               zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
+               zap_special_vma_range(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
        gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
index 10a5b9ba4eeb0ac6dad335d7d6d0b3625dcb8033..c516d517721171d3d37db3a3266807eaef16e900 100644 (file)
@@ -2802,7 +2802,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t pud);
 
-void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
                  unsigned long size);
 void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
                           unsigned long size);
index dd80fbf6473a456664ce7ea7774fcfec3c481207..3dc4664c9af7bec30f12627647d5669df38e0672 100644 (file)
@@ -2233,17 +2233,15 @@ void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
 }
 
 /**
- * zap_vma_ptes - remove ptes mapping the vma
- * @vma: vm_area_struct holding ptes to be zapped
- * @address: starting address of pages to zap
+ * zap_special_vma_range - zap all page table entries in a special vma range
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
  * @size: number of bytes to zap
  *
- * This function only unmaps ptes assigned to VM_PFNMAP vmas.
- *
- * The entire address range must be fully contained within the vma.
- *
+ * This function does nothing when the provided address range is not fully
+ * contained in @vma, or when the @vma is not VM_PFNMAP.
  */
-void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size)
 {
        if (!range_in_vma(vma, address, address + size) ||
@@ -2252,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 
        zap_vma_range(vma, address, size);
 }
-EXPORT_SYMBOL_GPL(zap_vma_ptes);
+EXPORT_SYMBOL_GPL(zap_special_vma_range);
 
 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
 {