/*
* When the LPAR lost credits due to core removal or during
* migration, invalidate the existing mapping for the current
- * paste addresses and set windows in-active (zap_vma_pages in
+ * paste addresses and set windows in-active (zap_vma() in
* reconfig_close_windows()).
* New mapping will be done later after migration or new credits
* available. So continue to receive faults if the user space
* is done before the original mmap() and after the ioctl.
*/
if (vma)
- zap_vma_pages(vma);
+ zap_vma(vma);
mutex_unlock(&task_ref->mmap_mutex);
mmap_write_unlock(task_ref->mm);
unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-static inline void zap_vma_pages(struct vm_area_struct *vma)
+/**
+ * zap_vma - zap all page table entries in a vma
+ * @vma: The vma to zap.
+ *
+ * Removes every page table entry covering @vma's whole
+ * [vm_start, vm_end) range via zap_page_range_single().
+ */
+static inline void zap_vma(struct vm_area_struct *vma)
{
	zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
- zap_vma_pages(vma);
+ zap_vma(vma);
}
mmap_read_unlock(mm);
* while this function is in progress, although it may have been truncated
* before this function is called. Most callers have the folio locked.
* A few have the folio blocked from truncation through other means (e.g.
- * zap_vma_pages() has it mapped and is holding the page table lock).
+ * zap_vma() has it mapped and is holding the page table lock).
* When called from mark_buffer_dirty(), the filesystem should hold a
* reference to the buffer_head that is being marked dirty, which causes
* try_to_free_buffers() to fail.