if (!vma)
return;
if (!is_vm_hugetlb_page(vma))
- zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
+ zap_vma_range(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
vmaddr = vma->vm_end;
}
}
pid: Pid,
/// The mm for the relevant process.
mm: ARef<Mm>,
- /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ /// Used to synchronize calls to `vm_insert_page` and `zap_vma_range`.
#[pin]
mm_lock: Mutex<()>,
/// Spinlock protecting changes to pages.
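The locking scheme this field documents also exists on the C side of Binder-style drivers: a driver-private mutex serializes page insertion against zapping so the two cannot interleave on the same range. A minimal sketch of that pattern, with dummy_alloc and dummy_install_page as hypothetical names:

/* Hypothetical driver state; only the locking pattern is the point. */
struct dummy_alloc {
	struct mutex pages_lock;	/* serializes vm_insert_page() vs. zap_vma_range() */
};

static int dummy_install_page(struct dummy_alloc *alloc,
			      struct vm_area_struct *vma,
			      unsigned long addr, struct page *page)
{
	int ret;

	mutex_lock(&alloc->pages_lock);
	/* While the lock is held, a concurrent zap_vma_range() on the
	 * same range cannot interleave with the insertion below. */
	ret = vm_insert_page(vma, addr, page);
	mutex_unlock(&alloc->pages_lock);
	return ret;
}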
if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
- vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ vma.zap_vma_range(user_page_addr, PAGE_SIZE);
}
}
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range_single(vma, page_addr, PAGE_SIZE);
+ zap_vma_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
}
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
/**
* zap_vma - zap all page table entries in a vma
* @vma: the vma whose entire range is zapped
*/
static inline void zap_vma(struct vm_area_struct *vma)
{
- zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
struct mmu_notifier_range;
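Taken together, the header now exposes three entry points at increasing granularity: zap_vma_ptes() (the exported, flag-checked wrapper for drivers), zap_vma_range() (an arbitrary sub-range of one vma), and zap_vma() (the whole vma). A sketch of how a caller might choose between the last two; dummy_teardown and its arguments are placeholders:

/* Illustrative only; dummy_teardown() is hypothetical. */
static void dummy_teardown(struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len)
{
	if (addr == vma->vm_start && len == vma->vm_end - vma->vm_start)
		zap_vma(vma);			/* the whole vma */
	else
		zap_vma_range(vma, addr, len);	/* sub-range; must lie within the vma */
}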
guard(mutex)(&arena->lock);
/* iterate link list under lock */
list_for_each_entry(vml, &arena->vma_list, head)
- zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
+ zap_vma_range(vml->vma, uaddr, PAGE_SIZE * page_cnt);
}
static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
#ifdef CONFIG_MMU
/* Clear any partial mappings on error. */
if (err)
- zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
+ zap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);
#endif
return err;
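The same clean-up-on-error idiom appears in several mmap handlers: if a failure strikes after only part of the range was populated, the partial mappings are zapped so userspace never observes a half-built mapping. A minimal sketch, assuming a hypothetical handler that inserts nr_pages pages one at a time:

/* Hypothetical mmap handler body; only the error path is the point.
 * Zapping the full range is harmless even for pages never inserted. */
static int dummy_mmap_pages(struct vm_area_struct *vma,
			    struct page **pages, unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i++) {
		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				     pages[i]);
		if (err)
			break;
	}
	/* Clear any partial mappings on error. */
	if (err)
		zap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);
	return err;
}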
* Application no longer needs these pages. If the pages are dirty,
* it's OK to just throw them away. The app will be more careful about
* data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range_single call sets things up for shrink_active_list to actually
+ * zap_vma_range call sets things up for shrink_active_list to actually
* free these pages later if no one else has touched them in the meantime,
* although we could add these pages to a global reuse list for
* shrink_active_list to pick up before reclaiming other pages.
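From userspace this is the MADV_DONTNEED path: dirty anonymous pages may simply be discarded, and a later touch of the range faults in fresh zero-fill pages. A small userspace illustration of that contract:

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(p != MAP_FAILED);
	memset(p, 0xaa, len);			/* dirty the pages */
	madvise(p, len, MADV_DONTNEED);		/* contents may now be discarded */
	assert(p[0] == 0);			/* private anon memory refaults as zero-fill */
	return 0;
}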
* OK, some of the range has non-guard pages mapped; zap
* them. This leaves existing guard pages in place.
*/
- zap_page_range_single(vma, range->start, range->end - range->start);
+ zap_vma_range(vma, range->start, range->end - range->start);
}
/*
}
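For context, this hunk sits on the madvise guard-page install path: installing guards over a range first zaps any normal pages mapped there, while markers already present are left alone. A hedged userspace sketch, assuming a kernel and headers recent enough to define MADV_GUARD_INSTALL (the fallback value below is an assumption taken from recent uapi headers):

#include <sys/mman.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102	/* assumption: value in recent uapi headers */
#endif

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Existing page contents in the range are zapped; touching the
	 * range afterwards raises SIGSEGV instead of faulting a page in. */
	madvise(p, 4096, MADV_GUARD_INSTALL);
	return 0;
}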
/**
- * zap_page_range_single - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * zap_vma_range - zap all page table entries in a vma range
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
* @size: number of bytes to zap
*
- * The range must fit into one VMA.
+ * The provided address range must be fully contained within @vma.
*/
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
struct mmu_gather tlb;
!(vma->vm_flags & VM_PFNMAP))
return;
- zap_page_range_single(vma, address, size);
+ zap_vma_range(vma, address, size);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
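As the hunk above shows, zap_vma_ptes() is the defensive front end for drivers: it validates the range and bails out on vmas without VM_PFNMAP before forwarding to zap_vma_range(). A hypothetical PFNMAP driver teardown might rely on exactly that checking; dummy_unmap_bar is a placeholder name:

/* Hypothetical driver helper; names are placeholders. */
static void dummy_unmap_bar(struct vm_area_struct *vma)
{
	/* Safe on arbitrary vmas: zap_vma_ptes() validates the range
	 * and silently ignores non-VM_PFNMAP mappings. */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}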
* maintain page reference counts, and callers may free
* pages due to the error. So zap it early.
*/
- zap_page_range_single(vma, addr, size);
+ zap_vma_range(vma, addr, size);
return error;
}
maybe_zap_len = total_bytes_to_map - /* All bytes to map */
*length + /* Mapped or pending */
(pages_remaining * PAGE_SIZE); /* Failed map. */
- zap_page_range_single(vma, *address, maybe_zap_len);
+ zap_vma_range(vma, *address, maybe_zap_len);
err = 0;
}
unsigned long leftover_pages = pages_remaining;
int bytes_mapped;
- /* We called zap_page_range_single, try to reinsert. */
+ /* We called zap_vma_range, try to reinsert. */
err = vm_insert_pages(vma, *address,
pending_pages,
&pages_remaining);
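The surrounding tcp receive-zerocopy code uses the zap as a retry primitive: when vm_insert_pages() fails partway, the not-yet-mapped tail is zapped and the pending pages are inserted again onto clean page tables. A reduced sketch of that shape, not the verbatim kernel logic:

/* Reduced illustration of the zap-then-reinsert pattern. */
static int dummy_map_remaining(struct vm_area_struct *vma,
			       unsigned long *address,
			       struct page **pending_pages,
			       unsigned long pages_remaining)
{
	int err = vm_insert_pages(vma, *address, pending_pages,
				  &pages_remaining);
	if (err == -EBUSY) {
		/* Stale ptes may remain from an earlier mapping: zap the
		 * range, then retry the insert on a clean slate. */
		zap_vma_range(vma, *address, pages_remaining * PAGE_SIZE);
		err = vm_insert_pages(vma, *address, pending_pages,
				      &pages_remaining);
	}
	return err;
}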
total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
if (total_bytes_to_map) {
if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
- zap_page_range_single(vma, address, total_bytes_to_map);
+ zap_vma_range(vma, address, total_bytes_to_map);
zc->length = total_bytes_to_map;
zc->recv_skip_hint = 0;
} else {
/// kernel goes further in freeing unused page tables, but for the purposes of this operation
/// we must only assume that the leaf level is cleared.
#[inline]
- pub fn zap_page_range_single(&self, address: usize, size: usize) {
+ pub fn zap_vma_range(&self, address: usize, size: usize) {
let (end, did_overflow) = address.overflowing_add(size);
if did_overflow || address < self.start() || self.end() < end {
// TODO: call WARN_ONCE once Rust version of it is added
return;
}
// SAFETY: By the type invariants, the caller has read access to this VMA, which is
// sufficient for this method call. This method has no requirements on the vma flags. The
// address range is checked to be within the vma.
- unsafe { bindings::zap_page_range_single(self.as_ptr(), address, size) };
+ unsafe { bindings::zap_vma_range(self.as_ptr(), address, size) };
}
/// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise