struct folio *single_folio; /* Locked folio to be unmapped */
bool skip_cows; /* Do not zap COWed private pages */
bool reclaim_pt; /* Need reclaim page tables? */
+ bool reaping; /* Reaping, do not block */
zap_flags_t zap_flags; /* Extra flags for zapping */
};
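Since zap paths may pass a NULL details pointer, the new flag has to be read defensively. A minimal sketch of a NULL-safe accessor (a hypothetical helper, not part of this patch) that mirrors the check open-coded in __zap_vma_range() below:

/* Hypothetical helper, not in this patch: mirrors the NULL-safe
 * "details && details->reaping" check used in __zap_vma_range(). */
static inline bool zap_is_reaping(const struct zap_details *details)
{
	return details && details->reaping;
}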
unsigned long start, unsigned long end,
struct zap_details *details)
{
+ const bool reaping = details && details->reaping;
+
VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end));
- if (vma->vm_file)
+ /* uprobe_munmap() might sleep, so skip it when reaping. */
+ if (vma->vm_file && !reaping)
uprobe_munmap(vma, start, end);
if (unlikely(is_vm_hugetlb_page(vma))) {
zap_flags_t zap_flags = details ? details->zap_flags : 0;
+ VM_WARN_ON_ONCE(reaping);
/*
* vm_file will be NULL when we fail early while instantiating
* a new mapping. In this case, no pages were mapped yet and
* there is nothing to unmap.
*/
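Callers that are allowed to block are unaffected: a NULL details pointer makes the local reaping false, so uprobe_munmap() still runs and the hugetlb warning cannot fire. A sketch of such a call (the call shape is an assumption about blocking call sites, which are not shown in this patch):

/* Blocking path sketch: NULL details => reaping == false,
 * behavior is unchanged from before this patch. */
__zap_vma_range(tlb, vma, start, end, NULL);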
int zap_vma_for_reaping(struct vm_area_struct *vma)
{
+ struct zap_details details = {
+ .reaping = true,
+ };
struct mmu_notifier_range range;
struct mmu_gather tlb;
- VM_WARN_ON_ONCE(is_vm_hugetlb_page(vma));
-
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
vma->vm_start, vma->vm_end);
tlb_gather_mmu(&tlb, vma->vm_mm);
if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
	tlb_finish_mmu(&tlb);
	return -EBUSY;
}
- unmap_page_range(&tlb, vma, range.start, range.end, NULL);
+ __zap_vma_range(&tlb, vma, range.start, range.end, &details);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
return 0;
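For context, a hedged sketch of how a non-blocking caller in the style of the OOM reaper might drive zap_vma_for_reaping(); reap_mm_sketch() and its locking assumption are illustrative, not part of this patch:

/* Illustrative caller, not part of this patch. Assumes the caller
 * holds at least mmap_read_lock(mm). */
static bool reap_mm_sketch(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);
	bool complete = true;

	for_each_vma(vmi, vma) {
		/* Hugetlb teardown may sleep and would trip
		 * VM_WARN_ON_ONCE(reaping), so skip such VMAs. */
		if (is_vm_hugetlb_page(vma))
			continue;
		/* -EBUSY: an mmu notifier could not be invalidated
		 * without blocking; leave this VMA for later. */
		if (zap_vma_for_reaping(vma))
			complete = false;
	}
	return complete;
}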