+ * @pg_max: Page table limit passed through to free_pgtables().
*/
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
unsigned long vma_start, unsigned long vma_end,
- struct vm_area_struct *prev, struct vm_area_struct *next)
+ unsigned long pg_max, struct vm_area_struct *prev,
+ struct vm_area_struct *next)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
unmap_vmas(&tlb, mas, vma, vma_start, vma_end, vma_end, /* mm_wr_locked = */ true);
mas_set(mas, vma->vm_end);
free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : USER_PGTABLES_CEILING,
- next ? next->vm_start : USER_PGTABLES_CEILING,
+ pg_max, next ? next->vm_start : USER_PGTABLES_CEILING,
/* mm_wr_locked = */ true);
tlb_finish_mmu(&tlb);
}
vma_iter_set(vmi, vma->vm_end);
/* Undo any partial mapping done by a device driver. */
unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
+ map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
map->prev, map->next);
return error;
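For context, a minimal sketch (not part of the patch) of how a caller keeps the old behaviour under the new signature: passing the next VMA's start, or USER_PGTABLES_CEILING when there is no next VMA, as pg_max supplies the same value that unmap_region() previously computed from @next internally. The helper name unmap_region_default below is hypothetical and assumes the usual mm/vma.h context.

/* Hypothetical helper, not part of this patch: preserves the old ceiling. */
static inline void unmap_region_default(struct ma_state *mas,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		struct vm_area_struct *next)
{
	unmap_region(mas, vma, vma->vm_start, vma->vm_end,
		     next ? next->vm_start : USER_PGTABLES_CEILING,
		     prev, next);
}

A caller that knows a tighter bound can pass a smaller pg_max instead.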
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
unsigned long vma_start, unsigned long vma_end,
- struct vm_area_struct *prev, struct vm_area_struct *next);
+ unsigned long pg_max, struct vm_area_struct *prev,
+ struct vm_area_struct *next);
/**
* vma_modify_flags() - Perform any necessary split/merge in preparation for