static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
unsigned long err;
- unsigned long addr = vrm->addr;
err = remap_is_valid(vrm);
if (err)
return err;
- /*
- * We want to populate the newly expanded portion of the VMA to
- * satisfy the expectation that mlock()'ing a VMA maintains all
- * of its pages in memory.
- */
- if (vrm->mlocked)
- vrm->new_addr = addr;
-
/* OK we're done! */
- return addr;
+ return vrm->addr;
}
return -EINVAL;
}
+static int check_prep_vma(struct vma_remap_struct *vrm)
+{
+ struct vm_area_struct *vma = vrm->vma;
+
+ if (!vma)
+ return -EFAULT;
+
+ /* If mseal()'d, mremap() is prohibited. */
+ if (!can_modify_vma(vma))
+ return -EPERM;
+
+ /* Align to hugetlb page size, if required. */
+ if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
+ return -EINVAL;
+
+ vrm_set_delta(vrm);
+ vrm->remap_type = vrm_remap_type(vrm);
+ /* For convenience, we set new_addr even if VMA won't move. */
+ if (!vrm_implies_new_addr(vrm))
+ vrm->new_addr = vrm->addr;
+
+ return 0;
+}
+
static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
unsigned long res;
vrm->old_len = PAGE_ALIGN(vrm->old_len);
if (mmap_write_lock_killable(mm))
	return -EINTR;
vrm->mmap_locked = true;
- vma = vrm->vma = vma_lookup(mm, vrm->addr);
- if (!vma) {
- res = -EFAULT;
- goto out;
- }
-
- /* If mseal()'d, mremap() is prohibited. */
- if (!can_modify_vma(vma)) {
- res = -EPERM;
- goto out;
- }
-
- /* Align to hugetlb page size, if required. */
- if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
- res = -EINVAL;
+ vrm->vma = vma_lookup(current->mm, vrm->addr);
+ res = check_prep_vma(vrm);
+ if (res)
goto out;
- }
-
- vrm_set_delta(vrm);
- vrm->remap_type = vrm_remap_type(vrm);
/* Actually execute mremap. */
res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);