if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
+ mmap_assert_write_locked(mm);
+
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
/* Lock the VMA before expanding to prevent concurrent page faults */
vma_start_write(vma);
- /*
- * vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_lock in read mode. We need the
- * anon_vma lock to serialize against concurrent expand_stacks.
- */
+ /* We update the anon VMA tree. */
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
- /*
- * We only hold a shared mmap_lock lock here, so
- * we need to protect against concurrent vma
- * expansions. anon_vma_lock_write() doesn't
- * help here, as we don't guarantee that all
- * growable vmas in a mm share the same root
- * anon vma. So, we reuse mm->page_table_lock
- * to guard against concurrent vma expansions.
- */
- spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
/* Overwrite old entry in mtree. */
vma_iter_store(&vmi, vma);
anon_vma_interval_tree_post_update_vma(vma);
- spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
if (!(vma->vm_flags & VM_GROWSDOWN))
return -EFAULT;
+ mmap_assert_write_locked(mm);
+
address &= PAGE_MASK;
if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
return -EPERM;
/* Lock the VMA before expanding to prevent concurrent page faults */
vma_start_write(vma);
- /*
- * vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_lock in read mode. We need the
- * anon_vma lock to serialize against concurrent expand_stacks.
- */
+ /* We update the anon VMA tree. */
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
- /*
- * We only hold a shared mmap_lock lock here, so
- * we need to protect against concurrent vma
- * expansions. anon_vma_lock_write() doesn't
- * help here, as we don't guarantee that all
- * growable vmas in a mm share the same root
- * anon vma. So, we reuse mm->page_table_lock
- * to guard against concurrent vma expansions.
- */
- spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_start = address;
vma->vm_pgoff -= grow;
/* Overwrite old entry in mtree. */
vma_iter_store(&vmi, vma);
anon_vma_interval_tree_post_update_vma(vma);
- spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
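
The hunks above assume a specific locking order now that stack expansion runs with the mmap lock held for writing: the caller takes mm->mmap_lock in write mode (which the added mmap_assert_write_locked() calls check), the VMA is write-locked to stop concurrent per-VMA-lock page faults, and the anon_vma lock is taken only to update the anon VMA interval tree; page_table_lock is no longer needed because concurrent expansions cannot happen under the write lock. The sketch below illustrates that ordering; grow_stack_sketch() is a hypothetical caller used for illustration, not a kernel function, and the actual VMA/maple-tree update is elided.

/*
 * Sketch only: the lock ordering the patched expand_upwards() and
 * expand_downwards() rely on.  grow_stack_sketch() is a hypothetical
 * caller, not a kernel function.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/rmap.h>

static int grow_stack_sketch(struct mm_struct *mm, struct vm_area_struct *vma)
{
	mmap_write_lock(mm);			/* callers now hold the lock for writing */
	mmap_assert_write_locked(mm);		/* what the added assertions verify */

	if (unlikely(anon_vma_prepare(vma))) {	/* make sure vma->anon_vma exists */
		mmap_write_unlock(mm);
		return -ENOMEM;
	}

	vma_start_write(vma);			/* block concurrent per-VMA-lock faults */
	anon_vma_lock_write(vma->anon_vma);	/* only guards the anon VMA tree update */

	/* ... adjust vm_start/vm_end and store the VMA back into the maple tree ... */

	anon_vma_unlock_write(vma->anon_vma);
	mmap_write_unlock(mm);
	return 0;
}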