mm/mmap: don't use __vma_adjust() in shift_arg_pages()
author     Liam R. Howlett <Liam.Howlett@Oracle.com>
           Fri, 20 Jan 2023 16:26:46 +0000 (11:26 -0500)
committer  Andrew Morton <akpm@linux-foundation.org>
           Fri, 10 Feb 2023 00:51:38 +0000 (16:51 -0800)
Introduce vma_shrink(), which uses the vma_prepare() and vma_complete()
functions to reduce a VMA's coverage.

Convert shift_arg_pages() to use vma_expand() and the new vma_shrink()
function.  Remove support for shrinking a VMA from __vma_adjust(), since
shift_arg_pages() was the only user that shrank a VMA in this way.

Link: https://lkml.kernel.org/r/20230120162650.984577-46-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
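
For orientation, a condensed sketch of the resulting shift_arg_pages()
flow (the page-table moving and freeing between the two calls is elided
here; see the fs/exec.c hunks below for the real function):

	static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned long old_start = vma->vm_start;
		unsigned long old_end = vma->vm_end;
		unsigned long new_start = old_start - shift;
		unsigned long new_end = old_end - shift;
		VMA_ITERATOR(vmi, mm, new_start);

		/* Grow the VMA downward to cover [new_start, old_end). */
		if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
			return -ENOMEM;

		/* ... move_page_tables() and free_pgd_range() shift the contents ... */

		vma_prev(&vmi);
		/* Trim the VMA back down to just the shifted range. */
		return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
	}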
fs/exec.c
include/linux/mm.h
mm/mmap.c

index d52fca2dd30b435ce0825abd552cab53beb1c47a..c0df813d2b4555d59e812e5786d0f6178b7af41b 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        /*
         * cover the whole range: [new_start, old_end)
         */
-       if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
+       if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;
 
        /*
@@ -733,7 +733,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
        vma_prev(&vmi);
        /* Shrink the vma to just the new range */
-       return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff);
+       return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
 }
 
 /*
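
The vma_prev() call above is needed because the code between the two
hunks (elided from this diff) walks the iterator forward with vma_next()
to find the VMA following the stack for free_pgd_range(); the iterator
must be stepped back onto vma before vma_shrink() consumes it.  A sketch
of that elided middle, reconstructed from the surrounding kernel code
(declarations elided, and only one of the two free_pgd_range() branches
shown):

	tlb_gather_mmu(&tlb, mm);
	next = vma_next(&vmi);		/* iterator now points past vma */
	free_pgd_range(&tlb, new_end, old_end, new_end,
		       next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb);

	vma_prev(&vmi);			/* step back onto vma for vma_shrink() */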
index 245fb30858c9e2ba206d3437e6ec0e1f98b3ae46..dcc34533d2f6f3708e456fb5a2727b77f182b026 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2831,17 +2831,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
-static inline int vma_adjust(struct vma_iterator *vmi,
-       struct vm_area_struct *vma, unsigned long start, unsigned long end,
-       pgoff_t pgoff)
-{
-       return __vma_adjust(vmi, vma, start, end, pgoff, NULL);
-}
 extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
                      unsigned long start, unsigned long end, pgoff_t pgoff,
                      struct vm_area_struct *next);
+extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+                      unsigned long start, unsigned long end, pgoff_t pgoff);
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
        struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
        unsigned long end, unsigned long vm_flags, struct anon_vma *,
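
With the vma_adjust() wrapper removed, a caller that previously funneled
arbitrary bounds through __vma_adjust() now chooses the specific helper.
A hypothetical dispatch, for illustration only (not part of this patch):

	/* Grow or trim vma to [start, end). */
	if (start <= vma->vm_start && end >= vma->vm_end)
		err = vma_expand(&vmi, vma, start, end, pgoff, NULL);
	else
		err = vma_shrink(&vmi, vma, start, end, pgoff);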
index 9599db011b1843c75a9203d4b3b89b22d71fbd2f..07b52acfd56584eb68c68ac42aad83ca1c63c7cc 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -682,6 +682,45 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 nomem:
        return -ENOMEM;
 }
+
+/*
+ * vma_shrink() - Reduce an existing VMA's memory area
+ * @vmi: The vma iterator
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
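+ * @pgoff: The new page offset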
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+              unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+       struct vma_prepare vp;
+
+       WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+       if (vma_iter_prealloc(vmi))
+               return -ENOMEM;
+
+       init_vma_prep(&vp, vma);
+       vma_adjust_trans_huge(vma, start, end, 0);
+       vma_prepare(&vp);
+
+       if (vma->vm_start < start)
+               vma_iter_clear(vmi, vma->vm_start, start);
+
+       if (vma->vm_end > end)
+               vma_iter_clear(vmi, end, vma->vm_end);
+
+       vma->vm_start = start;
+       vma->vm_end = end;
+       vma->vm_pgoff = pgoff;
+       vma_complete(&vp, vmi, vma->vm_mm);
+       validate_mm(vma->vm_mm);
+       return 0;
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
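
The WARN_ON in vma_shrink() encodes its contract: only one boundary may
move per call, and the up-front vma_iter_prealloc() means the later
maple-tree writes cannot fail mid-update.  Illustrative calls, assuming
a multi-page vma (hypothetical values, not from the patch):

	/* OK: vm_start fixed, tail trimmed. */
	vma_shrink(&vmi, vma, vma->vm_start, vma->vm_end - PAGE_SIZE,
		   vma->vm_pgoff);

	/* OK: vm_end fixed, head trimmed; pgoff advances with the start. */
	vma_shrink(&vmi, vma, vma->vm_start + PAGE_SIZE, vma->vm_end,
		   vma->vm_pgoff + 1);

	/* Fires the WARN_ON: both boundaries move in one call. */
	vma_shrink(&vmi, vma, vma->vm_start + PAGE_SIZE,
		   vma->vm_end - PAGE_SIZE, vma->vm_pgoff + 1);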
@@ -797,14 +835,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
        vma_prepare(&vma_prep);
 
-       if (vma->vm_start < start)
-               vma_iter_clear(vmi, vma->vm_start, start);
-       else if (start != vma->vm_start)
-               vma_changed = true;
-
-       if (vma->vm_end > end)
-               vma_iter_clear(vmi, end, vma->vm_end);
-       else if (end != vma->vm_end)
+       if (start < vma->vm_start || end > vma->vm_end)
                vma_changed = true;
 
        vma->vm_start = start;
@@ -817,7 +848,10 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
        if (adjust_next) {
                next->vm_start += adjust_next;
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               vma_iter_store(vmi, next);
+               if (adjust_next < 0) {
+                       WARN_ON_ONCE(vma_changed);
+                       vma_iter_store(vmi, next);
+               }
        }
 
        vma_complete(&vma_prep, vmi, mm);
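
The adjust_next hunk now stores next back into the maple tree only when
it grows downward.  A sketch of the likely rationale, in comment form
(this reasoning is inferred from the code, not stated in the patch):

	/*
	 * adjust_next > 0: next->vm_start moves up.  The earlier store of
	 * the expanded vma already overwrote next's old head in the maple
	 * tree, so re-storing next would be redundant.
	 *
	 * adjust_next < 0: next->vm_start moves down into the range being
	 * vacated, so next must be stored to claim it in the tree.  The
	 * WARN_ON_ONCE(vma_changed) documents that vma's own bounds are
	 * not expected to change in this case.
	 */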