git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: introduce vma_iter_store_attached() to use with attached vmas
authorSuren Baghdasaryan <surenb@google.com>
Thu, 13 Feb 2025 22:46:41 +0000 (14:46 -0800)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 05:06:18 +0000 (22:06 -0700)
vma_iter_store() functions can be used both when adding a new vma and when
updating an existing one.  However for existing ones we do not need to
mark them attached as they are already marked that way.  With
vma->detached being a separate flag, double-marking a vma as attached or
detached is not an issue because the flag will simply be overwritten with
the same value.  However once we fold this flag into the refcount later in
this series, re-attaching or re-detaching a vma becomes an issue since
these operations will be incrementing/decrementing a refcount.

Introduce vma_iter_store_new() and vma_iter_store_overwrite() to replace
vma_iter_store() and avoid re-attaching a vma during vma update.  Add
assertions in vma_mark_attached()/vma_mark_detached() to catch invalid
usage.  Update vma tests to check for vma detached state correctness.

Link: https://lkml.kernel.org/r/20250213224655.1680278-5-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Shivank Garg <shivankg@amd.com>
Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Klara Modin <klarasmodin@gmail.com>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/nommu.c
mm/vma.c
mm/vma.h
tools/testing/vma/vma.c
tools/testing/vma/vma_internal.h

index d333d4070362cf374b47f88a4aeff99ca77923a3..003c3e5c0a960b798cf7c6f232bd278f7ca64f11 100644 (file)
@@ -821,8 +821,19 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
                vma_assert_write_locked(vma);
 }
 
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+       WARN_ON_ONCE(vma->detached);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+       WARN_ON_ONCE(!vma->detached);
+}
+
 static inline void vma_mark_attached(struct vm_area_struct *vma)
 {
+       vma_assert_detached(vma);
        vma->detached = false;
 }
 
@@ -830,6 +841,7 @@ static inline void vma_mark_detached(struct vm_area_struct *vma)
 {
        /* When detaching vma should be write-locked */
        vma_assert_write_locked(vma);
+       vma_assert_attached(vma);
        vma->detached = true;
 }
 
@@ -866,6 +878,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
                { mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
 static inline void vma_mark_attached(struct vm_area_struct *vma) {}
 static inline void vma_mark_detached(struct vm_area_struct *vma) {}
 
index baa79abdaf037c92c0447169702424cf13357859..8b31d8396297e547f6dc28e8a0cfc2db0778ae35 100644 (file)
@@ -1191,7 +1191,7 @@ share:
        setup_vma_to_mm(vma, current->mm);
        current->mm->map_count++;
        /* add the VMA to the tree */
-       vma_iter_store(&vmi, vma);
+       vma_iter_store_new(&vmi, vma);
 
        /* we flush the region from the icache only when the first executable
         * mapping of it is made  */
@@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
        setup_vma_to_mm(vma, mm);
        setup_vma_to_mm(new, mm);
-       vma_iter_store(vmi, new);
+       vma_iter_store_new(vmi, new);
        mm->map_count++;
        return 0;
 
index 702e2181f8b7fa5aa7124143f1cd10a1da7ebe63..7374c04eee9b7a0a219dd623c6376c6f106047d5 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -320,7 +320,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
-               vma_iter_store(vmi, vp->insert);
+               vma_iter_store_new(vmi, vp->insert);
                mm->map_count++;
        }
 
@@ -700,7 +700,7 @@ static int commit_merge(struct vma_merge_struct *vmg)
                              vmg->__adjust_middle_start ? vmg->middle : NULL);
        vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
        vmg_adjust_set_range(vmg);
-       vma_iter_store(vmg->vmi, vmg->target);
+       vma_iter_store_overwrite(vmg->vmi, vmg->target);
 
        vma_complete(&vp, vmg->vmi, vma->vm_mm);
 
@@ -1711,7 +1711,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
                return -ENOMEM;
 
        vma_start_write(vma);
-       vma_iter_store(&vmi, vma);
+       vma_iter_store_new(&vmi, vma);
        vma_link_file(vma);
        mm->map_count++;
        validate_mm(mm);
@@ -2390,7 +2390,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
 
        /* Lock the VMA since it is modified after insertion into VMA tree */
        vma_start_write(vma);
-       vma_iter_store(vmi, vma);
+       vma_iter_store_new(vmi, vma);
        map->mm->map_count++;
        vma_link_file(vma);
 
@@ -2867,7 +2867,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                /* Overwrite old entry in mtree. */
-                               vma_iter_store(&vmi, vma);
+                               vma_iter_store_overwrite(&vmi, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
 
                                perf_event_mmap(vma);
@@ -2947,7 +2947,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
                                /* Overwrite old entry in mtree. */
-                               vma_iter_store(&vmi, vma);
+                               vma_iter_store_overwrite(&vmi, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
 
                                perf_event_mmap(vma);
index bffb56afce5f4275b5bf676f12358212bdcd0fb3..55be77ff042f0a0fe224d9866cda7c90842e36f8 100644 (file)
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -413,9 +413,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
 }
 
 /* Store a VMA with preallocated memory */
-static inline void vma_iter_store(struct vma_iterator *vmi,
-                                 struct vm_area_struct *vma)
+static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
+                                           struct vm_area_struct *vma)
 {
+       vma_assert_attached(vma);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
        if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
@@ -438,7 +439,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
 
        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
        mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline void vma_iter_store_new(struct vma_iterator *vmi,
+                                     struct vm_area_struct *vma)
+{
        vma_mark_attached(vma);
+       vma_iter_store_overwrite(vmi, vma);
 }
 
 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
index c7ffa71841ca9a8d13e0e4f168acd6829d95dbd3..11f761769b5b7794f9496811330798a9454817d8 100644 (file)
@@ -74,10 +74,22 @@ static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
        ret->vm_end = end;
        ret->vm_pgoff = pgoff;
        ret->__vm_flags = flags;
+       vma_assert_detached(ret);
 
        return ret;
 }
 
+/* Helper function to allocate a VMA and link it to the tree. */
+static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+       int res;
+
+       res = vma_link(mm, vma);
+       if (!res)
+               vma_assert_attached(vma);
+       return res;
+}
+
 /* Helper function to allocate a VMA and link it to the tree. */
 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
                                                 unsigned long start,
@@ -90,7 +102,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
        if (vma == NULL)
                return NULL;
 
-       if (vma_link(mm, vma)) {
+       if (attach_vma(mm, vma)) {
                vm_area_free(vma);
                return NULL;
        }
@@ -108,6 +120,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
 /* Helper function which provides a wrapper around a merge new VMA operation. */
 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
 {
+       struct vm_area_struct *vma;
        /*
         * For convenience, get prev and next VMAs. Which the new VMA operation
         * requires.
@@ -116,7 +129,11 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
        vmg->prev = vma_prev(vmg->vmi);
        vma_iter_next_range(vmg->vmi);
 
-       return vma_merge_new_range(vmg);
+       vma = vma_merge_new_range(vmg);
+       if (vma)
+               vma_assert_attached(vma);
+
+       return vma;
 }
 
 /*
@@ -125,7 +142,12 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
  */
 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
 {
-       return vma_merge_existing_range(vmg);
+       struct vm_area_struct *vma;
+
+       vma = vma_merge_existing_range(vmg);
+       if (vma)
+               vma_assert_attached(vma);
+       return vma;
 }
 
 /*
@@ -260,8 +282,8 @@ static bool test_simple_merge(void)
                .pgoff = 1,
        };
 
-       ASSERT_FALSE(vma_link(&mm, vma_left));
-       ASSERT_FALSE(vma_link(&mm, vma_right));
+       ASSERT_FALSE(attach_vma(&mm, vma_left));
+       ASSERT_FALSE(attach_vma(&mm, vma_right));
 
        vma = merge_new(&vmg);
        ASSERT_NE(vma, NULL);
@@ -285,7 +307,7 @@ static bool test_simple_modify(void)
        struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
        VMA_ITERATOR(vmi, &mm, 0x1000);
 
-       ASSERT_FALSE(vma_link(&mm, init_vma));
+       ASSERT_FALSE(attach_vma(&mm, init_vma));
 
        /*
         * The flags will not be changed, the vma_modify_flags() function
@@ -351,7 +373,7 @@ static bool test_simple_expand(void)
                .pgoff = 0,
        };
 
-       ASSERT_FALSE(vma_link(&mm, vma));
+       ASSERT_FALSE(attach_vma(&mm, vma));
 
        ASSERT_FALSE(expand_existing(&vmg));
 
@@ -372,7 +394,7 @@ static bool test_simple_shrink(void)
        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
        VMA_ITERATOR(vmi, &mm, 0);
 
-       ASSERT_FALSE(vma_link(&mm, vma));
+       ASSERT_FALSE(attach_vma(&mm, vma));
 
        ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
 
@@ -1522,11 +1544,11 @@ static bool test_copy_vma(void)
 
        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
        vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
-
        ASSERT_NE(vma_new, vma);
        ASSERT_EQ(vma_new->vm_start, 0);
        ASSERT_EQ(vma_new->vm_end, 0x2000);
        ASSERT_EQ(vma_new->vm_pgoff, 0);
+       vma_assert_attached(vma_new);
 
        cleanup_mm(&mm, &vmi);
 
@@ -1535,6 +1557,7 @@ static bool test_copy_vma(void)
        vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
        vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
        vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
+       vma_assert_attached(vma_new);
 
        ASSERT_EQ(vma_new, vma_next);
 
@@ -1576,6 +1599,7 @@ static bool test_expand_only_mode(void)
        ASSERT_EQ(vma->vm_pgoff, 3);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
+       vma_assert_attached(vma);
 
        cleanup_mm(&mm, &vmi);
        return true;
index f93f7f74f97b28cb8f713a2eecd9b3b47763a7bd..34277842156cb6fb0675c8528def4fce2aa52cfd 100644 (file)
@@ -470,6 +470,16 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
        vma->vm_lock_seq = UINT_MAX;
 }
 
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+       WARN_ON_ONCE(vma->detached);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+       WARN_ON_ONCE(!vma->detached);
+}
+
 static inline void vma_assert_write_locked(struct vm_area_struct *);
 static inline void vma_mark_attached(struct vm_area_struct *vma)
 {