]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
tools/testing/selftests: add merge test for partial msealed range
authorLorenzo Stoakes (Oracle) <ljs@kernel.org>
Tue, 31 Mar 2026 07:36:27 +0000 (08:36 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 18 Apr 2026 07:10:52 +0000 (00:10 -0700)
Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
an issue in the loop which iterates through VMAs applying mseal, which was
triggered by mseal()'ing a range of VMAs where the second was mseal()'d
and the first mergeable with it, once mseal()'d.

Add a regression test to assert that this behaviour is correct.  We place
it in the merge selftests as this is strictly an issue with merging (via a
vma_modify() invocation).

It also asserts that mseal()'d ranges are correctly merged as you'd
expect.

The test is implemented such that it is skipped if mseal() is not
available on the system.

[rppt@kernel.org: fix inclusions, to fix handle_uprobe_upon_merged_vma()]
Link: https://lore.kernel.org/ac_mCIUQWRAbuH8F@kernel.org
[ljs@kernel.org: simplifications per Pedro]
Link: https://lore.kernel.org/1c9c922d-5cb5-4cff-9273-b737cdb57ca1@lucifer.local
Link: https://lore.kernel.org/20260331073627.50010-1-ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Signed-off-by: Mike Rapoport <rppt@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
tools/testing/selftests/mm/merge.c

index 10b686102b79d90c1efbb13e8cba2fee52bd5f97..519e5ac02db78c3a88e5331f744928dfc1f40d96 100644 (file)
@@ -48,6 +48,19 @@ static pid_t do_fork(struct procmap_fd *procmap)
        return 0;
 }
 
+#ifdef __NR_mseal
+/*
+ * Raw syscall wrapper for mseal(2): libc may not provide a stub, so invoke
+ * the syscall number directly. Returns 0 on success, -1 with errno on error.
+ */
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+       return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
+}
+#else
+/*
+ * Build-time fallback when __NR_mseal is unknown: fail with ENOSYS so
+ * callers can detect missing support and SKIP the test.
+ */
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+       errno = ENOSYS;
+       return -1;
+}
+#endif
+
 FIXTURE_SETUP(merge)
 {
        self->page_size = psize();
@@ -1217,6 +1230,81 @@ TEST_F(merge, mremap_correct_placed_faulted)
        ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
 }
 
+/*
+ * Regression test for the mseal() VMA-iteration merge bug: sealing a range
+ * whose first VMA becomes mergeable with an already-sealed neighbour must
+ * update the VMA end correctly. Also asserts sealed VMAs merge as expected.
+ * Skipped when mseal(2) is unavailable on the running kernel.
+ */
+TEST_F(merge, merge_vmas_with_mseal)
+{
+       unsigned int page_size = self->page_size;
+       struct procmap_fd *procmap = &self->procmap;
+       char *ptr, *ptr2, *ptr3;
+       /* We need our own as cannot munmap() once sealed. */
+       char *carveout;
+
+       /* Invalid mseal() call to see if implemented. */
+       ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
+       if (errno == ENOSYS)
+               SKIP(return, "mseal not supported, skipping.");
+
+       /* Map carveout. */
+       carveout = mmap(NULL, 5 * page_size, PROT_NONE,
+                       MAP_PRIVATE | MAP_ANON, -1, 0);
+       ASSERT_NE(carveout, MAP_FAILED);
+
+       /*
+        * Map 3 separate VMAs:
+        *
+        * |-----------|-----------|-----------|
+        * |    RW     |    RWE    |    RO     |
+        * |-----------|-----------|-----------|
+        *      ptr         ptr2        ptr3
+        */
+       ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+                  MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+       ASSERT_NE(ptr, MAP_FAILED);
+       ptr2 = mmap(&carveout[2 * page_size], page_size,
+                   PROT_READ | PROT_WRITE | PROT_EXEC,
+                  MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+       ASSERT_NE(ptr2, MAP_FAILED);
+       ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
+                  MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+       ASSERT_NE(ptr3, MAP_FAILED);
+
+       /*
+        * mseal the second VMA:
+        *
+        * |-----------|-----------|-----------|
+        * |    RW     |    RWES   |    RO     |
+        * |-----------|-----------|-----------|
+        *      ptr         ptr2        ptr3
+        */
+       ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
+
+       /* Make first VMA mergeable upon mseal. */
+       ASSERT_EQ(mprotect(ptr, page_size,
+                          PROT_READ | PROT_WRITE | PROT_EXEC), 0);
+       /*
+        * At this point we have:
+        *
+        * |-----------|-----------|-----------|
+        * |    RWE    |    RWES   |    RO     |
+        * |-----------|-----------|-----------|
+        *      ptr         ptr2        ptr3
+        *
+        * Now mseal all of the VMAs. This is the pattern that previously
+        * triggered the bug: the first VMA merges with the already-sealed
+        * second on sealing, and the loop must track the new VMA end.
+        */
+       ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
+
+       /*
+        * We should end up with:
+        *
+        * |-----------------------|-----------|
+        * |          RWES         |    ROS    |
+        * |-----------------------|-----------|
+        *            ptr               ptr3
+        */
+       /*
+        * The RWES pair must have merged into one 2-page VMA; ptr3 stays
+        * separate since its protection (RO) differs.
+        */
+       ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+       ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+       ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
+
 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
 {
        struct procmap_fd *procmap = &self->procmap;