]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/vma: move unmapped_area() internals to mm/vma.c
authorLorenzo Stoakes <lorenzo.stoakes@oracle.com>
Tue, 3 Dec 2024 18:05:09 +0000 (18:05 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:43 +0000 (22:40 -0800)
We want to be able to unit test the unmapped area logic, so move it to
mm/vma.c.  The wrappers which invoke this remain in place in mm/mmap.c.

In addition, naturally, update the existing test code to enable this to be
compiled in userland.

Link: https://lkml.kernel.org/r/53a57a52a64ea54e9d129d2e2abca3a538022379.1733248985.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c
mm/vma.c
mm/vma.h
tools/testing/vma/vma.c
tools/testing/vma/vma_internal.h

index 775db706b82288c520d848666896e9e2e0fca010..7aa372a753269fec34c85ac171547eb0ba311bd1 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -580,115 +580,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 }
 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
 
-/**
- * unmapped_area() - Find an area between the low_limit and the high_limit with
- * the correct alignment and offset, all from @info. Note: current->mm is used
- * for the search.
- *
- * @info: The unmapped area information including the range [low_limit -
- * high_limit), the alignment offset and mask.
- *
- * Return: A memory address or -ENOMEM.
- */
-static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
-{
-       unsigned long length, gap;
-       unsigned long low_limit, high_limit;
-       struct vm_area_struct *tmp;
-       VMA_ITERATOR(vmi, current->mm, 0);
-
-       /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask + info->start_gap;
-       if (length < info->length)
-               return -ENOMEM;
-
-       low_limit = info->low_limit;
-       if (low_limit < mmap_min_addr)
-               low_limit = mmap_min_addr;
-       high_limit = info->high_limit;
-retry:
-       if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
-               return -ENOMEM;
-
-       /*
-        * Adjust for the gap first so it doesn't interfere with the
-        * later alignment. The first step is the minimum needed to
-        * fulill the start gap, the next steps is the minimum to align
-        * that. It is the minimum needed to fulill both.
-        */
-       gap = vma_iter_addr(&vmi) + info->start_gap;
-       gap += (info->align_offset - gap) & info->align_mask;
-       tmp = vma_next(&vmi);
-       if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
-               if (vm_start_gap(tmp) < gap + length - 1) {
-                       low_limit = tmp->vm_end;
-                       vma_iter_reset(&vmi);
-                       goto retry;
-               }
-       } else {
-               tmp = vma_prev(&vmi);
-               if (tmp && vm_end_gap(tmp) > gap) {
-                       low_limit = vm_end_gap(tmp);
-                       vma_iter_reset(&vmi);
-                       goto retry;
-               }
-       }
-
-       return gap;
-}
-
-/**
- * unmapped_area_topdown() - Find an area between the low_limit and the
- * high_limit with the correct alignment and offset at the highest available
- * address, all from @info. Note: current->mm is used for the search.
- *
- * @info: The unmapped area information including the range [low_limit -
- * high_limit), the alignment offset and mask.
- *
- * Return: A memory address or -ENOMEM.
- */
-static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
-{
-       unsigned long length, gap, gap_end;
-       unsigned long low_limit, high_limit;
-       struct vm_area_struct *tmp;
-       VMA_ITERATOR(vmi, current->mm, 0);
-
-       /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask + info->start_gap;
-       if (length < info->length)
-               return -ENOMEM;
-
-       low_limit = info->low_limit;
-       if (low_limit < mmap_min_addr)
-               low_limit = mmap_min_addr;
-       high_limit = info->high_limit;
-retry:
-       if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
-               return -ENOMEM;
-
-       gap = vma_iter_end(&vmi) - info->length;
-       gap -= (gap - info->align_offset) & info->align_mask;
-       gap_end = vma_iter_end(&vmi);
-       tmp = vma_next(&vmi);
-       if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
-               if (vm_start_gap(tmp) < gap_end) {
-                       high_limit = vm_start_gap(tmp);
-                       vma_iter_reset(&vmi);
-                       goto retry;
-               }
-       } else {
-               tmp = vma_prev(&vmi);
-               if (tmp && vm_end_gap(tmp) > gap) {
-                       high_limit = tmp->vm_start;
-                       vma_iter_reset(&vmi);
-                       goto retry;
-               }
-       }
-
-       return gap;
-}
-
 /*
  * Determine if the allocation needs to ensure that there is no
  * existing mapping within it's guard gaps, for use as start_gap.
index 7cd174daeeec68b243f5a7d8754c5cb735bc97c8..3972376176e7fd8044050bd2c591462b33806438 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -2563,3 +2563,112 @@ unacct_fail:
        vm_unacct_memory(len >> PAGE_SHIFT);
        return -ENOMEM;
 }
+
+/**
+ * unmapped_area() - Find an area between the low_limit and the high_limit with
+ * the correct alignment and offset, all from @info. Note: current->mm is used
+ * for the search.
+ *
+ * @info: The unmapped area information including the range [low_limit,
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
+unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+{
+       unsigned long length, gap;
+       unsigned long low_limit, high_limit;
+       struct vm_area_struct *tmp;
+       VMA_ITERATOR(vmi, current->mm, 0);
+
+       /* Adjust search length to account for worst case alignment overhead */
+       length = info->length + info->align_mask + info->start_gap;
+       if (length < info->length)
+               return -ENOMEM;
+
+       low_limit = info->low_limit;
+       if (low_limit < mmap_min_addr)
+               low_limit = mmap_min_addr;
+       high_limit = info->high_limit;
+retry:
+       if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
+               return -ENOMEM;
+
+       /*
+        * Adjust for the gap first so it doesn't interfere with the
+        * later alignment. The first step is the minimum needed to
+        * fulfill the start gap, the next step is the minimum to align
+        * that. It is the minimum needed to fulfill both.
+        */
+       gap = vma_iter_addr(&vmi) + info->start_gap;
+       gap += (info->align_offset - gap) & info->align_mask;
+       tmp = vma_next(&vmi);
+       if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) < gap + length - 1) {
+                       low_limit = tmp->vm_end;
+                       vma_iter_reset(&vmi);
+                       goto retry;
+               }
+       } else {
+               tmp = vma_prev(&vmi);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       low_limit = vm_end_gap(tmp);
+                       vma_iter_reset(&vmi);
+                       goto retry;
+               }
+       }
+
+       return gap;
+}
+
+/**
+ * unmapped_area_topdown() - Find an area between the low_limit and the
+ * high_limit with the correct alignment and offset at the highest available
+ * address, all from @info. Note: current->mm is used for the search.
+ *
+ * @info: The unmapped area information including the range [low_limit,
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+{
+       unsigned long length, gap, gap_end;
+       unsigned long low_limit, high_limit;
+       struct vm_area_struct *tmp;
+       VMA_ITERATOR(vmi, current->mm, 0);
+
+       /* Adjust search length to account for worst case alignment overhead */
+       length = info->length + info->align_mask + info->start_gap;
+       if (length < info->length)
+               return -ENOMEM;
+
+       low_limit = info->low_limit;
+       if (low_limit < mmap_min_addr)
+               low_limit = mmap_min_addr;
+       high_limit = info->high_limit;
+retry:
+       if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
+               return -ENOMEM;
+
+       gap = vma_iter_end(&vmi) - info->length;
+       gap -= (gap - info->align_offset) & info->align_mask;
+       gap_end = vma_iter_end(&vmi);
+       tmp = vma_next(&vmi);
+       if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) < gap_end) {
+                       high_limit = vm_start_gap(tmp);
+                       vma_iter_reset(&vmi);
+                       goto retry;
+               }
+       } else {
+               tmp = vma_prev(&vmi);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       high_limit = tmp->vm_start;
+                       vma_iter_reset(&vmi);
+                       goto retry;
+               }
+       }
+
+       return gap;
+}
index 83a15d3a8285fd597687f82123039ed840a2a3ae..c60f37d89eb1b63a132355a29611fbee424387a9 100644 (file)
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -250,6 +250,9 @@ unsigned long __mmap_region(struct file *file, unsigned long addr,
 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
                 unsigned long addr, unsigned long request, unsigned long flags);
 
+unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+
 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
 {
        /*
index 8fab5e13c7c3bb6989dfefdd059caa7ceb0df919..39ee61e5563492535a1413439d49088745c5cbc9 100644 (file)
@@ -18,6 +18,12 @@ static bool fail_prealloc;
 #define vma_iter_prealloc(vmi, vma)                                    \
        (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
 
+#define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
+
+unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
 /*
  * Directly import the VMA implementation here. Our vma_internal.h wrapper
  * provides userland-equivalent functionality for everything vma.c uses.
index 7c3c15135c5b43f8a6a1cf8b1a9ca76eabc6b7dd..6ad8bd8edaad1bb5be6f376e7762c8a09f1beefa 100644 (file)
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
 
+extern unsigned long stack_guard_gap;
+#ifdef CONFIG_MMU
+extern unsigned long mmap_min_addr;
+extern unsigned long dac_mmap_min_addr;
+#else
+#define mmap_min_addr          0UL
+#define dac_mmap_min_addr      0UL
+#endif
+
 #define VM_WARN_ON(_expr) (WARN_ON(_expr))
 #define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
 #define VM_BUG_ON(_expr) (BUG_ON(_expr))
@@ -52,6 +61,8 @@
 #define VM_STACK       VM_GROWSDOWN
 #define VM_SHADOW_STACK        VM_NONE
 #define VM_SOFTDIRTY   0
+#define VM_ARCH_1      0x01000000      /* Architecture-specific flag */
+#define VM_GROWSUP     VM_NONE
 
 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
@@ -66,6 +77,8 @@
 
 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_TSK_EXEC
 
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
 #ifdef CONFIG_64BIT
 /* VM is sealed, in vm_flags */
 #define VM_SEALED      _BITUL(63)
@@ -395,6 +408,17 @@ struct vm_operations_struct {
                                          unsigned long addr);
 };
 
+struct vm_unmapped_area_info {
+#define VM_UNMAPPED_AREA_TOPDOWN 1
+       unsigned long flags;
+       unsigned long length;
+       unsigned long low_limit;
+       unsigned long high_limit;
+       unsigned long align_mask;
+       unsigned long align_offset;
+       unsigned long start_gap;
+};
+
 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
 {
        mas_pause(&vmi->mas);
@@ -1055,4 +1079,39 @@ static inline int mmap_file(struct file *, struct vm_area_struct *)
        return 0;
 }
 
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_GROWSDOWN)
+               return stack_guard_gap;
+
+       /* See reasoning around the VM_SHADOW_STACK definition */
+       if (vma->vm_flags & VM_SHADOW_STACK)
+               return PAGE_SIZE;
+
+       return 0;
+}
+
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+       unsigned long gap = stack_guard_start_gap(vma);
+       unsigned long vm_start = vma->vm_start;
+
+       vm_start -= gap;
+       if (vm_start > vma->vm_start)
+               vm_start = 0;
+       return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+       unsigned long vm_end = vma->vm_end;
+
+       if (vma->vm_flags & VM_GROWSUP) {
+               vm_end += stack_guard_gap;
+               if (vm_end < vma->vm_end)
+                       vm_end = -PAGE_SIZE;
+       }
+       return vm_end;
+}
+
 #endif /* __MM_VMA_INTERNAL_H */