Concurrent reads and writes of sysctl_max_map_count are possible, so we
should use READ_ONCE() and WRITE_ONCE() to avoid load/store tearing.
The sysctl procfs logic already enforces WRITE_ONCE(), so abstract the
read side with get_sysctl_max_map_count().
While we're here, also move the field to mm/internal.h and add the getter
there: since only mm interacts with it, there's no need for anybody else
to have access.
Finally, update the VMA userland tests to reflect the change.
Link: https://lkml.kernel.org/r/0715259eb37cbdfde4f9e5db92a20ec7110a1ce5.1773249037.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Cc: Jann Horn <jannh@google.com>
Cc: Jianzhou Zhao <luckd0g@163.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-extern int sysctl_max_map_count;
-
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
#endif /* CONFIG_MMU_NOTIFIER */
+extern int sysctl_max_map_count;
+/* Tearing-safe read; the sysctl write side uses WRITE_ONCE() (see procfs). */
+static inline int get_sysctl_max_map_count(void)
+{
+ return READ_ONCE(sysctl_max_map_count);
+}
+
#endif /* __MM_INTERNAL_H */
return -EOVERFLOW;
/* Too many mappings? */
- if (mm->map_count > sysctl_max_map_count)
+ if (mm->map_count > get_sysctl_max_map_count())
return -ENOMEM;
/*
* which may not merge, then (if MREMAP_DONTUNMAP is not set) unmap the
* source, which may split, causing a net increase of 2 mappings.
*/
- if (current->mm->map_count + 2 > sysctl_max_map_count)
+ if (current->mm->map_count + 2 > get_sysctl_max_map_count())
return -ENOMEM;
if (vma->vm_ops && vma->vm_ops->may_split) {
* net increased map count of 2. In move_vma() we check for headroom of
* 2 additional mappings, so check early to avoid bailing out then.
*/
- if (current->mm->map_count + 4 > sysctl_max_map_count)
+ if (current->mm->map_count + 4 > get_sysctl_max_map_count())
return -ENOMEM;
return 0;
return -ENOMEM;
mm = vma->vm_mm;
- if (mm->map_count >= sysctl_max_map_count)
+ if (mm->map_count >= get_sysctl_max_map_count())
return -ENOMEM;
region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
- if (vma->vm_mm->map_count >= sysctl_max_map_count)
+ if (vma->vm_mm->map_count >= get_sysctl_max_map_count())
return -ENOMEM;
return __split_vma(vmi, vma, addr, new_below);
* its limit temporarily, to help free resources as expected.
*/
if (vms->end < vms->vma->vm_end &&
- vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
+ vms->vma->vm_mm->map_count >= get_sysctl_max_map_count()) {
error = -ENOMEM;
goto map_count_exceeded;
}
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
return -ENOMEM;
- if (mm->map_count > sysctl_max_map_count)
+ if (mm->map_count > get_sysctl_max_map_count())
return -ENOMEM;
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
-/* We hardcode this for now. */
-#define sysctl_max_map_count 0x1000000UL
-
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
/*
#define EMPTY_VMA_FLAGS ((vma_flags_t){ })
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
MMAP_NOTHING, /* Mapping is complete, no further action. */
swap(vma->vm_file, file);
fput(file);
}
+
+extern int sysctl_max_map_count;
+/* Userland test shim mirroring the mm/internal.h helper: READ_ONCE() read. */
+static inline int get_sysctl_max_map_count(void)
+{
+ return READ_ONCE(sysctl_max_map_count);
+}
#include "tests/mmap.c"
#include "tests/vma.c"
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
+
/* Helper functions which utilise static kernel functions. */
struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)