// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
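
/*
 * Editorial usage sketch, not part of the original file: kstrdup_const()
 * may return the source pointer itself when it lives in .rodata, so the
 * result must be released with kfree_const(). The function name below is
 * hypothetical.
 */
static int __maybe_unused example_dup_name(const char *src, const char **out)
{
	const char *name = kstrdup_const(src, GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	*out = name;	/* release with kfree_const(), never krealloc() */
	return 0;
}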

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may be not physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
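
/*
 * Editorial usage sketch, not part of the original file: memdup_user()
 * never returns NULL - failure is encoded via ERR_PTR(), so callers test
 * with IS_ERR() and unwrap with PTR_ERR(). The function name below is
 * hypothetical.
 */
static long __maybe_unused example_copy_blob(const void __user *ubuf, size_t len)
{
	void *buf = memdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf ... */
	kfree(buf);
	return 0;
}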

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may be not
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
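
/*
 * Editorial note: with 4 KiB pages (PAGE_SHIFT == 12), the default
 * STACK_RND_MASK is 0x7ff, so the random offset computed above is
 * (get_random_long() & 0x7ff) << 12 - at most 0x7ff000 bytes, i.e.
 * just under 8 MiB of VA, matching the "8MB of VA" comment.
 */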

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
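
/*
 * Editorial note: a worked example of the above - for start = 0x1000 and
 * range = SZ_8M with 4 KiB pages, start is already aligned, range becomes
 * 0x800 candidate pages after the shift, and the result is 0x1000 plus a
 * random multiple of PAGE_SIZE below 8 MiB, inside [start, start + range).
 */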

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so a unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}
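
/*
 * Editorial note: a worked example of the clamping above - with "ulimit -s"
 * at 8 MiB, gap starts at 8 MiB plus the guard gap and the randomization
 * pad, which is still below MIN_GAP (128 MiB), so gap is raised to 128 MiB
 * and mmap_base lands at PAGE_ALIGN(STACK_TOP - SZ_128M - rnd) in the
 * top-down layout.
 */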

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
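
/*
 * Editorial usage sketch, not part of the original file: a driver that
 * pins user pages typically charges them up front and uncharges the same
 * amount on teardown; CAP_IPC_LOCK holders bypass the rlimit check inside
 * account_locked_vm(). The function name below is hypothetical.
 */
static int __maybe_unused example_charge_pinned(struct mm_struct *mm,
						unsigned long npages)
{
	int ret = account_locked_vm(mm, npages, true);

	if (ret)
		return ret;	/* would exceed RLIMIT_MEMLOCK */
	/* ... pin the pages; later, on teardown: */
	account_locked_vm(mm, npages, false);
	return 0;
}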

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
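
/*
 * Editorial usage sketch, not part of the original file: a possibly large
 * table is allocated with the kvmalloc_array() wrapper and must be released
 * with kvfree(), which picks vfree() or kfree() as appropriate. The function
 * name below is hypothetical.
 */
static __maybe_unused void *example_alloc_table(size_t entries)
{
	void *table = kvmalloc_array(entries, sizeof(u64), GFP_KERNEL);

	/* caller releases with kvfree(table) */
	return table;
}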

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
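
/*
 * Editorial usage sketch, not part of the original file: vcalloc() combines
 * the overflow-checked sizing of __vmalloc_array() with __GFP_ZERO, so the
 * returned array is zero-filled; free it with vfree(). The function name
 * below is hypothetical.
 */
static __maybe_unused unsigned long *example_zeroed_words(size_t nr_words)
{
	return vcalloc(nr_words, sizeof(unsigned long));
}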

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid possible race conditions
	 * (even though users usually won't switch to OVERCOMMIT_NEVER
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
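
/*
 * Editorial note: a worked example of the above - on a 16 GiB machine with
 * no hugetlb pages, 8 GiB of swap and the default overcommit_ratio of 50,
 * the limit is 16 GiB * 50 / 100 + 8 GiB = 16 GiB. This is the CommitLimit
 * value shown in /proc/meminfo and is enforced only under OVERCOMMIT_NEVER.
 */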

/*
 * Make sure vm_committed_as is in one cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for big
 * platform like a 2S/36C/72T Skylake server, in worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
			    __func__, current->pid, current->comm);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
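
/*
 * Editorial usage sketch, not part of the original file: the caller prints
 * a preamble without a trailing newline and lets mem_dump_obj() finish the
 * line via pr_cont(). The function name below is hypothetical.
 */
static void __maybe_unused example_report(void *obj)
{
	pr_info("suspicious pointer:");
	mem_dump_obj(obj);
}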

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
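
/*
 * Editorial usage sketch, not part of the original file: a PFN walker
 * brackets its PageOffline() checks with page_offline_freeze()/thaw() so
 * that no page can become PageOffline() while its content is being read.
 * The function name below is hypothetical.
 */
static void __maybe_unused example_walk_page(struct page *page)
{
	page_offline_freeze();
	if (!PageOffline(page)) {
		/* ... safe to read page content here ... */
	}
	page_offline_thaw();
}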

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif