// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise fall back
 * to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
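
/*
 * Illustrative sketch of the kstrdup_const()/kfree_const() pairing for a
 * name that may point into .rodata; the "example" structure and helpers
 * below are hypothetical and not part of this file.
 *
 *	static int example_set_name(struct example *e, const char *name)
 *	{
 *		e->name = kstrdup_const(name, GFP_KERNEL);
 *		return e->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_free_name(struct example *e)
 *	{
 *		kfree_const(e->name);
 *	}
 *
 * kfree_const() is safe for both cases: it only calls kfree() when the
 * pointer is not in .rodata.
 */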
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
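
/*
 * Illustrative sketch of the calling convention: memdup_user() returns an
 * ERR_PTR() rather than NULL on failure, so callers check with IS_ERR().
 * "user_ptr" and "len" below are hypothetical.
 *
 *	void *buf = memdup_user(user_ptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */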
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
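
/*
 * Illustrative sketch: a common use is in write() handlers for proc- or
 * sysfs-style interfaces, where user data must be treated as a C string.
 * "ubuf", "count", "val" and "ret" below are hypothetical.
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = kstrtoint(strstrip(kbuf), 0, &val);
 *	kfree(kbuf);
 */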
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);
#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
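
/*
 * Worked example (illustrative, not from the original source): with 4K pages
 * (PAGE_SHIFT == 12) the default STACK_RND_MASK is 0x7ff, so the random
 * offset is at most 0x7ff << 12 = 0x7ff000 bytes, i.e. just under 8MB of
 * stack-top randomization, matching the "8MB of VA" comment above.
 */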
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
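
/*
 * Illustrative sketch: a driver that pins user pages typically accounts them
 * up front and unaccounts the same number on error or teardown.  "npages"
 * below is hypothetical.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */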
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
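
/*
 * Illustrative sketch: kvmalloc() is the common wrapper around
 * kvmalloc_node() with NUMA_NO_NODE.  Memory obtained this way must be
 * released with kvfree(), since the caller cannot know whether the kmalloc
 * or the vmalloc path was taken.  "tbl" and "count" below are hypothetical.
 *
 *	tbl = kvmalloc(array_size(count, sizeof(*tbl)), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */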
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
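
/*
 * Illustrative sketch: typical use for key material, where the buffer must
 * be wiped before the memory is returned to the allocator.  "key" and
 * "key_len" below are hypothetical.
 *
 *	key = kvmalloc(key_len, GFP_KERNEL);
 *	...
 *	kvfree_sensitive(key, key_len);
 */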
void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);
/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);
/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);
/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
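
/*
 * Illustrative sketch: vcalloc() is the vmalloc-backed analogue of kcalloc()
 * - the multiplication is overflow-checked and the memory is zeroed.  Free
 * with vfree().  "arr" and "n" below are hypothetical.
 *
 *	struct item *arr = vcalloc(n, sizeof(*arr));
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	vfree(arr);
 */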
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}
/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);
struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}
/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(folio_mapping);
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
	int i, compound, nr, ret;

	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;

	compound = folio_entire_mapcount(folio);
	nr = folio_nr_pages(folio);
	if (folio_test_hugetlb(folio))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!folio_test_anon(folio))
		return ret - compound * nr;
	if (folio_test_double_map(folio))
		ret -= nr;
	return ret;
}
/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}
static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}
int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch the policy to
	 * OVERCOMMIT_NEVER very frequently), the switch is done in the
	 * following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}
int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed << PAGE_SHIFT;
}
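
/*
 * Worked example (illustrative, not from the original source): with
 * sysctl_overcommit_kbytes unset, 8GB of non-hugetlb RAM, the default
 * overcommit ratio of 50 and 2GB of swap, the commit limit is
 * 8GB * 50/100 + 2GB = 6GB.
 */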
/*
 * Make sure vm_committed_as is in its own cacheline and is not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine, including the guest-reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms. For a big platform
 * like a 2S/36C/72T Skylake server, in the worst case where vm_committed_as's
 * spinlock is under severe contention, the time cost could be about 30~40
 * microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}
#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
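
/*
 * Illustrative sketch: because mem_dump_obj() prints with pr_cont(), the
 * caller emits the preamble first, without a trailing newline.  "ptr" below
 * is hypothetical.
 *
 *	pr_info("stale pointer %px:", ptr);
 *	mem_dump_obj(ptr);
 */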
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- e.g., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
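
/*
 * Illustrative sketch: a PFN walker that wants a stable PageOffline() result
 * brackets its read with the freeze/thaw pair described above.  "page" below
 * is hypothetical.
 *
 *	page_offline_freeze();
 *	if (!PageOffline(page)) {
 *		... safe to read page contents ...
 *	}
 *	page_offline_thaw();
 */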
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif