// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
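/*
 * Usage note (editorial addition, not part of the original file): both knobs
 * above are early boot parameters, so huge ioremap/vmalloc mappings can be
 * disabled from the kernel command line, e.g.:
 *
 *	nohugeiomap nohugevmalloc
 *
 * After early_param() parsing they are effectively read-only
 * (__ro_after_init), so there is no runtime toggle.
 */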
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
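/*
 * Example (illustrative sketch, not part of the original file):
 * is_vmalloc_addr() only checks the [VMALLOC_START, VMALLOC_END) window, so
 * it distinguishes vmalloc memory from linear-map (kmalloc) memory:
 *
 *	void *p = kmalloc(64, GFP_KERNEL);   // is_vmalloc_addr(p) == false
 *	void *q = vmalloc(PAGE_SIZE);        // is_vmalloc_addr(q) == true
 *
 * Module space is not covered; use is_vmalloc_or_module_addr() (defined
 * later in this file) for that.
 */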
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	arch_enter_lazy_mmu_mode();

	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);

	arch_leave_lazy_mmu_mode();
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}
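/*
 * Usage sketch (illustrative, not part of the original file):
 * ioremap_page_range() is the low-level backend behind ioremap(). A caller
 * first reserves a VM_IOREMAP area, then maps the physical range into it:
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	addr = (unsigned long)area->addr;
 *	err = ioremap_page_range(addr, addr + size, phys,
 *				 pgprot_noncached(PAGE_KERNEL));
 *
 * The checks above enforce exactly this contract: the [addr, end) range
 * must match a VM_IOREMAP vm_struct edge to edge.
 */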
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;
	pte_t ptent;
	unsigned long size = PAGE_SIZE;

	pte = pte_offset_kernel(pmd, addr);
	arch_enter_lazy_mmu_mode();

	do {
#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_unmap_size(addr, pte);
		if (size != PAGE_SIZE) {
			if (WARN_ON(!IS_ALIGNED(addr, size))) {
				addr = ALIGN_DOWN(addr, size);
				pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
			}
			ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
			if (WARN_ON(end - addr < size))
				size = end - addr;
		} else
#endif
			ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);

	arch_leave_lazy_mmu_mode();
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PMD_SIZE);
			continue;
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PUD_SIZE);
			continue;
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
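/*
 * Example (illustrative, not part of the original file): tearing down a
 * mapping that was established with vmap_pages_range() or
 * ioremap_page_range():
 *
 *	vunmap_range(addr, addr + size);
 *
 * This is the fully-flushed variant; the *_noflush helpers above exist so
 * callers that batch many unmaps (e.g. the lazy purge path later in this
 * file) can issue one TLB flush for a whole batch instead of one per area.
 */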
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	int err = 0;
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	arch_enter_lazy_mmu_mode();

	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte)))) {
			err = -EBUSY;
			break;
		}
		if (WARN_ON(!page)) {
			err = -ENOMEM;
			break;
		}
		if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
			err = -EINVAL;
			break;
		}

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	*mask |= PGTBL_PTE_MODIFIED;

	return err;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
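/*
 * Usage sketch (illustrative, not part of the original file): map four
 * order-0 pages at a previously reserved kernel virtual range with 4K
 * mappings:
 *
 *	struct page *pages[4];
 *	// ...fill pages[]...
 *	err = vmap_pages_range(addr, addr + 4 * PAGE_SIZE,
 *			       PAGE_KERNEL, pages, PAGE_SHIFT);
 *
 * Passing a larger @page_shift lets contiguous, suitably-aligned pages be
 * mapped with huge page table entries via vmap_range_noflush() instead of
 * one PTE per page.
 */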
static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;

	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}
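/*
 * Example (illustrative, not part of the original file): the sparse
 * vm_area API suits callers that reserve a large VM_SPARSE region up front
 * and populate/depopulate page-sized windows of it on demand:
 *
 *	area = get_vm_area(SZ_4M, VM_SPARSE);
 *	// map one page at byte offset off inside the area:
 *	vm_area_map_pages(area, start + off, start + off + PAGE_SIZE, &page);
 *	// ...later...
 *	vm_area_unmap_pages(area, start + off, start + off + PAGE_SIZE);
 *
 * Here "start" stands for (unsigned long)area->addr and "page"/"off" are
 * hypothetical caller state.
 */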
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
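/*
 * Example (illustrative, not part of the original file): translating a
 * vmalloc address back to physical memory, e.g. when building a
 * scatterlist:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	struct page *pg = vmalloc_to_page(buf);
 *	phys_addr_t pa = page_to_phys(pg) + offset_in_page(buf);
 *
 * This works by walking init_mm's page tables, so it is valid for any
 * currently mapped vmap address, including huge mappings (see the
 * p4d/pud/pmd leaf shortcuts in the walk above).
 */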
/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of linked between each other ready to go VAs of certain sizes.
 * An index in the pool-array corresponds to number of pages + 1.
 */
#define MAX_VA_SIZE_PAGES	256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. It allows to balance an access and mitigate
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * Initial setup consists of one single node, i.e. a balancing
 * is fully disabled. Later on, after vmap is initialized these
 * parameters are updated based on a system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn)	\
	for ((vn) = &vmap_nodes[0];	\
		(vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)
static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

static inline unsigned int
node_to_id(struct vmap_node *node)
{
	/* Pointer arithmetic. */
	unsigned int id = node - vmap_nodes;

	if (likely(id < nr_vmap_nodes))
		return id;

	WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
	return 0;
}

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns an encoded node-id, the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}
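/*
 * Worked example (editorial addition, not part of the original file): with
 * nr_vmap_nodes == 4, node id 2 is encoded as (2 + 1) << 8 == 0x300 and
 * stored in va->flags; decode_vn_id(0x300) yields (0x300 >> 8) - 1 == 2.
 * An encoded byte of 0 decodes to (0 >> 8) - 1 == UINT_MAX, i.e. the
 * "no node" case, for which nr_vmap_nodes (never a valid id) is returned
 * without warning.
 */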
static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}
/*
 * Returns a node where a first VA, that satisfies addr < va_end, resides.
 * If success, a node is locked. A user is responsible to unlock it when a
 * VA is no longer needed to be accessed.
 *
 * Returns NULL if nothing found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	unsigned long va_start_lowest;
	struct vmap_node *vn;

repeat:
	va_start_lowest = 0;

	for_each_vmap_node(vn) {
		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

		if (*va)
			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
				va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Check if found VA exists, it might have gone away. In this case we
	 * repeat the search because a VA has been removed concurrently and we
	 * need to proceed to the next one, which is a rare case.
	 */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);

		if (*va)
			return vn;

		spin_unlock(&vn->busy.lock);
		goto repeat;
	}

	return NULL;
}
/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with parent rb_node and correct direction, I name
	 * it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything to
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}
static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * working.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * a search length is adjusted to account for worst case alignment
 * overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * due to "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * parent's start address adding "1" because we do not want
					 * to enter same sub-tree after it has already been checked
					 * and no suitable free block found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
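/*
 * Worked example (editorial addition, not part of the original file): a
 * request of size 8K with align 16K on a 4K-page system searches with
 * length = 8K + 16K - 1, because in the worst case a block's start must be
 * rounded up by almost a full alignment step. An exactly-8K free block at
 * a 4K-aligned (but not 16K-aligned) start would otherwise look like a
 * match and then fail at clipping time.
 */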
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
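/*
 * Worked example (editorial addition, not part of the original file): for
 * a free block [0x1000, 0x5000) and a request of size 0x1000:
 *   - nva_start_addr 0x1000, size 0x4000 -> FL_FIT_TYPE (exact match)
 *   - nva_start_addr 0x1000              -> LE_FIT_TYPE (cut left edge)
 *   - nva_start_addr 0x4000              -> RE_FIT_TYPE (cut right edge)
 *   - nva_start_addr 0x2000              -> NE_FIT_TYPE (split; needs one
 *     extra vmap_area for the remainder, see va_clip() below)
 */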
static __always_inline int
va_clip(struct rb_root *root, struct list_head *head,
		struct vmap_area *va, unsigned long nva_start_addr,
		unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * What happens if an allocation gets failed. Basically,
			 * an "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

static unsigned long
va_alloc(struct vmap_area *va,
		struct rb_root *root, struct list_head *head,
		unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	int ret;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return -ERANGE;

	/* Update the free vmap_area. */
	ret = va_clip(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return -ENOMEM;

	return nva_start_addr;
}
/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise an error value is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks(their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where a requested size corresponds to exactly
	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With adjusted search length an allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return -ENOENT;

	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	if (!IS_ERR_VALUE(nva_start_addr))
		find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	struct vmap_node *vn = addr_to_node(va->va_start);

	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vn->busy.lock);
	unlink_va(va, &vn->busy.root);
	spin_unlock(&vn->busy.lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}
static void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL, *tmp;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	tmp = NULL;
	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
		kmem_cache_free(vmap_area_cachep, va);
}
static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned int idx = (size - 1) / PAGE_SIZE;

	if (idx < MAX_VA_SIZE_PAGES)
		return &vn->pool[idx];

	return NULL;
}
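/*
 * Worked example (editorial addition, not part of the original file): on a
 * 4K-page system a 4K VA maps to pool[0], an 8K VA to pool[1], and a 1M VA
 * (256 pages) to pool[255]; anything larger than
 * MAX_VA_SIZE_PAGES * PAGE_SIZE returns NULL and is handled by the global
 * free tree instead of a per-node pool.
 */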
static bool
node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
{
	struct vmap_pool *vp;

	vp = size_to_va_pool(n, va_size(va));
	if (!vp)
		return false;

	spin_lock(&n->pool_lock);
	list_add(&va->list, &vp->head);
	WRITE_ONCE(vp->len, vp->len + 1);
	spin_unlock(&n->pool_lock);

	return true;
}

static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
		unsigned long align, unsigned long vstart,
		unsigned long vend)
{
	struct vmap_area *va = NULL;
	struct vmap_pool *vp;
	int err = 0;

	vp = size_to_va_pool(vn, size);
	if (!vp || list_empty(&vp->head))
		return NULL;

	spin_lock(&vn->pool_lock);
	if (!list_empty(&vp->head)) {
		va = list_first_entry(&vp->head, struct vmap_area, list);

		if (IS_ALIGNED(va->va_start, align)) {
			/*
			 * Do some sanity check and emit a warning
			 * if one of below checks detects an error.
			 */
			err |= (va_size(va) != size);
			err |= (va->va_start < vstart);
			err |= (va->va_end > vend);

			if (!WARN_ON_ONCE(err)) {
				list_del_init(&va->list);
				WRITE_ONCE(vp->len, vp->len - 1);
			} else
				va = NULL;
		} else {
			list_move_tail(&va->list, &vp->head);
			va = NULL;
		}
	}
	spin_unlock(&vn->pool_lock);

	return va;
}

static struct vmap_area *
node_alloc(unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend,
		unsigned long *addr, unsigned int *vn_id)
{
	struct vmap_area *va;

	*vn_id = 0;
	*addr = -EINVAL;

	/*
	 * Fallback to a global heap if not vmalloc or there
	 * is only one node.
	 */
	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
			nr_vmap_nodes == 1)
		return NULL;

	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
	*vn_id = encode_vn_id(*vn_id);

	if (va)
		*addr = va->va_start;

	return va;
}
static inline void setup_vmalloc_vm(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = vm->requested_size = va_size(va);
	vm->caller = caller;
	va->vm = vm;
}
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend. If vm is passed in, the two will also be bound.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags, struct vm_struct *vm)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	unsigned int vn_id;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	/*
	 * If a VA is obtained from a global heap(if it fails here)
	 * it is anyway marked with this "vn_id" so it is returned
	 * to this pool's node later. Such way gives a possibility
	 * to populate pools based on users demand.
	 *
	 * On success a ready to go VA is returned.
	 */
	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
	if (!va) {
		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
		if (unlikely(!va))
			return ERR_PTR(-ENOMEM);

		/*
		 * Only scan the relevant parts containing pointers to other objects
		 * to avoid false negatives.
		 */
		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
	}

retry:
	if (IS_ERR_VALUE(addr)) {
		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
			size, align, vstart, vend);
		spin_unlock(&free_vmap_area_lock);
	}

	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));

	/*
	 * If an allocation fails, the error value is
	 * returned. Therefore trigger the overflow path.
	 */
	if (IS_ERR_VALUE(addr))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;
	va->flags = (va_flags | vn_id);

	if (vm) {
		vm->addr = (void *)va->va_start;
		vm->size = va_size(va);
		va->vm = vm;
	}

	vn = addr_to_node(va->va_start);

	spin_lock(&vn->busy.lock);
	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
	spin_unlock(&vn->busy.lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		reclaim_and_purge_vmap_areas();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
			size, vstart, vend);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}
int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
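/*
 * Worked example (editorial addition, not part of the original file): with
 * 4K pages, 32MB is 8192 pages. On an 8-CPU system fls(8) == 4, so up to
 * 4 * 8192 == 32768 pages (128MB) of lazily freed VA may accumulate before
 * a purge with a global TLB flush is triggered.
 */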
/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

static void
reclaim_list_global(struct list_head *head)
{
	struct vmap_area *va, *n;

	if (list_empty(head))
		return;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n, head, list)
		merge_or_add_vmap_area_augment(va,
			&free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static void
decay_va_pool_node(struct vmap_node *vn, bool full_decay)
{
	LIST_HEAD(decay_list);
	struct rb_root decay_root = RB_ROOT;
	struct vmap_area *va, *nva;
	unsigned long n_decay, pool_len;
	int i;

	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
		LIST_HEAD(tmp_list);

		if (list_empty(&vn->pool[i].head))
			continue;

		/* Detach the pool, so no-one can access it. */
		spin_lock(&vn->pool_lock);
		list_replace_init(&vn->pool[i].head, &tmp_list);
		spin_unlock(&vn->pool_lock);

		pool_len = n_decay = vn->pool[i].len;
		WRITE_ONCE(vn->pool[i].len, 0);

		/* Decay a pool by ~25% out of left objects. */
		if (!full_decay)
			n_decay >>= 2;
		pool_len -= n_decay;

		list_for_each_entry_safe(va, nva, &tmp_list, list) {
			if (!n_decay--)
				break;

			list_del_init(&va->list);
			merge_or_add_vmap_area(va, &decay_root, &decay_list);
		}

		/*
		 * Attach the pool back if it has been partly decayed.
		 * Please note, it is supposed that nobody(other contexts)
		 * can populate the pool therefore a simple list replace
		 * operation takes place here.
		 */
		if (!list_empty(&tmp_list)) {
			spin_lock(&vn->pool_lock);
			list_replace_init(&tmp_list, &vn->pool[i].head);
			WRITE_ONCE(vn->pool[i].len, pool_len);
			spin_unlock(&vn->pool_lock);
		}
	}

	reclaim_list_global(&decay_list);
}
static void
kasan_release_vmalloc_node(struct vmap_node *vn)
{
	struct vmap_area *va;
	unsigned long start, end;

	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;

	list_for_each_entry(va, &vn->purge_list, list) {
		if (is_vmalloc_or_module_addr((void *) va->va_start))
			kasan_release_vmalloc(va->va_start, va->va_end,
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE);
	}

	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
}

static void purge_vmap_node(struct work_struct *work)
{
	struct vmap_node *vn = container_of(work,
		struct vmap_node, purge_work);
	unsigned long nr_purged_pages = 0;
	struct vmap_area *va, *n_va;
	LIST_HEAD(local_list);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_release_vmalloc_node(vn);

	vn->nr_purged = 0;

	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
		unsigned long nr = va_size(va) >> PAGE_SHIFT;
		unsigned int vn_id = decode_vn_id(va->flags);

		list_del_init(&va->list);

		nr_purged_pages += nr;
		vn->nr_purged++;

		if (is_vn_id_valid(vn_id) && !vn->skip_populate)
			if (node_pool_add_va(vn, va))
				continue;

		/* Go back to global. */
		list_add(&va->list, &local_list);
	}

	atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);

	reclaim_list_global(&local_list);
}
/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
		bool full_pool_decay)
{
	unsigned long nr_purged_areas = 0;
	unsigned int nr_purge_helpers;
	static cpumask_t purge_nodes;
	unsigned int nr_purge_nodes;
	struct vmap_node *vn;
	int i;

	lockdep_assert_held(&vmap_purge_lock);

	/*
	 * Use cpumask to mark which node has to be processed.
	 */
	purge_nodes = CPU_MASK_NONE;

	for_each_vmap_node(vn) {
		INIT_LIST_HEAD(&vn->purge_list);
		vn->skip_populate = full_pool_decay;
		decay_va_pool_node(vn, full_pool_decay);

		if (RB_EMPTY_ROOT(&vn->lazy.root))
			continue;

		spin_lock(&vn->lazy.lock);
		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
		list_replace_init(&vn->lazy.head, &vn->purge_list);
		spin_unlock(&vn->lazy.lock);

		start = min(start, list_first_entry(&vn->purge_list,
			struct vmap_area, list)->va_start);

		end = max(end, list_last_entry(&vn->purge_list,
			struct vmap_area, list)->va_end);

		cpumask_set_cpu(node_to_id(vn), &purge_nodes);
	}

	nr_purge_nodes = cpumask_weight(&purge_nodes);
	if (nr_purge_nodes > 0) {
		flush_tlb_kernel_range(start, end);

		/* One extra worker is scheduled per lazy_max_pages() full set, minus one. */
		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;

		for_each_cpu(i, &purge_nodes) {
			vn = &vmap_nodes[i];

			if (nr_purge_helpers > 0) {
				INIT_WORK(&vn->purge_work, purge_vmap_node);

				if (cpumask_test_cpu(i, cpu_online_mask))
					schedule_work_on(i, &vn->purge_work);
				else
					schedule_work(&vn->purge_work);

				nr_purge_helpers--;
			} else {
				vn->purge_work.func = NULL;
				purge_vmap_node(&vn->purge_work);
				nr_purged_areas += vn->nr_purged;
			}
		}

		for_each_cpu(i, &purge_nodes) {
			vn = &vmap_nodes[i];

			if (vn->purge_work.func) {
				flush_work(&vn->purge_work);
				nr_purged_areas += vn->nr_purged;
			}
		}
	}

	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
	return nr_purged_areas > 0;
}
/*
 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
 */
static void reclaim_and_purge_vmap_areas(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
	mutex_unlock(&vmap_purge_lock);
}

static void drain_vmap_area_work(struct work_struct *work)
{
	mutex_lock(&vmap_purge_lock);
	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
	mutex_unlock(&vmap_purge_lock);
}
/*
 * Free a vmap area, the caller ensuring that the area has been unmapped,
 * unlinked and that flush_cache_vunmap() has been called for the correct
 * range previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy_max = lazy_max_pages();
	unsigned long va_start = va->va_start;
	unsigned int vn_id = decode_vn_id(va->flags);
	struct vmap_node *vn;
	unsigned long nr_lazy;

	if (WARN_ON_ONCE(!list_empty(&va->list)))
		return;

	nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT,
				&vmap_lazy_nr);

	/*
	 * If it was requested by a certain node we would like to
	 * return it to that node, i.e. its pool for later reuse.
	 */
	vn = is_vn_id_valid(vn_id) ?
		id_to_node(vn_id) : addr_to_node(va->va_start);

	spin_lock(&vn->lazy.lock);
	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
	spin_unlock(&vn->lazy.lock);

	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > nr_lazy_max))
		schedule_work(&drain_vmap_work);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	vunmap_range_noflush(va->va_start, va->va_end);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}
struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	int i, j;

	if (unlikely(!vmap_initialized))
		return NULL;

	/*
	 * An addr_to_node_id(addr) converts an address to a node index
	 * where a VA is located. If a VA spans several zones and the passed
	 * addr is not the same as va->va_start, which is not common, we
	 * may need to scan extra nodes. See an example:
	 *
	 *      <----va---->
	 * -|-----|-----|-----|-----|-
	 *     1     2     0     1
	 *
	 * VA resides in node 1 whereas it spans 1, 2 and 0. If the passed
	 * addr is within node 2 or 0 we should do extra work.
	 */
	i = j = addr_to_node_id(addr);
	do {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		va = __find_vmap_area(addr, &vn->busy.root);
		spin_unlock(&vn->busy.lock);

		if (va)
			return va;
	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);

	return NULL;
}

static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	int i, j;

	/*
	 * Check the comment in the find_vmap_area() about the loop.
	 */
	i = j = addr_to_node_id(addr);
	do {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		va = __find_vmap_area(addr, &vn->busy.root);
		if (va)
			unlink_va(va, &vn->busy.root);
		spin_unlock(&vn->busy.lock);

		if (va)
			return va;
	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);

	return NULL;
}
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

/*
 * Purge threshold to prevent overeager purging of fragmented blocks for
 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
 */
#define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)

#define VMAP_RAM		0x1 /* indicates a vm_map_ram area */
#define VMAP_BLOCK		0x2 /* marks out the vmap_block sub-type */
#define VMAP_FLAGS_MASK		0x3
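
/*
 * A worked example of the sizing above (illustrative only; it assumes 4K
 * pages, a 64-bit kernel and NR_CPUS = 64): VMALLOC_PAGES = 128G / 4K =
 * 32M pages, so VMALLOC_PAGES / 64 / 16 = 32768 bits, which VMAP_MIN()
 * clamps down to VMAP_BBMAP_BITS_MAX = 1024. Hence VMAP_BBMAP_BITS = 1024
 * and VMAP_BLOCK_SIZE = 1024 * 4K = 4MB, while a single vb_alloc() request
 * stays capped at VMAP_MAX_ALLOC = 64 pages (256K).
 */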
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;

	/*
	 * An xarray requires extra memory to be allocated
	 * dynamically. If it is an issue, we can use an
	 * rb-tree instead.
	 */
	struct xarray vmap_blocks;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
	unsigned int cpu;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
/*
 * In order to have fast access to any vmap_block associated with a
 * specific address, we use a hash.
 *
 * A per-cpu vmap_block_queue is used in both ways: to serialize
 * access to the free block chains among CPUs (alloc path) and to
 * act as a vmap_block hash (alloc/free paths). It means we overload
 * it, since we already have the per-cpu array which is used as a
 * hash table. When used as a hash, a 'cpu' passed to per_cpu() is
 * not actually a CPU but rather a hash index.
 *
 * The hash function is addr_to_vb_xa(), which hashes any address
 * to the specific index (in the hash) it belongs to. This then uses
 * the per_cpu() macro to access the array with the generated index.
 *
 * An example:
 *
 *  CPU_1  CPU_2  CPU_0
 *    |      |      |
 *    V      V      V
 * 0     10     20     30     40     50     60
 * |------|------|------|------|------|------|...<vmap address space>
 *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
 *
 * - CPU_1 invokes vm_unmap_ram(6); 6 belongs to the CPU0 zone, thus
 *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
 *
 * - CPU_2 invokes vm_unmap_ram(11); 11 belongs to the CPU1 zone, thus
 *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
 *
 * - CPU_0 invokes vm_unmap_ram(20); 20 belongs to the CPU2 zone, thus
 *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
 *
 * This technique almost always avoids lock contention on insert/remove,
 * however the xarray spinlocks protect against any contention that remains.
 */
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;

	/*
	 * Please note, nr_cpu_ids is one past the highest possible
	 * CPU id, and that highest CPU is always possible, so we
	 * never invoke cpumask_next() for an index of nr_cpu_ids - 1.
	 */
	if (!cpu_possible(index))
		index = cpumask_next(index, cpu_possible_mask);

	return &per_cpu(vmap_block_queue, index).vmap_blocks;
}
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
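
/*
 * The two helpers above are always used as a pair: addr_to_vb_xa() picks
 * the per-cpu xarray a block is stored in, while addr_to_vb_idx() computes
 * the key of the block inside that xarray. A minimal lookup sketch, the
 * same pattern vb_free() and vmap_ram_vread_iter() below follow:
 *
 *	struct vmap_block *vb;
 *
 *	vb = xa_load(addr_to_vb_xa(addr), addr_to_vb_idx(addr));
 */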
static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
/**
 * new_vmap_block - allocates a new vmap_block and occupies 2^order pages in
 *                  this block. Of course the pages number can't exceed
 *                  VMAP_BBMAP_BITS.
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;
	struct xarray *xa;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask,
					VMAP_RAM|VMAP_BLOCK, NULL);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	bitmap_set(vb->used_map, 0, (1UL << order));
	INIT_LIST_HEAD(&vb->free_list);
	vb->cpu = raw_smp_processor_id();

	xa = addr_to_vb_xa(va->va_start);
	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(xa, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	/*
	 * list_add_tail_rcu could happen on another CPU
	 * rather than vb->cpu due to task migration, which
	 * is safe as list_add_tail_rcu will ensure the list's
	 * integrity together with list_for_each_rcu from the
	 * read side.
	 */
	vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);

	return vaddr;
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_node *vn;
	struct vmap_block *tmp;
	struct xarray *xa;

	xa = addr_to_vb_xa(vb->va->va_start);
	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	vn = addr_to_node(vb->va->va_start);
	spin_lock(&vn->busy.lock);
	unlink_va(vb->va, &vn->busy.root);
	spin_unlock(&vn->busy.lock);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}
static bool purge_fragmented_block(struct vmap_block *vb,
		struct list_head *purge_list, bool force_purge)
{
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);

	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
	    vb->dirty == VMAP_BBMAP_BITS)
		return false;

	/* Don't overeagerly purge usable blocks unless requested */
	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
		return false;

	/* prevent further allocs after releasing lock */
	WRITE_ONCE(vb->free, 0);
	/* prevent purging it again */
	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
	vb->dirty_min = 0;
	vb->dirty_max = VMAP_BBMAP_BITS;
	spin_lock(&vbq->lock);
	list_del_rcu(&vb->free_list);
	spin_unlock(&vbq->lock);
	list_add_tail(&vb->purge, purge_list);
	return true;
}
static void free_purged_blocks(struct list_head *purge_list)
{
	struct vmap_block *vb, *n_vb;

	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long free = READ_ONCE(vb->free);
		unsigned long dirty = READ_ONCE(vb->dirty);

		if (free + dirty != VMAP_BBMAP_BITS ||
		    dirty == VMAP_BBMAP_BITS)
			continue;

		spin_lock(&vb->lock);
		purge_fragmented_block(vb, &purge, true);
		spin_unlock(&vb->lock);
	}
	rcu_read_unlock();
	free_purged_blocks(&purge);
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants since
		 * get_order(0) returns a funny result. Just warn and
		 * terminate early.
		 */
		return ERR_PTR(-EINVAL);
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = raw_cpu_ptr(&vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		if (READ_ONCE(vb->free) < (1UL << order))
			continue;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		WRITE_ONCE(vb->free, vb->free - (1UL << order));
		bitmap_set(vb->used_map, pages_off, (1UL << order));
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}
static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned int order;
	struct vmap_block *vb;
	struct xarray *xa;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);
	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;

	xa = addr_to_vb_xa(addr);
	vb = xa_load(xa, addr_to_vb_idx(addr));

	spin_lock(&vb->lock);
	bitmap_clear(vb->used_map, offset, (1UL << order));
	spin_unlock(&vb->lock);

	vunmap_range_noflush(addr, addr + size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand the not yet TLB flushed dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	LIST_HEAD(purge_list);
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	mutex_lock(&vmap_purge_lock);

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;
		unsigned long idx;

		rcu_read_lock();
		xa_for_each(&vbq->vmap_blocks, idx, vb) {
			spin_lock(&vb->lock);

			/*
			 * Try to purge a fragmented block first. If it's
			 * not purgeable, check whether there is dirty
			 * space to be flushed.
			 */
			if (!purge_fragmented_block(vb, &purge_list, false) &&
			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				/* Prevent that this is flushed again */
				vb->dirty_min = VMAP_BBMAP_BITS;
				vb->dirty_max = 0;

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}
	free_purged_blocks(&purge_list);

	if (!__purge_vmap_area_lazy(start, end, false) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	_vm_unmap_aliases(ULONG_MAX, 0, 0);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
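
/*
 * A hedged usage sketch (hypothetical caller, not part of this file):
 * code that changes protections of directly mapped pages typically calls
 * vm_unmap_aliases() first, so that no stale lazy vmap alias can still
 * reach those pages with the old permissions:
 *
 *	vm_unmap_aliases();
 *	set_memory_ro(addr, nr_pages);
 */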
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_unlink_vmap_area(addr);
	if (WARN_ON_ONCE(!va))
		return;

	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END,
				node, GFP_KERNEL, VMAP_RAM,
				NULL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
				pages, PAGE_SHIFT) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);

	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
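
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * temporarily map a small batch of pages, use the linear mapping, then
 * tear it down with the same count that was passed to vm_map_ram():
 *
 *	static int demo_map_two_pages(struct page *pages[2])
 *	{
 *		void *va = vm_map_ram(pages, 2, NUMA_NO_NODE);
 *
 *		if (!va)
 *			return -ENOMEM;
 *		memset(va, 0, 2 * PAGE_SIZE);
 *		vm_unmap_ram(va, 2);
 *		return 0;
 *	}
 */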
static struct vm_struct *vmlist __initdata;

static inline unsigned int vm_area_page_order(struct vm_struct *vm)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return vm->page_order;
#else
	return 0;
#endif
}

unsigned int get_vm_area_page_order(struct vm_struct *vm)
{
	return vm_area_page_order(vm);
}

static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	vm->page_order = order;
#else
	BUG_ON(order != 0);
#endif
}
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	unsigned long addr = ALIGN(VMALLOC_START, align);
	struct vm_struct *cur, **p;

	BUG_ON(vmap_initialized);

	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
		if ((unsigned long)cur->addr - addr >= vm->size)
			break;
		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
	}

	BUG_ON(addr > VMALLOC_END - vm->size);
	vm->addr = (void *)addr;
	vm->next = *p;
	*p = vm;
	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long shift, unsigned long flags,
		unsigned long start, unsigned long end, int node,
		gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long requested_size = size;

	BUG_ON(in_interrupt());
	size = ALIGN(size, 1ul << shift);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	area->flags = flags;
	area->caller = caller;
	area->requested_size = requested_size;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	/*
	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
	 * best-effort approach, as they can be mapped outside of vmalloc code.
	 * For VM_ALLOC mappings, the pages are marked as accessible after
	 * getting mapped in __vmalloc_node_range().
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	if (!(flags & VM_ALLOC))
		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
						    KASAN_VMALLOC_PROT_NORMAL);

	return area;
}
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				     const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;
	struct vm_struct *vm;

	might_sleep();

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return NULL;

	va = find_unlink_vmap_area((unsigned long)addr);
	if (!va || !va->vm)
		return NULL;
	vm = va->vm;

	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
	kasan_free_module_shadow(vm);
	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));

	free_unmap_vmap_area(va);
	return vm;
}
static inline void set_area_direct_map(const struct vm_struct *area,
				       int (*set_direct_map)(struct page *page))
{
	unsigned int i;

	/* HUGE_VMALLOC passes small pages to set_direct_map */
	for (i = 0; i < area->nr_pages; i++)
		if (page_address(area->pages[i]))
			set_direct_map(area->pages[i]);
}

/*
 * Flush the vm mapping and reset the direct map.
 */
static void vm_reset_perms(struct vm_struct *area)
{
	unsigned long start = ULONG_MAX, end = 0;
	unsigned int page_order = vm_area_page_order(area);
	int flush_dmap = 0;
	unsigned int i;

	/*
	 * Find the start and end range of the direct mappings to make sure that
	 * the vm_unmap_aliases() flush includes the direct map.
	 */
	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
		unsigned long addr = (unsigned long)page_address(area->pages[i]);

		if (addr) {
			unsigned long page_size;

			page_size = PAGE_SIZE << page_order;
			start = min(addr, start);
			end = max(addr + page_size, end);
			flush_dmap = 1;
		}
	}

	/*
	 * Set direct map to something invalid so that it won't be cached if
	 * there are any accesses after the TLB flush, then flush the TLB and
	 * reset the direct map permissions to the default.
	 */
	set_area_direct_map(area, set_direct_map_invalid_noflush);
	_vm_unmap_aliases(start, end, flush_dmap);
	set_area_direct_map(area, set_direct_map_default_noflush);
}
static void delayed_vfree_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		vfree(llnode);
}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	  memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	BUG_ON(in_nmi());
	kmemleak_free(addr);

	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	if (addr && llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}
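
/*
 * A usage sketch (hypothetical): unlike vfree(), this variant is safe in
 * atomic context, e.g. when the last reference to a vmalloc'ed buffer is
 * dropped from a timer or interrupt path:
 *
 *	spin_lock_irqsave(&obj->lock, flags);
 *	buf = obj->buf;
 *	obj->buf = NULL;
 *	spin_unlock_irqrestore(&obj->lock, flags);
 *	vfree_atomic(buf);
 */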
/**
 * vfree - Release memory allocated by vmalloc()
 * @addr:  Memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from one of the vmalloc() family of APIs.  This will usually also free the
 * physical memory underlying the virtual allocation, but that memory is
 * reference counted, so it will not be freed until the last user goes away.
 *
 * If @addr is NULL, no operation is performed.
 *
 * Context:
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context (strictly speaking, it could be
 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea).
 */
void vfree(const void *addr)
{
	struct vm_struct *vm;
	int i;

	if (unlikely(in_interrupt())) {
		vfree_atomic(addr);
		return;
	}

	BUG_ON(in_nmi());
	kmemleak_free(addr);
	might_sleep();

	if (!addr)
		return;

	vm = remove_vm_area(addr);
	if (unlikely(!vm)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
		vm_reset_perms(vm);
	/* All pages of vm should be charged to same memcg, so use first one. */
	if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
		mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
	for (i = 0; i < vm->nr_pages; i++) {
		struct page *page = vm->pages[i];

		BUG_ON(!page);
		/*
		 * High-order allocs for huge vmallocs are split, so
		 * can be freed as an array of order-0 allocations.
		 */
		__free_page(page);
		cond_resched();
	}
	if (!(vm->flags & VM_MAP_PUT_PAGES))
		atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
	kvfree(vm->pages);
	kfree(vm);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	struct vm_struct *vm;

	BUG_ON(in_interrupt());
	might_sleep();

	if (!addr)
		return;
	vm = remove_vm_area(addr);
	if (unlikely(!vm)) {
		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
				addr);
		return;
	}
	kfree(vm);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
 * (which must be kmalloc or vmalloc memory) and one reference per page in it
 * are transferred from the caller to vmap(), and will be freed / dropped when
 * vfree() is called on the return value.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;
	unsigned long size;		/* In bytes */

	might_sleep();

	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
		return NULL;

	/*
	 * Your top guard is someone else's bottom guard. Not having a top
	 * guard compromises someone else's mappings too.
	 */
	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
		flags &= ~VM_NO_GUARD;

	if (count > totalram_pages())
		return NULL;

	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
				pages, PAGE_SHIFT) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	if (flags & VM_MAP_PUT_PAGES) {
		area->pages = pages;
		area->nr_pages = count;
	}
	return area->addr;
}
EXPORT_SYMBOL(vmap);
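
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * stitch caller-allocated pages into one contiguous kernel mapping and
 * release the mapping with vunmap() when done:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 *
 * The caller keeps ownership of the pages array here; passing
 * VM_MAP_PUT_PAGES instead would transfer the array and one reference
 * per page to vfree().
 */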
#ifdef CONFIG_VMAP_PFN
struct vmap_pfn_data {
	unsigned long	*pfns;
	pgprot_t	prot;
	unsigned int	idx;
};

static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
	struct vmap_pfn_data *data = private;
	unsigned long pfn = data->pfns[data->idx];
	pte_t ptent;

	if (WARN_ON_ONCE(pfn_valid(pfn)))
		return -EINVAL;

	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
	set_pte_at(&init_mm, addr, pte, ptent);

	data->idx++;
	return 0;
}

/**
 * vmap_pfn - map an array of PFNs into virtually contiguous space
 * @pfns: array of PFNs
 * @count: number of pages to map
 * @prot: page protection for the mapping
 *
 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
 * the start address of the mapping.
 */
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
{
	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
	struct vm_struct *area;

	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
		free_vm_area(area);
		return NULL;
	}

	flush_cache_vmap((unsigned long)area->addr,
			 (unsigned long)area->addr + count * PAGE_SIZE);

	return area->addr;
}
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */
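
/*
 * A usage sketch (hypothetical): vmap_pfn() is meant for PFNs that have
 * no struct page, e.g. device memory carved out of a PCI BAR; PFNs for
 * which pfn_valid() is true are rejected above. Here base_pfn is an
 * assumed, driver-provided PFN:
 *
 *	unsigned long pfns[2] = { base_pfn, base_pfn + 1 };
 *	void *va = vmap_pfn(pfns, 2, pgprot_writecombine(PAGE_KERNEL));
 */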
static inline unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid,
		unsigned int order, unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;
	struct page *page;
	int i;

	/*
	 * For order-0 pages we make use of the bulk allocator; if
	 * the page array is partly or not at all populated due
	 * to failures, fall back to a single page allocator that is
	 * more permissive.
	 */
	if (!order) {
		while (nr_allocated < nr_pages) {
			unsigned int nr, nr_pages_request;

			/*
			 * A maximum allowed request is hard-coded and is 100
			 * pages per call. That is done in order to prevent a
			 * long preemption off scenario in the bulk-allocator
			 * so the range is [1:100].
			 */
			nr_pages_request = min(100U, nr_pages - nr_allocated);

			/* memory allocation should consider mempolicy, we can't
			 * wrongly use nearest node when nid == NUMA_NO_NODE,
			 * otherwise memory may be allocated in only one node,
			 * but mempolicy wants to alloc memory by interleaving.
			 */
			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
				nr = alloc_pages_bulk_mempolicy_noprof(gfp,
							nr_pages_request,
							pages + nr_allocated);
			else
				nr = alloc_pages_bulk_node_noprof(gfp, nid,
							nr_pages_request,
							pages + nr_allocated);

			nr_allocated += nr;
			cond_resched();

			/*
			 * If zero or only part of the pages were obtained,
			 * fall back to a single page allocator.
			 */
			if (nr != nr_pages_request)
				break;
		}
	}

	/* High-order pages or fallback path if "bulk" fails. */
	while (nr_allocated < nr_pages) {
		if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
			break;

		if (nid == NUMA_NO_NODE)
			page = alloc_pages_noprof(gfp, order);
		else
			page = alloc_pages_node_noprof(nid, gfp, order);

		if (unlikely(!page))
			break;

		/*
		 * High-order allocations must be able to be treated as
		 * independent small pages by callers (as they can with
		 * small-page vmallocs). Some drivers do their own refcounting
		 * on vmalloc_to_page() pages, some use page->mapping,
		 * page->lru, etc.
		 */
		if (order)
			split_page(page, order);

		/*
		 * Careful, we allocate and map page-order pages, but
		 * tracking is done per PAGE_SIZE page so as to keep the
		 * vm_struct APIs independent of the physical/mapped size.
		 */
		for (i = 0; i < (1U << order); i++)
			pages[nr_allocated + i] = page + i;

		cond_resched();
		nr_allocated += 1U << order;
	}

	return nr_allocated;
}
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, unsigned int page_shift,
				 int node)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	bool nofail = gfp_mask & __GFP_NOFAIL;
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);
	unsigned long array_size;
	unsigned int nr_small_pages = size >> PAGE_SHIFT;
	unsigned int page_order;
	unsigned int flags;
	int ret;

	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);

	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
		gfp_mask |= __GFP_HIGHMEM;

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
					area->caller);
	} else {
		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
	}

	if (!area->pages) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, failed to allocate page array size %lu",
			nr_small_pages * PAGE_SIZE, array_size);
		free_vm_area(area);
		return NULL;
	}

	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
	page_order = vm_area_page_order(area);

	/*
	 * High-order nofail allocations are really expensive and
	 * potentially dangerous (pre-mature OOM, disruptive reclaim
	 * and compaction etc.).
	 *
	 * Please note, the __vmalloc_node_range_noprof() falls back
	 * to order-0 pages if a high-order attempt is unsuccessful.
	 */
	area->nr_pages = vm_area_alloc_pages((page_order ?
		gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
		node, page_order, nr_small_pages, area->pages);

	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
	/* All pages of vm should be charged to same memcg, so use first one. */
	if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
		mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
				     area->nr_pages);

	/*
	 * If not enough pages were obtained to accomplish an
	 * allocation request, free them via vfree() if any.
	 */
	if (area->nr_pages != nr_small_pages) {
		/*
		 * vm_area_alloc_pages() can fail due to insufficient memory but
		 * also:
		 *
		 * - a pending fatal signal
		 * - insufficient huge page-order pages
		 *
		 * Since we always retry allocations at order-0 in the huge page
		 * case a warning for either is spurious.
		 */
		if (!fatal_signal_pending(current) && page_order == 0)
			warn_alloc(gfp_mask, NULL,
				"vmalloc error: size %lu, failed to allocate pages",
				area->nr_pages * PAGE_SIZE);
		goto fail;
	}

	/*
	 * page tables allocations ignore external gfp mask, enforce it
	 * by the scope API
	 */
	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		flags = memalloc_nofs_save();
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		flags = memalloc_noio_save();

	do {
		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
			page_shift);
		if (nofail && (ret < 0))
			schedule_timeout_uninterruptible(1);
	} while (nofail && (ret < 0));

	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		memalloc_nofs_restore(flags);
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		memalloc_noio_restore(flags);

	if (ret < 0) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, failed to map pages",
			area->nr_pages * PAGE_SIZE);
		goto fail;
	}

	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:		  allocation size
 * @align:		  desired alignment
 * @start:		  vm area range start
 * @end:		  vm area range end
 * @gfp_mask:		  flags for the page level allocator
 * @prot:		  protection mask for the allocated pages
 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:		  node to use for allocation or NUMA_NO_NODE
 * @caller:		  caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Please note that the full set of gfp
 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
 * supported.
 * Zone modifiers are not supported. From the reclaim modifiers
 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
 * __GFP_RETRY_MAYFAIL are not supported).
 *
 * __GFP_NOWARN can be used to suppress failure messages.
 *
 * Map them into contiguous kernel virtual space, using a pagetable
 * protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *ret;
	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
	unsigned long original_align = align;
	unsigned int shift = PAGE_SHIFT;

	if (WARN_ON_ONCE(!size))
		return NULL;

	if ((size >> PAGE_SHIFT) > totalram_pages()) {
		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, exceeds total pages",
			size);
		return NULL;
	}

	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
		/*
		 * Try huge pages. Only try for PAGE_KERNEL allocations,
		 * others like modules don't yet expect huge pages in
		 * their allocations due to apply_to_page_range not
		 * supporting them.
		 */
		if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
			shift = PMD_SHIFT;
		else
			shift = arch_vmap_pte_supported_shift(size);

		align = max(original_align, 1UL << shift);
	}

again:
	area = __get_vm_area_node(size, align, shift, VM_ALLOC |
				  VM_UNINITIALIZED | vm_flags, start, end, node,
				  gfp_mask, caller);
	if (!area) {
		bool nofail = gfp_mask & __GFP_NOFAIL;

		warn_alloc(gfp_mask, NULL,
			"vmalloc error: size %lu, vm_struct allocation failed%s",
			size, (nofail) ? ". Retrying." : "");
		if (nofail) {
			schedule_timeout_uninterruptible(1);
			goto again;
		}
		goto fail;
	}

	/*
	 * Prepare arguments for __vmalloc_area_node() and
	 * kasan_unpoison_vmalloc().
	 */
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
		if (kasan_hw_tags_enabled()) {
			/*
			 * Modify protection bits to allow tagging.
			 * This must be done before mapping.
			 */
			prot = arch_vmap_pgprot_tagged(prot);

			/*
			 * Skip page_alloc poisoning and zeroing for physical
			 * pages backing VM_ALLOC mapping. Memory is instead
			 * poisoned and zeroed by kasan_unpoison_vmalloc().
			 */
			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
		}

		/* Take note that the mapping is PAGE_KERNEL. */
		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
	}

	/* Allocate physical pages and map them into vmalloc space. */
	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
	if (!ret)
		goto fail;

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * The condition for setting KASAN_VMALLOC_INIT should complement the
	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
	 * to make sure that memory is initialized under the same conditions.
	 * Tag-based KASAN modes only assign tags to normal non-executable
	 * allocations, see __kasan_unpoison_vmalloc().
	 */
	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
	    (gfp_mask & __GFP_SKIP_ZERO))
		kasan_flags |= KASAN_VMALLOC_INIT;
	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
	area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	if (!(vm_flags & VM_DEFER_KMEMLEAK))
		kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);

	return area->addr;

fail:
	if (shift > PAGE_SHIFT) {
		shift = PAGE_SHIFT;
		align = original_align;
		goto again;
	}

	return NULL;
}
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	    allocation size
 * @align:	    desired alignment
 * @gfp_mask:	    flags for the page level allocator
 * @node:	    node to use for allocation or NUMA_NO_NODE
 * @caller:	    caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
 * This is only for performance analysis of vmalloc and stress purposes.
 * It is required by the vmalloc test module, therefore do not use it
 * other than that.
 */
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
#endif

void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc_noprof);
/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_noprof(unsigned long size)
{
	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_noprof);
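
/*
 * A minimal usage sketch (hypothetical): the common pattern for large,
 * physically non-contiguous buffers that only need to be virtually
 * contiguous, with struct foo standing in for any caller-defined type:
 *
 *	struct foo *buf = vmalloc(array_size(nr_entries, sizeof(*buf)));
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */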
/**
 * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages
 * @size:	  allocation size
 * @gfp_mask:	  flags for the page level allocator
 * @node:	  node to use for allocation or NUMA_NO_NODE
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * If @size is greater than or equal to PMD_SIZE, allow using
 * huge pages for the memory.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
{
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
					   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
					   node, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_noprof(unsigned long size)
{
	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_noprof);
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_user_noprof(unsigned long size)
{
	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user_noprof);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	  allocation size
 * @node:	  numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_node_noprof(unsigned long size, int node)
{
	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node_noprof);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_node_noprof(unsigned long size, int node)
{
	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_node_noprof);
/**
 * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
 * @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or vfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
 *         failure
 */
void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	struct vm_struct *vm = NULL;
	size_t alloced_size = 0;
	size_t old_size = 0;
	void *n;

	if (!size) {
		vfree(p);
		return NULL;
	}

	if (p) {
		vm = find_vm_area(p);
		if (unlikely(!vm)) {
			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
			return NULL;
		}

		alloced_size = get_vm_area_size(vm);
		old_size = vm->requested_size;
		if (WARN(alloced_size < old_size,
			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
			return NULL;
	}

	/*
	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
	 * would be a good heuristic for when to shrink the vm_area?
	 */
	if (size <= old_size) {
		/* Zero out "freed" memory, potentially for future realloc. */
		if (want_init_on_free() || want_init_on_alloc(flags))
			memset((void *)p + size, 0, old_size - size);
		vm->requested_size = size;
		kasan_poison_vmalloc(p + size, old_size - size);
		return (void *)p;
	}

	/*
	 * We already have the bytes available in the allocation; use them.
	 */
	if (size <= alloced_size) {
		kasan_unpoison_vmalloc(p + old_size, size - old_size,
				       KASAN_VMALLOC_PROT_NORMAL);
		/*
		 * No need to zero memory here, as unused memory will have
		 * already been zeroed at initial allocation time or during
		 * realloc shrink time.
		 */
		vm->requested_size = size;
		return (void *)p;
	}

	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
	n = __vmalloc_noprof(size, flags);
	if (!n)
		return NULL;

	if (p) {
		memcpy(n, p, old_size);
		vfree(p);
	}

	return n;
}
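
/*
 * A usage sketch (hypothetical): grow a buffer while preserving its
 * contents. As with realloc(), assign through a temporary so the old
 * buffer is not leaked if the reallocation fails:
 *
 *	void *tmp = vrealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;	(buf is still valid here)
 *	buf = tmp;
 *
 * As documented above, a caller relying on __GFP_ZERO must pass it on
 * every call for the same allocation, starting with the first one.
 */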
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_noprof(unsigned long size)
{
	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_noprof);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	     allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user_noprof(unsigned long size)
{
	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user_noprof);
/*
 * Atomically zero bytes in the iterator.
 *
 * Returns the number of zeroed bytes.
 */
static size_t zero_iter(struct iov_iter *iter, size_t count)
{
	size_t remains = count;

	while (remains > 0) {
		size_t num, copied;

		num = min_t(size_t, remains, PAGE_SIZE);
		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
		remains -= copied;

		if (copied < num)
			break;
	}

	return count - remains;
}

/*
 * Small helper routine, copy contents to iter from addr.
 * If the page is not present, fill zero.
 *
 * Returns the number of copied bytes.
 */
static size_t aligned_vread_iter(struct iov_iter *iter,
				 const char *addr, size_t count)
{
	size_t remains = count;
	struct page *page;

	while (remains > 0) {
		unsigned long offset, length;
		size_t copied = 0;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > remains)
			length = remains;
		page = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock. But
		 * adding a lock here means that we need to add the overhead of
		 * vmalloc()/vfree() calls for this _debug_ interface, which is
		 * rarely used. Instead of that, we'll use a local mapping via
		 * copy_page_to_iter_nofault() and accept a small overhead in
		 * this access function.
		 */
		if (page)
			copied = copy_page_to_iter_nofault(page, offset,
							   length, iter);
		else
			copied = zero_iter(iter, length);

		addr += copied;
		remains -= copied;

		if (copied != length)
			break;
	}

	return count - remains;
}
/*
 * Read from a vm_map_ram region of memory.
 *
 * Returns the number of copied bytes.
 */
static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
				  size_t count, unsigned long flags)
{
	char *start;
	struct vmap_block *vb;
	struct xarray *xa;
	unsigned long offset;
	unsigned int rs, re;
	size_t remains, n;

	/*
	 * If it's an area created by the vm_map_ram() interface directly, but
	 * not further subdivided and delegated to a vmap_block, handle it here.
	 */
	if (!(flags & VMAP_BLOCK))
		return aligned_vread_iter(iter, addr, count);

	remains = count;

	/*
	 * The area is split into regions and tracked with a vmap_block; read
	 * out each region and zero-fill the holes between regions.
	 */
	xa = addr_to_vb_xa((unsigned long) addr);
	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
	if (!vb)
		goto finished_zero;

	spin_lock(&vb->lock);
	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
		spin_unlock(&vb->lock);
		goto finished_zero;
	}

	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
		size_t copied;

		if (remains == 0)
			goto finished;

		start = vmap_block_vaddr(vb->va->va_start, rs);

		if (addr < start) {
			size_t to_zero = min_t(size_t, start - addr, remains);
			size_t zeroed = zero_iter(iter, to_zero);

			addr += zeroed;
			remains -= zeroed;

			if (remains == 0 || zeroed != to_zero)
				goto finished;
		}

		/* It could start reading from the middle of a used region. */
		offset = offset_in_page(addr);
		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
		if (n > remains)
			n = remains;

		copied = aligned_vread_iter(iter, start + offset, n);

		addr += copied;
		remains -= copied;

		if (copied != n)
			goto finished;
	}

	spin_unlock(&vb->lock);

finished_zero:
	/* zero-fill the left dirty or free regions */
	return count - remains + zero_iter(iter, remains);
finished:
	/* We couldn't copy/zero everything */
	spin_unlock(&vb->lock);
	return count - remains;
}
4373 * vread_iter() - read vmalloc area in a safe way to an iterator.
4374 * @iter: the iterator to which data should be written.
4375 * @addr: vm address.
4376 * @count: number of bytes to be read.
4378 * This function checks that addr is a valid vmalloc'ed area, and
4379 * copy data from that area to a given buffer. If the given memory range
4380 * of [addr...addr+count) includes some valid address, data is copied to
4381 * proper area of @buf. If there are memory holes, they'll be zero-filled.
4382 * IOREMAP area is treated as memory hole and no copy is done.
4384 * If [addr...addr+count) doesn't includes any intersects with alive
4385 * vm_struct area, returns 0. @buf should be kernel's buffer.
4387 * Note: In usual ops, vread() is never necessary because the caller
4388 * should know vmalloc() area is valid and can use memcpy().
4389 * This is for routines which have to access vmalloc area without
4390 * any information, as /proc/kcore.
4392 * Return: number of bytes for which addr and buf should be increased
4393 * (same number as @count) or %0 if [addr...addr+count) doesn't
4394 * include any intersection with valid vmalloc area
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	size_t n, size, flags, remains;
	unsigned long next;

	addr = kasan_reset_tag(addr);

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	remains = count;

	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
	if (!vn)
		goto finished_zero;

	/* no intersects with alive vmap_area */
	if ((unsigned long)addr + remains <= va->va_start)
		goto finished_zero;

	do {
		size_t copied;

		if (remains == 0)
			goto finished;

		vm = va->vm;
		flags = va->flags & VMAP_FLAGS_MASK;
		/*
		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it
		 * must be set together with VMAP_RAM.
		 */
		WARN_ON(flags == VMAP_BLOCK);

		if (!vm && !flags)
			goto next_va;

		if (vm && (vm->flags & VM_UNINITIALIZED))
			goto next_va;

		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		vaddr = (char *) va->va_start;
		size = vm ? get_vm_area_size(vm) : va_size(va);

		if (addr >= vaddr + size)
			goto next_va;

		if (addr < vaddr) {
			size_t to_zero = min_t(size_t, vaddr - addr, remains);
			size_t zeroed = zero_iter(iter, to_zero);

			addr += zeroed;
			remains -= zeroed;

			if (remains == 0 || zeroed != to_zero)
				goto finished;
		}

		n = vaddr + size - addr;
		if (n > remains)
			n = remains;

		if (flags & VMAP_RAM)
			copied = vmap_ram_vread_iter(iter, addr, n, flags);
		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
			copied = aligned_vread_iter(iter, addr, n);
		else /* IOREMAP | SPARSE area is treated as a memory hole */
			copied = zero_iter(iter, n);

		addr += copied;
		remains -= copied;

		if (copied != n)
			goto finished;

next_va:
		next = va->va_end;
		spin_unlock(&vn->busy.lock);
	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));

finished_zero:
	if (vn)
		spin_unlock(&vn->busy.lock);

	/* zero-fill memory holes */
	return count - remains + zero_iter(iter, remains);
finished:
	/* Nothing remains, or we couldn't copy/zero everything. */
	if (vn)
		spin_unlock(&vn->busy.lock);

	return count - remains;
}
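
/*
 * Editor's sketch (not part of the original file): how a /proc/kcore
 * style reader might drive vread_iter() with a kernel-side iov_iter.
 * example_read_vmalloc() is a hypothetical name.
 */
#if 0
static long example_read_vmalloc(void *dst, const char *src, size_t len)
{
	struct kvec kvec = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);

	/* Holes and IOREMAP/SPARSE ranges come back zero-filled. */
	return vread_iter(&iter, src, len);
}
#endif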

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criterion isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
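
/*
 * Editor's sketch (not part of the original file): the canonical caller
 * is a driver's mmap handler exposing a vmalloc_user() buffer, which
 * carries VM_USERMAP and therefore passes the flags check above.
 * example_buf/example_mmap are hypothetical names.
 */
#if 0
static void *example_buf;	/* allocated elsewhere with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the whole VMA, starting vma->vm_pgoff pages into the buffer. */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif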

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
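
/*
 * Editor's sketch (not part of the original file): free_vm_area() pairs
 * with get_vm_area() for callers that reserve a KVA range and install
 * their own mappings. example_reserve_kva() is a hypothetical name.
 */
#if 0
static void example_reserve_kva(void)
{
	struct vm_struct *area;

	area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return;

	/* ... install a mapping at area->addr, use it, tear it down ... */

	free_vm_area(area);	/* removes the area and frees the vm_struct */
}
#endif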

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: vmap_area if it is found. If there is no such area,
 *   the first highest (reverse order) vmap_area is returned, i.e.
 *   va->va_start < addr && va->va_end < addr, or NULL if there
 *   are no areas before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}
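
/*
 * Editor's worked example (not part of the original file): with
 * align = 0x10000 (64 KiB) and a free block ending at va_end =
 * 0x12345678, "va_end & ~(align - 1)" yields 0x12340000, the highest
 * 64 KiB-aligned address not above va_end; min() with vmalloc_end then
 * keeps the candidate inside the vmalloc range.
 */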

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, with the distance between two areas easily
 * going up to gigabytes. To avoid interacting with regular vmallocs,
 * these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit the area. Scanning is repeated
 * till all the areas fit and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = va_clip(&free_vmap_area_root,
			&free_vmap_area_list, va, start, size);
		if (WARN_ON_ONCE(unlikely(ret)))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;
	}

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_node *vn = addr_to_node(vas[area]->va_start);

		spin_lock(&vn->busy.lock);
		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Mark allocated areas as accessible. Do it now as a best-effort
	 * approach, as they can be mapped outside of vmalloc code.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need to remove these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and only when pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		reclaim_and_purge_vmap_areas();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions
	 * that hadn't been successfully added. This relies on
	 * kasan_release_vmalloc being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */
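
/*
 * Editor's sketch (not part of the original file, requires CONFIG_SMP):
 * how the percpu allocator consumes this pair of interfaces. Two
 * congruent areas are requested whose relative offsets are preserved
 * around a common base; the array contents here are illustrative only.
 */
#if 0
static void example_pcpu_vm_areas(void)
{
	static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	static const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (!vms)
		return;

	/* vms[1]->addr - vms[0]->addr == offsets[1] - offsets[0] here. */

	pcpu_free_vm_areas(vms, 2);
}
#endif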

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	struct vmap_node *vn;
	unsigned long addr;
	unsigned int nr_pages;

	addr = PAGE_ALIGN((unsigned long) object);
	vn = addr_to_node(addr);

	if (!spin_trylock(&vn->busy.lock))
		return false;

	va = __find_vmap_area(addr, &vn->busy.root);
	if (!va || !va->vm) {
		spin_unlock(&vn->busy.lock);
		return false;
	}

	vm = va->vm;
	addr = (unsigned long) vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vn->busy.lock);

	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);

	return true;
}
#endif
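
/*
 * Editor's note (not part of the original file): vmalloc_dump_obj() is
 * not called directly. It backs mem_dump_obj() in mm/util.c, which
 * dispatches to the slab, vmalloc or page dumper as appropriate, e.g.:
 *
 *	mem_dump_obj(obj);	(appends the pr_cont() line above
 *				 for vmalloc'ed memory)
 */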

#ifdef CONFIG_PROC_FS

/*
 * Print the number of pages allocated on each memory node.
 *
 * This function can only be called if CONFIG_NUMA is enabled
 * and the VM_UNINITIALIZED bit in v->flags is cleared.
 */
static void show_numa_info(struct seq_file *m, struct vm_struct *v,
				 unsigned int *counters)
{
	unsigned int nr;
	unsigned int step = 1U << vm_area_page_order(v);

	if (!counters)
		return;

	memset(counters, 0, nr_node_ids * sizeof(unsigned int));

	for (nr = 0; nr < v->nr_pages; nr += step)
		counters[page_to_nid(v->pages[nr])] += step;
	for_each_node_state(nr, N_HIGH_MEMORY)
		if (counters[nr])
			seq_printf(m, " N%u=%u", nr, counters[nr]);
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_node *vn;
	struct vmap_area *va;

	for_each_vmap_node(vn) {
		spin_lock(&vn->lazy.lock);
		list_for_each_entry(va, &vn->lazy.head, list) {
			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
				(void *)va->va_start, (void *)va->va_end,
				va->va_end - va->va_start);
		}
		spin_unlock(&vn->lazy.lock);
	}
}

static int vmalloc_info_show(struct seq_file *m, void *p)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	struct vm_struct *v;
	unsigned int *counters;

	if (IS_ENABLED(CONFIG_NUMA))
		counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);

	for_each_vmap_node(vn) {
		spin_lock(&vn->busy.lock);
		list_for_each_entry(va, &vn->busy.head, list) {
			if (!va->vm) {
				if (va->flags & VMAP_RAM)
					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
						(void *)va->va_start, (void *)va->va_end,
						va->va_end - va->va_start);

				continue;
			}

			v = va->vm;

			if (v->flags & VM_UNINITIALIZED)
				continue;

			/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
			smp_rmb();

			seq_printf(m, "0x%pK-0x%pK %7ld",
				v->addr, v->addr + v->size, v->size);

			if (v->caller)
				seq_printf(m, " %pS", v->caller);

			if (v->nr_pages)
				seq_printf(m, " pages=%d", v->nr_pages);

			if (v->phys_addr)
				seq_printf(m, " phys=%pa", &v->phys_addr);

			if (v->flags & VM_IOREMAP)
				seq_puts(m, " ioremap");

			if (v->flags & VM_SPARSE)
				seq_puts(m, " sparse");

			if (v->flags & VM_ALLOC)
				seq_puts(m, " vmalloc");

			if (v->flags & VM_MAP)
				seq_puts(m, " vmap");

			if (v->flags & VM_USERMAP)
				seq_puts(m, " user");

			if (v->flags & VM_DMA_COHERENT)
				seq_puts(m, " dma-coherent");

			if (is_vmalloc_addr(v->pages))
				seq_puts(m, " vpages");

			if (IS_ENABLED(CONFIG_NUMA))
				show_numa_info(m, v, counters);

			seq_putc(m, '\n');
		}
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * As a final step, dump "unpurged" areas.
	 */
	show_purge_info(m);
	if (IS_ENABLED(CONFIG_NUMA))
		kfree(counters);

	return 0;
}

static int __init proc_vmalloc_init(void)
{
	proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
	return 0;
}
module_init(proc_vmalloc_init);

#endif
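
/*
 * Editor's note (not part of the original file): given the format
 * strings above, a /proc/vmallocinfo line looks roughly like the
 * following (addresses illustrative; %pK output depends on
 * kptr_restrict). The 20480-byte size covers four data pages plus the
 * guard page, hence pages=4:
 *
 *	0xffffc90000000000-0xffffc90000005000   20480 load_module+0x1234/0x5678 pages=4 vmalloc N0=4
 */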

static void __init vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *free;
	struct vm_struct *busy;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |           The KVA space           |
	 *  |<--------------------------------->|
	 */
	for (busy = vmlist; busy; busy = busy->next) {
		if ((unsigned long) busy->addr - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = (unsigned long) busy->addr;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
					&free_vmap_area_list);
			}
		}

		vmap_start = (unsigned long) busy->addr + busy->size;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
				&free_vmap_area_list);
		}
	}
}
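
/*
 * Editor's worked example (not part of the original file): with two
 * busy vmlist entries covering [0x8000, 0xc000) and [0x10000, 0x14000),
 * the loop above inserts the free areas [1, 0x8000) and
 * [0xc000, 0x10000), and the trailing block then adds
 * [0x14000, ULONG_MAX).
 */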

static void vmap_init_nodes(void)
{
	struct vmap_node *vn;
	int i;

#if BITS_PER_LONG == 64
	/*
	 * A high threshold of max nodes is fixed and bound to 128,
	 * thus the scale factor is 1 for systems where the number of
	 * cores is less than or equal to the specified threshold.
	 *
	 * As for NUMA-aware nodes: for bigger systems, for example
	 * NUMA with multiple sockets, where we can end up with thousands
	 * of cores in total, "sub-numa-clustering" should be added.
	 *
	 * In this case a NUMA domain is considered a single entity
	 * with dedicated sub-nodes in it which describe one group or
	 * set of cores. Therefore per-domain purging should be added,
	 * as well as per-domain balancing.
	 */
	int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);

	if (n > 1) {
		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
		if (vn) {
			/* Node partition is 16 pages. */
			vmap_zone_size = (1 << 4) * PAGE_SIZE;
			nr_vmap_nodes = n;
			vmap_nodes = vn;
		} else {
			pr_err("Failed to allocate an array. Disable a node layer\n");
		}
	}
#endif

	for_each_vmap_node(vn) {
		vn->busy.root = RB_ROOT;
		INIT_LIST_HEAD(&vn->busy.head);
		spin_lock_init(&vn->busy.lock);

		vn->lazy.root = RB_ROOT;
		INIT_LIST_HEAD(&vn->lazy.head);
		spin_lock_init(&vn->lazy.lock);

		for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
			INIT_LIST_HEAD(&vn->pool[i].head);
			WRITE_ONCE(vn->pool[i].len, 0);
		}

		spin_lock_init(&vn->pool_lock);
	}
}
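
/*
 * Editor's sketch (not part of the original file): once the node array
 * is sized, addresses are spread over it by the addr_to_node() helper
 * defined earlier in this file. The div/mod below is an assumption
 * about that scheme, shown only to make the zone-size choice concrete;
 * example_node_id() is a hypothetical name.
 */
#if 0
static unsigned int example_node_id(unsigned long addr)
{
	/* vmap_zone_size is 16 pages when the node layer is enabled. */
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}
#endif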

static unsigned long
vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count = 0;
	struct vmap_node *vn;
	int i;

	for_each_vmap_node(vn) {
		for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
			count += READ_ONCE(vn->pool[i].len);
	}

	return count ? count : SHRINK_EMPTY;
}

static unsigned long
vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct vmap_node *vn;

	for_each_vmap_node(vn)
		decay_va_pool_node(vn, true);

	return SHRINK_STOP;
}

void __init vmalloc_init(void)
{
	struct shrinker *vmap_node_shrinker;
	struct vmap_area *va;
	struct vmap_node *vn;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, delayed_vfree_work);
		xa_init(&vbq->vmap_blocks);
	}

	/*
	 * Setup nodes before importing vmlist.
	 */
	vmap_init_nodes();

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;

		vn = addr_to_node(va->va_start);
		insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;

	vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
	if (!vmap_node_shrinker) {
		pr_err("Failed to allocate vmap-node shrinker!\n");
		return;
	}

	vmap_node_shrinker->count_objects = vmap_node_shrink_count;
	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
	shrinker_register(vmap_node_shrinker);
}