// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

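/*
 * vfree() may be asked to free memory from contexts that must not sleep
 * (see vfree_atomic() and vfree() called from interrupt context). Such
 * requests are queued on the per-CPU llist below and the actual freeing
 * is deferred to a workqueue.
 */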
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

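/*
 * vmap_range_noflush() below walks (and populates) the kernel page tables
 * to map the physical range starting at @phys_addr onto [@addr, @end),
 * using the largest leaf entries allowed by @max_page_shift. Cache and TLB
 * maintenance is left to the caller.
 */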
static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

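/*
 * check_sparse_vm_area() below validates that [@start, @end) lies fully
 * inside a VM_SPARSE vm_struct before pages are mapped into, or unmapped
 * from, that range.
 */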
static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;

	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with parent rb_node and correct direction, i name
	 * it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything to
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when remove the node and rotate.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * ongoing.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * a search length is adjusted to account for worst case alignment
 * overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * due to "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * parent's start address adding "1" because we do not want
					 * to enter same sub-tree after it has already been checked
					 * and no suitable free block found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

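/*
 * classify_va_fit_type() below reports how the new allocation
 * [nva_start_addr, nva_start_addr + size) sits inside the free @va:
 * covering it entirely (FL), touching its left or right edge (LE/RE),
 * or lying strictly inside it (NE), which forces a split that leaves
 * two remaining free areas.
 */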
static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
		      struct vmap_area *va, unsigned long nva_start_addr,
		      unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * What happens if an allocation gets failed. Basically,
			 * an "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise a vend is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;
	int ret;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks(their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where a requested size corresponds to exactly
	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With adjusted search length an allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags)
{
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
		size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;
	va->flags = va_flags;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		reclaim_and_purge_vmap_areas();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

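/*
 * For example, with 4K pages and 16 online CPUs: fls(16) == 5, so up to
 * 5 * (32MB / 4KB) == 40960 lazily freed pages (160MB of virtual space)
 * may accumulate before a purge is forced.
 */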
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	unsigned int num_purged_areas = 0;
	struct list_head local_purge_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_purge_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_purge_list)))
		goto out;

	start = min(start,
		list_first_entry(&local_purge_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_purge_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);
		num_purged_areas++;

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);

out:
	trace_purge_vmap_area_lazy(start, end, num_purged_areas);
	return num_purged_areas > 0;
}

/*
 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
 */
static void reclaim_and_purge_vmap_areas(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

static void drain_vmap_area_work(struct work_struct *work)
{
	unsigned long nr_lazy;

	do {
		mutex_lock(&vmap_purge_lock);
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);

		/* Recheck if further work is required. */
		nr_lazy = atomic_long_read(&vmap_lazy_nr);
	} while (nr_lazy > lazy_max_pages());
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped,
 * unlinked and flush_cache_vunmap had been called for the correct
 * range previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy_max = lazy_max_pages();
	unsigned long va_start = va->va_start;
	unsigned long nr_lazy;

	if (WARN_ON_ONCE(!list_empty(&va->list)))
		return;

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/*
	 * Merge or place it to the purge tree/list.
	 */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > nr_lazy_max))
		schedule_work(&drain_vmap_work);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	vunmap_range_noflush(va->va_start, va->va_end);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	return va;
}

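/*
 * Like find_vmap_area(), but the found area is also removed from the
 * busy tree/list, so the caller owns it and can free it without racing
 * with other lookups.
 */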
static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr, &vmap_area_root);
	if (va)
		unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

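/*
 * For example, on 64-bit with 4K pages and NR_CPUS = 64:
 * VMALLOC_PAGES = 128G / 4K = 32M pages, and 32M / 64 / 16 = 32768, which
 * is clamped to VMAP_BBMAP_BITS_MAX, so VMAP_BBMAP_BITS = 1024 and
 * VMAP_BLOCK_SIZE = 4MB.
 */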
/*
 * Purge threshold to prevent overeager purging of fragmented blocks for
 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
 */
#define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)

#define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
#define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
#define VMAP_FLAGS_MASK		0x3

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;

	/*
	 * An xarray requires an extra memory dynamically to
	 * be allocated. If it is an issue, we can use rb-tree
	 * instead.
	 */
	struct xarray vmap_blocks;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * In order to have fast access to any "vmap_block" associated with a
 * specific address, we use a hash.
 *
 * A per-cpu vmap_block_queue is used in both ways, to serialize
 * an access to free block chains among CPUs(alloc path) and it
 * also acts as a vmap_block hash(alloc/free paths). It means we
 * overload it, since we already have the per-cpu array which is
 * used as a hash table. When used as a hash a 'cpu' passed to
 * per_cpu() is not actually a CPU but rather a hash index.
 *
 * A hash function is addr_to_vb_xa() which hashes any address
 * to a specific index(in a hash) it belongs to. This then uses a
 * per_cpu() macro to access an array with generated index.
 *
 * An example:
 *
 *  CPU_1  CPU_2  CPU_0
 *    |      |      |
 *    V      V      V
 * 0     10     20     30     40     50     60
 * |------|------|------|------|------|------|...<vmap address space>
 *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
 *
 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
 *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
 *
 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
 *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
 *
 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
 *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
 *
 * This technique almost always avoids lock contention on insert/remove,
 * however xarray spinlocks protect against any contention that remains.
 */
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();

	return &per_cpu(vmap_block_queue, index).vmap_blocks;
}

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	struct xarray *xa;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask,
					VMAP_RAM|VMAP_BLOCK);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	bitmap_set(vb->used_map, 0, (1UL << order));
	INIT_LIST_HEAD(&vb->free_list);

	xa = addr_to_vb_xa(va->va_start);
	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(xa, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vbq = raw_cpu_ptr(&vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	struct xarray *xa;

	xa = addr_to_vb_xa(vb->va->va_start);
	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	spin_lock(&vmap_area_lock);
	unlink_va(vb->va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

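/*
 * Called with vb->lock held. Returns true if the block was queued on
 * @purge_list for freeing, false if it is still (partially) usable.
 */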
static bool purge_fragmented_block(struct vmap_block *vb,
		struct vmap_block_queue *vbq, struct list_head *purge_list,
		bool force_purge)
{
	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
	    vb->dirty == VMAP_BBMAP_BITS)
		return false;

	/* Don't overeagerly purge usable blocks unless requested */
	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
		return false;

	/* prevent further allocs after releasing lock */
	WRITE_ONCE(vb->free, 0);
	/* prevent purging it again */
	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
	vb->dirty_min = 0;
	vb->dirty_max = VMAP_BBMAP_BITS;
	spin_lock(&vbq->lock);
	list_del_rcu(&vb->free_list);
	spin_unlock(&vbq->lock);
	list_add_tail(&vb->purge, purge_list);
	return true;
}

static void free_purged_blocks(struct list_head *purge_list)
{
	struct vmap_block *vb, *n_vb;

	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long free = READ_ONCE(vb->free);
		unsigned long dirty = READ_ONCE(vb->dirty);

		if (free + dirty != VMAP_BBMAP_BITS ||
		    dirty == VMAP_BBMAP_BITS)
			continue;

		spin_lock(&vb->lock);
		purge_fragmented_block(vb, vbq, &purge, true);
		spin_unlock(&vb->lock);
	}
	rcu_read_unlock();
	free_purged_blocks(&purge);
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

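/*
 * vb_alloc() serves vm_map_ram() requests of up to VMAP_MAX_ALLOC pages by
 * carving 2^order pages out of a per-CPU vmap block, falling back to
 * allocating a fresh block when no existing block has room.
 */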
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = raw_cpu_ptr(&vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		if (READ_ONCE(vb->free) < (1UL << order))
			continue;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		WRITE_ONCE(vb->free, vb->free - (1UL << order));
		bitmap_set(vb->used_map, pages_off, (1UL << order));
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned int order;
	struct vmap_block *vb;
	struct xarray *xa;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);
	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;

	xa = addr_to_vb_xa(addr);
	vb = xa_load(xa, addr_to_vb_idx(addr));

	spin_lock(&vb->lock);
	bitmap_clear(vb->used_map, offset, (1UL << order));
	spin_unlock(&vb->lock);

	vunmap_range_noflush(addr, addr + size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand the not yet TLB flushed dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	LIST_HEAD(purge_list);
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	mutex_lock(&vmap_purge_lock);

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;
		unsigned long idx;

		rcu_read_lock();
		xa_for_each(&vbq->vmap_blocks, idx, vb) {
			spin_lock(&vb->lock);

			/*
			 * Try to purge a fragmented block first. If it's
			 * not purgeable, check whether there is dirty
			 * space to be flushed.
			 */
			if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				/* Prevent that this is flushed again */
				vb->dirty_min = VMAP_BBMAP_BITS;
				vb->dirty_max = 0;

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	free_purged_blocks(&purge_list);

	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
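/*
 * Illustrative usage sketch (not part of the original file; the page array
 * and count below are hypothetical). vm_map_ram()/vm_unmap_ram() tear down
 * mappings lazily, so a caller that needs a hard guarantee that no CPU still
 * holds a kernel alias to its pages can follow the unmap with
 * vm_unmap_aliases():
 *
 *	void *va = vm_map_ram(pages, 8, NUMA_NO_NODE);
 *	...
 *	vm_unmap_ram(va, 8);	// unmap is lazy, stale TLB entries may remain
 *	vm_unmap_aliases();	// now no CPU has an alias to the pages
 */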
2406 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2407 * @mem: the pointer returned by vm_map_ram
2408 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2410 void vm_unmap_ram(const void *mem
, unsigned int count
)
2412 unsigned long size
= (unsigned long)count
<< PAGE_SHIFT
;
2413 unsigned long addr
= (unsigned long)kasan_reset_tag(mem
);
2414 struct vmap_area
*va
;
2418 BUG_ON(addr
< VMALLOC_START
);
2419 BUG_ON(addr
> VMALLOC_END
);
2420 BUG_ON(!PAGE_ALIGNED(addr
));
2422 kasan_poison_vmalloc(mem
, size
);
2424 if (likely(count
<= VMAP_MAX_ALLOC
)) {
2425 debug_check_no_locks_freed(mem
, size
);
2426 vb_free(addr
, size
);
2430 va
= find_unlink_vmap_area(addr
);
2431 if (WARN_ON_ONCE(!va
))
2434 debug_check_no_locks_freed((void *)va
->va_start
,
2435 (va
->va_end
- va
->va_start
));
2436 free_unmap_vmap_area(va
);
2438 EXPORT_SYMBOL(vm_unmap_ram
);
2441 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2442 * @pages: an array of pointers to the pages to be mapped
2443 * @count: number of pages
2444 * @node: prefer to allocate data structures on this node
2446 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
2447 * faster than vmap so it's good. But if you mix long-life and short-life
2448 * objects with vm_map_ram(), it could consume lots of address space through
2449 * fragmentation (especially on a 32bit machine). You could see failures in
2450 * the end. Please use this function for short-lived objects.
2452 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2454 void *vm_map_ram(struct page
**pages
, unsigned int count
, int node
)
2456 unsigned long size
= (unsigned long)count
<< PAGE_SHIFT
;
2460 if (likely(count
<= VMAP_MAX_ALLOC
)) {
2461 mem
= vb_alloc(size
, GFP_KERNEL
);
2464 addr
= (unsigned long)mem
;
2466 struct vmap_area
*va
;
2467 va
= alloc_vmap_area(size
, PAGE_SIZE
,
2468 VMALLOC_START
, VMALLOC_END
,
2469 node
, GFP_KERNEL
, VMAP_RAM
);
2473 addr
= va
->va_start
;
2477 if (vmap_pages_range(addr
, addr
+ size
, PAGE_KERNEL
,
2478 pages
, PAGE_SHIFT
) < 0) {
2479 vm_unmap_ram(mem
, count
);
2484 * Mark the pages as accessible, now that they are mapped.
2485 * With hardware tag-based KASAN, marking is skipped for
2486 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2488 mem
= kasan_unpoison_vmalloc(mem
, size
, KASAN_VMALLOC_PROT_NORMAL
);
2492 EXPORT_SYMBOL(vm_map_ram
);
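/*
 * Illustrative usage sketch (not part of the original file; nr, pages, src
 * and the copy are hypothetical). vm_map_ram() is meant for short-lived,
 * small mappings and must be undone with vm_unmap_ram() using the same
 * page count:
 *
 *	void *addr = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	memcpy(addr, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(addr, nr);
 */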
2494 static struct vm_struct
*vmlist __initdata
;
2496 static inline unsigned int vm_area_page_order(struct vm_struct
*vm
)
2498 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2499 return vm
->page_order
;
2505 static inline void set_vm_area_page_order(struct vm_struct
*vm
, unsigned int order
)
2507 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2508 vm
->page_order
= order
;
2515 * vm_area_add_early - add vmap area early during boot
2516 * @vm: vm_struct to add
2518 * This function is used to add fixed kernel vm area to vmlist before
2519 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2520 * should contain proper values and the other fields should be zero.
2522 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2524 void __init
vm_area_add_early(struct vm_struct
*vm
)
2526 struct vm_struct
*tmp
, **p
;
2528 BUG_ON(vmap_initialized
);
2529 for (p
= &vmlist
; (tmp
= *p
) != NULL
; p
= &tmp
->next
) {
2530 if (tmp
->addr
>= vm
->addr
) {
2531 BUG_ON(tmp
->addr
< vm
->addr
+ vm
->size
);
2534 BUG_ON(tmp
->addr
+ tmp
->size
> vm
->addr
);
2541 * vm_area_register_early - register vmap area early during boot
2542 * @vm: vm_struct to register
2543 * @align: requested alignment
2545 * This function is used to register kernel vm area before
2546 * vmalloc_init() is called. @vm->size and @vm->flags should contain
2547 * proper values on entry and other fields should be zero. On return,
2548 * vm->addr contains the allocated address.
2550 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2552 void __init
vm_area_register_early(struct vm_struct
*vm
, size_t align
)
2554 unsigned long addr
= ALIGN(VMALLOC_START
, align
);
2555 struct vm_struct
*cur
, **p
;
2557 BUG_ON(vmap_initialized
);
2559 for (p
= &vmlist
; (cur
= *p
) != NULL
; p
= &cur
->next
) {
2560 if ((unsigned long)cur
->addr
- addr
>= vm
->size
)
2562 addr
= ALIGN((unsigned long)cur
->addr
+ cur
->size
, align
);
2565 BUG_ON(addr
> VMALLOC_END
- vm
->size
);
2566 vm
->addr
= (void *)addr
;
2569 kasan_populate_early_vm_area_shadow(vm
->addr
, vm
->size
);
2572 static void vmap_init_free_space(void)
2574 unsigned long vmap_start
= 1;
2575 const unsigned long vmap_end
= ULONG_MAX
;
2576 struct vmap_area
*busy
, *free
;
2580 * -|-----|.....|-----|-----|-----|.....|-
2582 * |<--------------------------------->|
2584 list_for_each_entry(busy
, &vmap_area_list
, list
) {
2585 if (busy
->va_start
- vmap_start
> 0) {
2586 free
= kmem_cache_zalloc(vmap_area_cachep
, GFP_NOWAIT
);
2587 if (!WARN_ON_ONCE(!free
)) {
2588 free
->va_start
= vmap_start
;
2589 free
->va_end
= busy
->va_start
;
2591 insert_vmap_area_augment(free
, NULL
,
2592 &free_vmap_area_root
,
2593 &free_vmap_area_list
);
2597 vmap_start
= busy
->va_end
;
2600 if (vmap_end
- vmap_start
> 0) {
2601 free
= kmem_cache_zalloc(vmap_area_cachep
, GFP_NOWAIT
);
2602 if (!WARN_ON_ONCE(!free
)) {
2603 free
->va_start
= vmap_start
;
2604 free
->va_end
= vmap_end
;
2606 insert_vmap_area_augment(free
, NULL
,
2607 &free_vmap_area_root
,
2608 &free_vmap_area_list
);
2613 static inline void setup_vmalloc_vm_locked(struct vm_struct
*vm
,
2614 struct vmap_area
*va
, unsigned long flags
, const void *caller
)
2617 vm
->addr
= (void *)va
->va_start
;
2618 vm
->size
= va
->va_end
- va
->va_start
;
2619 vm
->caller
= caller
;
2623 static void setup_vmalloc_vm(struct vm_struct
*vm
, struct vmap_area
*va
,
2624 unsigned long flags
, const void *caller
)
2626 spin_lock(&vmap_area_lock
);
2627 setup_vmalloc_vm_locked(vm
, va
, flags
, caller
);
2628 spin_unlock(&vmap_area_lock
);
2631 static void clear_vm_uninitialized_flag(struct vm_struct
*vm
)
2634 * Before removing VM_UNINITIALIZED,
2635 * we should make sure that vm has proper values.
2636 * Pair with smp_rmb() in show_numa_info().
2639 vm
->flags
&= ~VM_UNINITIALIZED
;
2642 static struct vm_struct
*__get_vm_area_node(unsigned long size
,
2643 unsigned long align
, unsigned long shift
, unsigned long flags
,
2644 unsigned long start
, unsigned long end
, int node
,
2645 gfp_t gfp_mask
, const void *caller
)
2647 struct vmap_area
*va
;
2648 struct vm_struct
*area
;
2649 unsigned long requested_size
= size
;
2651 BUG_ON(in_interrupt());
2652 size
= ALIGN(size
, 1ul << shift
);
2653 if (unlikely(!size
))
2656 if (flags
& VM_IOREMAP
)
2657 align
= 1ul << clamp_t(int, get_count_order_long(size
),
2658 PAGE_SHIFT
, IOREMAP_MAX_ORDER
);
2660 area
= kzalloc_node(sizeof(*area
), gfp_mask
& GFP_RECLAIM_MASK
, node
);
2661 if (unlikely(!area
))
2664 if (!(flags
& VM_NO_GUARD
))
2667 va
= alloc_vmap_area(size
, align
, start
, end
, node
, gfp_mask
, 0);
2673 setup_vmalloc_vm(area
, va
, flags
, caller
);
2676 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2677 * best-effort approach, as they can be mapped outside of vmalloc code.
2678 * For VM_ALLOC mappings, the pages are marked as accessible after
2679 * getting mapped in __vmalloc_node_range().
2680 * With hardware tag-based KASAN, marking is skipped for
2681 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2683 if (!(flags
& VM_ALLOC
))
2684 area
->addr
= kasan_unpoison_vmalloc(area
->addr
, requested_size
,
2685 KASAN_VMALLOC_PROT_NORMAL
);
2690 struct vm_struct
*__get_vm_area_caller(unsigned long size
, unsigned long flags
,
2691 unsigned long start
, unsigned long end
,
2694 return __get_vm_area_node(size
, 1, PAGE_SHIFT
, flags
, start
, end
,
2695 NUMA_NO_NODE
, GFP_KERNEL
, caller
);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
2709 struct vm_struct
*get_vm_area(unsigned long size
, unsigned long flags
)
2711 return __get_vm_area_node(size
, 1, PAGE_SHIFT
, flags
,
2712 VMALLOC_START
, VMALLOC_END
,
2713 NUMA_NO_NODE
, GFP_KERNEL
,
2714 __builtin_return_address(0));
2717 struct vm_struct
*get_vm_area_caller(unsigned long size
, unsigned long flags
,
2720 return __get_vm_area_node(size
, 1, PAGE_SHIFT
, flags
,
2721 VMALLOC_START
, VMALLOC_END
,
2722 NUMA_NO_NODE
, GFP_KERNEL
, caller
);
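/*
 * Illustrative usage sketch (not part of the original file; the size, the
 * physical base and the page protection are placeholders). get_vm_area()
 * only reserves kernel virtual address space; the caller maps something
 * into it afterwards, e.g. with ioremap_page_range(), and releases it with
 * free_vm_area():
 *
 *	struct vm_struct *area = get_vm_area(SZ_64K, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	if (ioremap_page_range((unsigned long)area->addr,
 *			       (unsigned long)area->addr + SZ_64K,
 *			       phys_base, pgprot_noncached(PAGE_KERNEL))) {
 *		free_vm_area(area);
 *		return -ENOMEM;
 *	}
 */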
2726 * find_vm_area - find a continuous kernel virtual area
2727 * @addr: base address
2729 * Search for the kernel VM area starting at @addr, and return it.
2730 * It is up to the caller to do all required locking to keep the returned
2733 * Return: the area descriptor on success or %NULL on failure.
2735 struct vm_struct
*find_vm_area(const void *addr
)
2737 struct vmap_area
*va
;
2739 va
= find_vmap_area((unsigned long)addr
);
2747 * remove_vm_area - find and remove a continuous kernel virtual area
2748 * @addr: base address
2750 * Search for the kernel VM area starting at @addr, and remove it.
2751 * This function returns the found VM area, but using it is NOT safe
2752 * on SMP machines, except for its size or flags.
2754 * Return: the area descriptor on success or %NULL on failure.
2756 struct vm_struct
*remove_vm_area(const void *addr
)
2758 struct vmap_area
*va
;
2759 struct vm_struct
*vm
;
2763 if (WARN(!PAGE_ALIGNED(addr
), "Trying to vfree() bad address (%p)\n",
2767 va
= find_unlink_vmap_area((unsigned long)addr
);
2772 debug_check_no_locks_freed(vm
->addr
, get_vm_area_size(vm
));
2773 debug_check_no_obj_freed(vm
->addr
, get_vm_area_size(vm
));
2774 kasan_free_module_shadow(vm
);
2775 kasan_poison_vmalloc(vm
->addr
, get_vm_area_size(vm
));
2777 free_unmap_vmap_area(va
);
2781 static inline void set_area_direct_map(const struct vm_struct
*area
,
2782 int (*set_direct_map
)(struct page
*page
))
2786 /* HUGE_VMALLOC passes small pages to set_direct_map */
2787 for (i
= 0; i
< area
->nr_pages
; i
++)
2788 if (page_address(area
->pages
[i
]))
2789 set_direct_map(area
->pages
[i
]);
2793 * Flush the vm mapping and reset the direct map.
2795 static void vm_reset_perms(struct vm_struct
*area
)
2797 unsigned long start
= ULONG_MAX
, end
= 0;
2798 unsigned int page_order
= vm_area_page_order(area
);
2803 * Find the start and end range of the direct mappings to make sure that
2804 * the vm_unmap_aliases() flush includes the direct map.
2806 for (i
= 0; i
< area
->nr_pages
; i
+= 1U << page_order
) {
2807 unsigned long addr
= (unsigned long)page_address(area
->pages
[i
]);
2810 unsigned long page_size
;
2812 page_size
= PAGE_SIZE
<< page_order
;
2813 start
= min(addr
, start
);
2814 end
= max(addr
+ page_size
, end
);
2820 * Set direct map to something invalid so that it won't be cached if
2821 * there are any accesses after the TLB flush, then flush the TLB and
2822 * reset the direct map permissions to the default.
2824 set_area_direct_map(area
, set_direct_map_invalid_noflush
);
2825 _vm_unmap_aliases(start
, end
, flush_dmap
);
2826 set_area_direct_map(area
, set_direct_map_default_noflush
);
2829 static void delayed_vfree_work(struct work_struct
*w
)
2831 struct vfree_deferred
*p
= container_of(w
, struct vfree_deferred
, wq
);
2832 struct llist_node
*t
, *llnode
;
2834 llist_for_each_safe(llnode
, t
, llist_del_all(&p
->list
))
2839 * vfree_atomic - release memory allocated by vmalloc()
2840 * @addr: memory base address
2842 * This one is just like vfree() but can be called in any atomic context
2845 void vfree_atomic(const void *addr
)
2847 struct vfree_deferred
*p
= raw_cpu_ptr(&vfree_deferred
);
2850 kmemleak_free(addr
);
2853 * Use raw_cpu_ptr() because this can be called from preemptible
2854 * context. Preemption is absolutely fine here, because the llist_add()
2855 * implementation is lockless, so it works even if we are adding to
2856 * another cpu's list. schedule_work() should be fine with this too.
2858 if (addr
&& llist_add((struct llist_node
*)addr
, &p
->list
))
2859 schedule_work(&p
->wq
);
2863 * vfree - Release memory allocated by vmalloc()
2864 * @addr: Memory base address
2866 * Free the virtually continuous memory area starting at @addr, as obtained
2867 * from one of the vmalloc() family of APIs. This will usually also free the
2868 * physical memory underlying the virtual allocation, but that memory is
2869 * reference counted, so it will not be freed until the last user goes away.
2871 * If @addr is NULL, no operation is performed.
2874 * May sleep if called *not* from interrupt context.
2875 * Must not be called in NMI context (strictly speaking, it could be
2876 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2877 * conventions for vfree() arch-dependent would be a really bad idea).
2879 void vfree(const void *addr
)
2881 struct vm_struct
*vm
;
2884 if (unlikely(in_interrupt())) {
2890 kmemleak_free(addr
);
2896 vm
= remove_vm_area(addr
);
2897 if (unlikely(!vm
)) {
2898 WARN(1, KERN_ERR
"Trying to vfree() nonexistent vm area (%p)\n",
2903 if (unlikely(vm
->flags
& VM_FLUSH_RESET_PERMS
))
2905 for (i
= 0; i
< vm
->nr_pages
; i
++) {
2906 struct page
*page
= vm
->pages
[i
];
2909 mod_memcg_page_state(page
, MEMCG_VMALLOC
, -1);
2911 * High-order allocs for huge vmallocs are split, so
2912 * can be freed as an array of order-0 allocations
2917 atomic_long_sub(vm
->nr_pages
, &nr_vmalloc_pages
);
2921 EXPORT_SYMBOL(vfree
);
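/*
 * Illustrative usage sketch (not part of the original file; foo_entry,
 * foo_table and nentries are hypothetical). The usual pattern is a large,
 * virtually contiguous allocation at init time that is released with
 * vfree(), which accepts NULL and may sleep:
 *
 *	static struct foo_entry *foo_table;
 *
 *	static int foo_init(void)
 *	{
 *		foo_table = vmalloc(array_size(nentries, sizeof(*foo_table)));
 *		if (!foo_table)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void foo_exit(void)
 *	{
 *		vfree(foo_table);
 *	}
 */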
2924 * vunmap - release virtual mapping obtained by vmap()
2925 * @addr: memory base address
2927 * Free the virtually contiguous memory area starting at @addr,
2928 * which was created from the page array passed to vmap().
2930 * Must not be called in interrupt context.
2932 void vunmap(const void *addr
)
2934 struct vm_struct
*vm
;
2936 BUG_ON(in_interrupt());
2941 vm
= remove_vm_area(addr
);
2942 if (unlikely(!vm
)) {
2943 WARN(1, KERN_ERR
"Trying to vunmap() nonexistent vm area (%p)\n",
2949 EXPORT_SYMBOL(vunmap
);
2952 * vmap - map an array of pages into virtually contiguous space
2953 * @pages: array of page pointers
2954 * @count: number of pages to map
2955 * @flags: vm_area->flags
2956 * @prot: page protection for the mapping
2958 * Maps @count pages from @pages into contiguous kernel virtual space.
2959 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2960 * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2961 * are transferred from the caller to vmap(), and will be freed / dropped when
2962 * vfree() is called on the return value.
2964 * Return: the address of the area or %NULL on failure
2966 void *vmap(struct page
**pages
, unsigned int count
,
2967 unsigned long flags
, pgprot_t prot
)
2969 struct vm_struct
*area
;
2971 unsigned long size
; /* In bytes */
2975 if (WARN_ON_ONCE(flags
& VM_FLUSH_RESET_PERMS
))
2979 * Your top guard is someone else's bottom guard. Not having a top
2980 * guard compromises someone else's mappings too.
2982 if (WARN_ON_ONCE(flags
& VM_NO_GUARD
))
2983 flags
&= ~VM_NO_GUARD
;
2985 if (count
> totalram_pages())
2988 size
= (unsigned long)count
<< PAGE_SHIFT
;
2989 area
= get_vm_area_caller(size
, flags
, __builtin_return_address(0));
2993 addr
= (unsigned long)area
->addr
;
2994 if (vmap_pages_range(addr
, addr
+ size
, pgprot_nx(prot
),
2995 pages
, PAGE_SHIFT
) < 0) {
3000 if (flags
& VM_MAP_PUT_PAGES
) {
3001 area
->pages
= pages
;
3002 area
->nr_pages
= count
;
3006 EXPORT_SYMBOL(vmap
);
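/*
 * Illustrative usage sketch (not part of the original file; error handling
 * is trimmed). vmap() gives a contiguous kernel view of pages that were
 * allocated independently; vunmap() drops only the mapping, so the caller
 * still owns the pages:
 *
 *	struct page *pages[16];
 *	void *virt;
 *	int i;
 *
 *	for (i = 0; i < 16; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	virt = vmap(pages, 16, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(virt);
 *	for (i = 0; i < 16; i++)
 *		__free_page(pages[i]);
 */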
3008 #ifdef CONFIG_VMAP_PFN
3009 struct vmap_pfn_data
{
3010 unsigned long *pfns
;
3015 static int vmap_pfn_apply(pte_t
*pte
, unsigned long addr
, void *private)
3017 struct vmap_pfn_data
*data
= private;
3018 unsigned long pfn
= data
->pfns
[data
->idx
];
3021 if (WARN_ON_ONCE(pfn_valid(pfn
)))
3024 ptent
= pte_mkspecial(pfn_pte(pfn
, data
->prot
));
3025 set_pte_at(&init_mm
, addr
, pte
, ptent
);
3032 * vmap_pfn - map an array of PFNs into virtually contiguous space
3033 * @pfns: array of PFNs
3034 * @count: number of pages to map
3035 * @prot: page protection for the mapping
3037 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3038 * the start address of the mapping.
3040 void *vmap_pfn(unsigned long *pfns
, unsigned int count
, pgprot_t prot
)
3042 struct vmap_pfn_data data
= { .pfns
= pfns
, .prot
= pgprot_nx(prot
) };
3043 struct vm_struct
*area
;
3045 area
= get_vm_area_caller(count
* PAGE_SIZE
, VM_IOREMAP
,
3046 __builtin_return_address(0));
3049 if (apply_to_page_range(&init_mm
, (unsigned long)area
->addr
,
3050 count
* PAGE_SIZE
, vmap_pfn_apply
, &data
)) {
3055 flush_cache_vmap((unsigned long)area
->addr
,
3056 (unsigned long)area
->addr
+ count
* PAGE_SIZE
);
3060 EXPORT_SYMBOL_GPL(vmap_pfn
);
3061 #endif /* CONFIG_VMAP_PFN */
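/*
 * Illustrative usage sketch (not part of the original file; the PFN array
 * and the write-combine protection are assumptions, and CONFIG_VMAP_PFN
 * must be enabled). vmap_pfn() is for PFNs that have no struct page, such
 * as device memory, and it refuses pfn_valid() PFNs:
 *
 *	unsigned long pfns[8];	// filled with device-memory PFNs
 *	void *virt;
 *
 *	virt = vmap_pfn(pfns, 8, pgprot_writecombine(PAGE_KERNEL));
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	vunmap(virt);
 */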
3063 static inline unsigned int
3064 vm_area_alloc_pages(gfp_t gfp
, int nid
,
3065 unsigned int order
, unsigned int nr_pages
, struct page
**pages
)
3067 unsigned int nr_allocated
= 0;
3068 gfp_t alloc_gfp
= gfp
;
3069 bool nofail
= false;
3074 * For order-0 pages we make use of bulk allocator, if
3075 * the page array is partly or not at all populated due
3076 * to fails, fallback to a single page allocator that is
3080 /* bulk allocator doesn't support nofail req. officially */
3081 gfp_t bulk_gfp
= gfp
& ~__GFP_NOFAIL
;
3083 while (nr_allocated
< nr_pages
) {
3084 unsigned int nr
, nr_pages_request
;
3087 * A maximum allowed request is hard-coded and is 100
3088 * pages per call. That is done in order to prevent a
3089 * long preemption off scenario in the bulk-allocator
3090 * so the range is [1:100].
3092 nr_pages_request
= min(100U, nr_pages
- nr_allocated
);
3094 /* memory allocation should consider mempolicy, we can't
3095 * wrongly use nearest node when nid == NUMA_NO_NODE,
3096 * otherwise memory may be allocated in only one node,
3097 * but mempolicy wants to alloc memory by interleaving.
3099 if (IS_ENABLED(CONFIG_NUMA
) && nid
== NUMA_NO_NODE
)
3100 nr
= alloc_pages_bulk_array_mempolicy(bulk_gfp
,
3102 pages
+ nr_allocated
);
3105 nr
= alloc_pages_bulk_array_node(bulk_gfp
, nid
,
3107 pages
+ nr_allocated
);
3113 * If zero or pages were obtained partly,
3114 * fallback to a single page allocator.
3116 if (nr
!= nr_pages_request
)
3119 } else if (gfp
& __GFP_NOFAIL
) {
3121 * Higher order nofail allocations are really expensive and
3122 * potentially dangerous (pre-mature OOM, disruptive reclaim
3123 * and compaction etc.
3125 alloc_gfp
&= ~__GFP_NOFAIL
;
3129 /* High-order pages or fallback path if "bulk" fails. */
3130 while (nr_allocated
< nr_pages
) {
3131 if (fatal_signal_pending(current
))
3134 if (nid
== NUMA_NO_NODE
)
3135 page
= alloc_pages(alloc_gfp
, order
);
3137 page
= alloc_pages_node(nid
, alloc_gfp
, order
);
3138 if (unlikely(!page
)) {
3142 /* fall back to the zero order allocations */
3143 alloc_gfp
|= __GFP_NOFAIL
;
		 * Higher order allocations must be able to be treated as
		 * independent small pages by callers (as they can with
		 * small-page vmallocs). Some drivers do their own refcounting
		 * on vmalloc_to_page() pages, some use page->mapping,
3156 split_page(page
, order
);
3159 * Careful, we allocate and map page-order pages, but
3160 * tracking is done per PAGE_SIZE page so as to keep the
3161 * vm_struct APIs independent of the physical/mapped size.
3163 for (i
= 0; i
< (1U << order
); i
++)
3164 pages
[nr_allocated
+ i
] = page
+ i
;
3167 nr_allocated
+= 1U << order
;
3170 return nr_allocated
;
3173 static void *__vmalloc_area_node(struct vm_struct
*area
, gfp_t gfp_mask
,
3174 pgprot_t prot
, unsigned int page_shift
,
3177 const gfp_t nested_gfp
= (gfp_mask
& GFP_RECLAIM_MASK
) | __GFP_ZERO
;
3178 bool nofail
= gfp_mask
& __GFP_NOFAIL
;
3179 unsigned long addr
= (unsigned long)area
->addr
;
3180 unsigned long size
= get_vm_area_size(area
);
3181 unsigned long array_size
;
3182 unsigned int nr_small_pages
= size
>> PAGE_SHIFT
;
3183 unsigned int page_order
;
3187 array_size
= (unsigned long)nr_small_pages
* sizeof(struct page
*);
3189 if (!(gfp_mask
& (GFP_DMA
| GFP_DMA32
)))
3190 gfp_mask
|= __GFP_HIGHMEM
;
3192 /* Please note that the recursion is strictly bounded. */
3193 if (array_size
> PAGE_SIZE
) {
3194 area
->pages
= __vmalloc_node(array_size
, 1, nested_gfp
, node
,
3197 area
->pages
= kmalloc_node(array_size
, nested_gfp
, node
);
3201 warn_alloc(gfp_mask
, NULL
,
3202 "vmalloc error: size %lu, failed to allocated page array size %lu",
3203 nr_small_pages
* PAGE_SIZE
, array_size
);
3208 set_vm_area_page_order(area
, page_shift
- PAGE_SHIFT
);
3209 page_order
= vm_area_page_order(area
);
3211 area
->nr_pages
= vm_area_alloc_pages(gfp_mask
| __GFP_NOWARN
,
3212 node
, page_order
, nr_small_pages
, area
->pages
);
3214 atomic_long_add(area
->nr_pages
, &nr_vmalloc_pages
);
3215 if (gfp_mask
& __GFP_ACCOUNT
) {
3218 for (i
= 0; i
< area
->nr_pages
; i
++)
3219 mod_memcg_page_state(area
->pages
[i
], MEMCG_VMALLOC
, 1);
3223 * If not enough pages were obtained to accomplish an
3224 * allocation request, free them via vfree() if any.
3226 if (area
->nr_pages
!= nr_small_pages
) {
3228 * vm_area_alloc_pages() can fail due to insufficient memory but
3231 * - a pending fatal signal
3232 * - insufficient huge page-order pages
3234 * Since we always retry allocations at order-0 in the huge page
3235 * case a warning for either is spurious.
3237 if (!fatal_signal_pending(current
) && page_order
== 0)
3238 warn_alloc(gfp_mask
, NULL
,
3239 "vmalloc error: size %lu, failed to allocate pages",
3240 area
->nr_pages
* PAGE_SIZE
);
3245 * page tables allocations ignore external gfp mask, enforce it
3248 if ((gfp_mask
& (__GFP_FS
| __GFP_IO
)) == __GFP_IO
)
3249 flags
= memalloc_nofs_save();
3250 else if ((gfp_mask
& (__GFP_FS
| __GFP_IO
)) == 0)
3251 flags
= memalloc_noio_save();
3254 ret
= vmap_pages_range(addr
, addr
+ size
, prot
, area
->pages
,
3256 if (nofail
&& (ret
< 0))
3257 schedule_timeout_uninterruptible(1);
3258 } while (nofail
&& (ret
< 0));
3260 if ((gfp_mask
& (__GFP_FS
| __GFP_IO
)) == __GFP_IO
)
3261 memalloc_nofs_restore(flags
);
3262 else if ((gfp_mask
& (__GFP_FS
| __GFP_IO
)) == 0)
3263 memalloc_noio_restore(flags
);
3266 warn_alloc(gfp_mask
, NULL
,
3267 "vmalloc error: size %lu, failed to map pages",
3268 area
->nr_pages
* PAGE_SIZE
);
3280 * __vmalloc_node_range - allocate virtually contiguous memory
3281 * @size: allocation size
3282 * @align: desired alignment
3283 * @start: vm area range start
3284 * @end: vm area range end
3285 * @gfp_mask: flags for the page level allocator
3286 * @prot: protection mask for the allocated pages
3287 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3288 * @node: node to use for allocation or NUMA_NO_NODE
3289 * @caller: caller's return address
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Please note that the full set of gfp
 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
 * supported.
 *
 * Zone modifiers are not supported. From the reclaim modifiers
 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
 * __GFP_RETRY_MAYFAIL are not supported).
 *
 * __GFP_NOWARN can be used to suppress failure messages.
3302 * Map them into contiguous kernel virtual space, using a pagetable
3303 * protection of @prot.
3305 * Return: the address of the area or %NULL on failure
3307 void *__vmalloc_node_range(unsigned long size
, unsigned long align
,
3308 unsigned long start
, unsigned long end
, gfp_t gfp_mask
,
3309 pgprot_t prot
, unsigned long vm_flags
, int node
,
3312 struct vm_struct
*area
;
3314 kasan_vmalloc_flags_t kasan_flags
= KASAN_VMALLOC_NONE
;
3315 unsigned long real_size
= size
;
3316 unsigned long real_align
= align
;
3317 unsigned int shift
= PAGE_SHIFT
;
3319 if (WARN_ON_ONCE(!size
))
3322 if ((size
>> PAGE_SHIFT
) > totalram_pages()) {
3323 warn_alloc(gfp_mask
, NULL
,
3324 "vmalloc error: size %lu, exceeds total pages",
3329 if (vmap_allow_huge
&& (vm_flags
& VM_ALLOW_HUGE_VMAP
)) {
3330 unsigned long size_per_node
;
3333 * Try huge pages. Only try for PAGE_KERNEL allocations,
3334 * others like modules don't yet expect huge pages in
3335 * their allocations due to apply_to_page_range not
3339 size_per_node
= size
;
3340 if (node
== NUMA_NO_NODE
)
3341 size_per_node
/= num_online_nodes();
3342 if (arch_vmap_pmd_supported(prot
) && size_per_node
>= PMD_SIZE
)
3345 shift
= arch_vmap_pte_supported_shift(size_per_node
);
3347 align
= max(real_align
, 1UL << shift
);
3348 size
= ALIGN(real_size
, 1UL << shift
);
3352 area
= __get_vm_area_node(real_size
, align
, shift
, VM_ALLOC
|
3353 VM_UNINITIALIZED
| vm_flags
, start
, end
, node
,
3356 bool nofail
= gfp_mask
& __GFP_NOFAIL
;
3357 warn_alloc(gfp_mask
, NULL
,
3358 "vmalloc error: size %lu, vm_struct allocation failed%s",
3359 real_size
, (nofail
) ? ". Retrying." : "");
3361 schedule_timeout_uninterruptible(1);
3368 * Prepare arguments for __vmalloc_area_node() and
3369 * kasan_unpoison_vmalloc().
3371 if (pgprot_val(prot
) == pgprot_val(PAGE_KERNEL
)) {
3372 if (kasan_hw_tags_enabled()) {
3374 * Modify protection bits to allow tagging.
3375 * This must be done before mapping.
3377 prot
= arch_vmap_pgprot_tagged(prot
);
3380 * Skip page_alloc poisoning and zeroing for physical
3381 * pages backing VM_ALLOC mapping. Memory is instead
3382 * poisoned and zeroed by kasan_unpoison_vmalloc().
3384 gfp_mask
|= __GFP_SKIP_KASAN
| __GFP_SKIP_ZERO
;
3387 /* Take note that the mapping is PAGE_KERNEL. */
3388 kasan_flags
|= KASAN_VMALLOC_PROT_NORMAL
;
3391 /* Allocate physical pages and map them into vmalloc space. */
3392 ret
= __vmalloc_area_node(area
, gfp_mask
, prot
, shift
, node
);
3397 * Mark the pages as accessible, now that they are mapped.
3398 * The condition for setting KASAN_VMALLOC_INIT should complement the
3399 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3400 * to make sure that memory is initialized under the same conditions.
3401 * Tag-based KASAN modes only assign tags to normal non-executable
3402 * allocations, see __kasan_unpoison_vmalloc().
3404 kasan_flags
|= KASAN_VMALLOC_VM_ALLOC
;
3405 if (!want_init_on_free() && want_init_on_alloc(gfp_mask
) &&
3406 (gfp_mask
& __GFP_SKIP_ZERO
))
3407 kasan_flags
|= KASAN_VMALLOC_INIT
;
3408 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3409 area
->addr
= kasan_unpoison_vmalloc(area
->addr
, real_size
, kasan_flags
);
3412 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3413 * flag. It means that vm_struct is not fully initialized.
3414 * Now, it is fully initialized, so remove this flag here.
3416 clear_vm_uninitialized_flag(area
);
3418 size
= PAGE_ALIGN(size
);
3419 if (!(vm_flags
& VM_DEFER_KMEMLEAK
))
3420 kmemleak_vmalloc(area
, size
, gfp_mask
);
3425 if (shift
> PAGE_SHIFT
) {
3436 * __vmalloc_node - allocate virtually contiguous memory
3437 * @size: allocation size
3438 * @align: desired alignment
3439 * @gfp_mask: flags for the page level allocator
3440 * @node: node to use for allocation or NUMA_NO_NODE
3441 * @caller: caller's return address
3443 * Allocate enough pages to cover @size from the page level allocator with
3444 * @gfp_mask flags. Map them into contiguous kernel virtual space.
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
3452 * Return: pointer to the allocated memory or %NULL on error
3454 void *__vmalloc_node(unsigned long size
, unsigned long align
,
3455 gfp_t gfp_mask
, int node
, const void *caller
)
3457 return __vmalloc_node_range(size
, align
, VMALLOC_START
, VMALLOC_END
,
3458 gfp_mask
, PAGE_KERNEL
, 0, node
, caller
);
/*
 * This is only for performance analysis of vmalloc and stress purpose.
 * It is required by vmalloc test module, therefore do not use it other
 * than that.
 */
3465 #ifdef CONFIG_TEST_VMALLOC_MODULE
3466 EXPORT_SYMBOL_GPL(__vmalloc_node
);
3469 void *__vmalloc(unsigned long size
, gfp_t gfp_mask
)
3471 return __vmalloc_node(size
, 1, gfp_mask
, NUMA_NO_NODE
,
3472 __builtin_return_address(0));
3474 EXPORT_SYMBOL(__vmalloc
);
/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
3496 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3497 * @size: allocation size
3498 * @gfp_mask: flags for the page level allocator
3500 * Allocate enough pages to cover @size from the page level
3501 * allocator and map them into contiguous kernel virtual space.
3502 * If @size is greater than or equal to PMD_SIZE, allow using
3503 * huge pages for the memory
3505 * Return: pointer to the allocated memory or %NULL on error
3507 void *vmalloc_huge(unsigned long size
, gfp_t gfp_mask
)
3509 return __vmalloc_node_range(size
, 1, VMALLOC_START
, VMALLOC_END
,
3510 gfp_mask
, PAGE_KERNEL
, VM_ALLOW_HUGE_VMAP
,
3511 NUMA_NO_NODE
, __builtin_return_address(0));
3513 EXPORT_SYMBOL_GPL(vmalloc_huge
);
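/*
 * Illustrative usage sketch (not part of the original file; the table size
 * is a placeholder). vmalloc_huge() opportunistically uses huge mappings
 * for large allocations and is freed like any other vmalloc allocation:
 *
 *	table = vmalloc_huge(64UL << 20, GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */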
3516 * vzalloc - allocate virtually contiguous memory with zero fill
3517 * @size: allocation size
3519 * Allocate enough pages to cover @size from the page level
3520 * allocator and map them into contiguous kernel virtual space.
3521 * The memory allocated is set to zero.
3523 * For tight control over page level allocator and protection flags
3524 * use __vmalloc() instead.
3526 * Return: pointer to the allocated memory or %NULL on error
3528 void *vzalloc(unsigned long size
)
3530 return __vmalloc_node(size
, 1, GFP_KERNEL
| __GFP_ZERO
, NUMA_NO_NODE
,
3531 __builtin_return_address(0));
3533 EXPORT_SYMBOL(vzalloc
);
3536 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3537 * @size: allocation size
3539 * The resulting memory area is zeroed so it can be mapped to userspace
3540 * without leaking data.
3542 * Return: pointer to the allocated memory or %NULL on error
3544 void *vmalloc_user(unsigned long size
)
3546 return __vmalloc_node_range(size
, SHMLBA
, VMALLOC_START
, VMALLOC_END
,
3547 GFP_KERNEL
| __GFP_ZERO
, PAGE_KERNEL
,
3548 VM_USERMAP
, NUMA_NO_NODE
,
3549 __builtin_return_address(0));
3551 EXPORT_SYMBOL(vmalloc_user
);
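/*
 * Illustrative usage sketch (not part of the original file; npages is
 * hypothetical). vmalloc_user() returns zeroed, VM_USERMAP-tagged memory,
 * which is the form remap_vmalloc_range() expects when a driver later maps
 * the buffer into userspace:
 *
 *	buf = vmalloc_user(npages << PAGE_SHIFT);
 *	if (!buf)
 *		return -ENOMEM;
 */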
3554 * vmalloc_node - allocate memory on a specific node
3555 * @size: allocation size
3558 * Allocate enough pages to cover @size from the page level
3559 * allocator and map them into contiguous kernel virtual space.
3561 * For tight control over page level allocator and protection flags
3562 * use __vmalloc() instead.
3564 * Return: pointer to the allocated memory or %NULL on error
3566 void *vmalloc_node(unsigned long size
, int node
)
3568 return __vmalloc_node(size
, 1, GFP_KERNEL
, node
,
3569 __builtin_return_address(0));
3571 EXPORT_SYMBOL(vmalloc_node
);
3574 * vzalloc_node - allocate memory on a specific node with zero fill
3575 * @size: allocation size
3578 * Allocate enough pages to cover @size from the page level
3579 * allocator and map them into contiguous kernel virtual space.
3580 * The memory allocated is set to zero.
3582 * Return: pointer to the allocated memory or %NULL on error
3584 void *vzalloc_node(unsigned long size
, int node
)
3586 return __vmalloc_node(size
, 1, GFP_KERNEL
| __GFP_ZERO
, node
,
3587 __builtin_return_address(0));
3589 EXPORT_SYMBOL(vzalloc_node
);
3591 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3592 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3593 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3594 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3597 * 64b systems should always have either DMA or DMA32 zones. For others
3598 * GFP_DMA32 should do the right thing and use the normal zone.
3600 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3604 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3605 * @size: allocation size
3607 * Allocate enough 32bit PA addressable pages to cover @size from the
3608 * page level allocator and map them into contiguous kernel virtual space.
3610 * Return: pointer to the allocated memory or %NULL on error
3612 void *vmalloc_32(unsigned long size
)
3614 return __vmalloc_node(size
, 1, GFP_VMALLOC32
, NUMA_NO_NODE
,
3615 __builtin_return_address(0));
3617 EXPORT_SYMBOL(vmalloc_32
);
3620 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3621 * @size: allocation size
3623 * The resulting memory area is 32bit addressable and zeroed so it can be
3624 * mapped to userspace without leaking data.
3626 * Return: pointer to the allocated memory or %NULL on error
3628 void *vmalloc_32_user(unsigned long size
)
3630 return __vmalloc_node_range(size
, SHMLBA
, VMALLOC_START
, VMALLOC_END
,
3631 GFP_VMALLOC32
| __GFP_ZERO
, PAGE_KERNEL
,
3632 VM_USERMAP
, NUMA_NO_NODE
,
3633 __builtin_return_address(0));
3635 EXPORT_SYMBOL(vmalloc_32_user
);
3638 * Atomically zero bytes in the iterator.
3640 * Returns the number of zeroed bytes.
3642 static size_t zero_iter(struct iov_iter
*iter
, size_t count
)
3644 size_t remains
= count
;
3646 while (remains
> 0) {
3649 num
= min_t(size_t, remains
, PAGE_SIZE
);
3650 copied
= copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num
, iter
);
3657 return count
- remains
;
3661 * small helper routine, copy contents to iter from addr.
3662 * If the page is not present, fill zero.
3664 * Returns the number of copied bytes.
3666 static size_t aligned_vread_iter(struct iov_iter
*iter
,
3667 const char *addr
, size_t count
)
3669 size_t remains
= count
;
3672 while (remains
> 0) {
3673 unsigned long offset
, length
;
3676 offset
= offset_in_page(addr
);
3677 length
= PAGE_SIZE
- offset
;
3678 if (length
> remains
)
3680 page
= vmalloc_to_page(addr
);
		/*
		 * To do safe access to this _mapped_ area, we need a lock. But
		 * adding a lock here means adding vmalloc()/vfree() overhead
		 * for this rarely used _debug_ interface. Instead of that,
		 * we'll use a local mapping via copy_page_to_iter_nofault()
		 * and accept a small overhead in this access function.
		 */
3690 copied
= copy_page_to_iter_nofault(page
, offset
,
3693 copied
= zero_iter(iter
, length
);
3698 if (copied
!= length
)
3702 return count
- remains
;
3706 * Read from a vm_map_ram region of memory.
3708 * Returns the number of copied bytes.
3710 static size_t vmap_ram_vread_iter(struct iov_iter
*iter
, const char *addr
,
3711 size_t count
, unsigned long flags
)
3714 struct vmap_block
*vb
;
3716 unsigned long offset
;
3717 unsigned int rs
, re
;
3721 * If it's area created by vm_map_ram() interface directly, but
3722 * not further subdividing and delegating management to vmap_block,
3725 if (!(flags
& VMAP_BLOCK
))
3726 return aligned_vread_iter(iter
, addr
, count
);
3731 * Area is split into regions and tracked with vmap_block, read out
3732 * each region and zero fill the hole between regions.
3734 xa
= addr_to_vb_xa((unsigned long) addr
);
3735 vb
= xa_load(xa
, addr_to_vb_idx((unsigned long)addr
));
3739 spin_lock(&vb
->lock
);
3740 if (bitmap_empty(vb
->used_map
, VMAP_BBMAP_BITS
)) {
3741 spin_unlock(&vb
->lock
);
3745 for_each_set_bitrange(rs
, re
, vb
->used_map
, VMAP_BBMAP_BITS
) {
3751 start
= vmap_block_vaddr(vb
->va
->va_start
, rs
);
3754 size_t to_zero
= min_t(size_t, start
- addr
, remains
);
3755 size_t zeroed
= zero_iter(iter
, to_zero
);
3760 if (remains
== 0 || zeroed
!= to_zero
)
3764 /*it could start reading from the middle of used region*/
3765 offset
= offset_in_page(addr
);
3766 n
= ((re
- rs
+ 1) << PAGE_SHIFT
) - offset
;
3770 copied
= aligned_vread_iter(iter
, start
+ offset
, n
);
3779 spin_unlock(&vb
->lock
);
3782 /* zero-fill the left dirty or free regions */
3783 return count
- remains
+ zero_iter(iter
, remains
);
3785 /* We couldn't copy/zero everything */
3786 spin_unlock(&vb
->lock
);
3787 return count
- remains
;
/**
 * vread_iter() - read vmalloc area in a safe way to an iterator.
 * @iter:         the iterator to which data should be written.
 * @addr:         vm address.
 * @count:        number of bytes to be read.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to the given iterator. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied to @iter. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * 0 is returned.
 *
 * Note: In usual ops, vread_iter() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /proc/kcore.
 *
 * Return: number of bytes for which addr and iter should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area.
 */
3814 long vread_iter(struct iov_iter
*iter
, const char *addr
, size_t count
)
3816 struct vmap_area
*va
;
3817 struct vm_struct
*vm
;
3819 size_t n
, size
, flags
, remains
;
3821 addr
= kasan_reset_tag(addr
);
3823 /* Don't allow overflow */
3824 if ((unsigned long) addr
+ count
< count
)
3825 count
= -(unsigned long) addr
;
3829 spin_lock(&vmap_area_lock
);
3830 va
= find_vmap_area_exceed_addr((unsigned long)addr
);
3834 /* no intersects with alive vmap_area */
3835 if ((unsigned long)addr
+ remains
<= va
->va_start
)
3838 list_for_each_entry_from(va
, &vmap_area_list
, list
) {
3845 flags
= va
->flags
& VMAP_FLAGS_MASK
;
3847 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
3848 * be set together with VMAP_RAM.
3850 WARN_ON(flags
== VMAP_BLOCK
);
3855 if (vm
&& (vm
->flags
& VM_UNINITIALIZED
))
3858 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3861 vaddr
= (char *) va
->va_start
;
3862 size
= vm
? get_vm_area_size(vm
) : va_size(va
);
3864 if (addr
>= vaddr
+ size
)
3868 size_t to_zero
= min_t(size_t, vaddr
- addr
, remains
);
3869 size_t zeroed
= zero_iter(iter
, to_zero
);
3874 if (remains
== 0 || zeroed
!= to_zero
)
3878 n
= vaddr
+ size
- addr
;
3882 if (flags
& VMAP_RAM
)
3883 copied
= vmap_ram_vread_iter(iter
, addr
, n
, flags
);
3884 else if (!(vm
&& (vm
->flags
& (VM_IOREMAP
| VM_SPARSE
))))
3885 copied
= aligned_vread_iter(iter
, addr
, n
);
3886 else /* IOREMAP | SPARSE area is treated as memory hole */
3887 copied
= zero_iter(iter
, n
);
3897 spin_unlock(&vmap_area_lock
);
3898 /* zero-fill memory holes */
3899 return count
- remains
+ zero_iter(iter
, remains
);
3901 /* Nothing remains, or We couldn't copy/zero everything. */
3902 spin_unlock(&vmap_area_lock
);
3904 return count
- remains
;
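/*
 * Illustrative usage sketch (not part of the original file; buf, len and
 * kaddr are hypothetical). A reader such as /proc/kcore wraps its
 * destination buffer in an iov_iter and lets vread_iter() copy mapped
 * ranges and zero-fill the holes:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	long copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = vread_iter(&iter, kaddr, len);
 */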
3908 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3909 * @vma: vma to cover
3910 * @uaddr: target user address to start at
3911 * @kaddr: virtual address of vmalloc kernel memory
3912 * @pgoff: offset from @kaddr to start at
3913 * @size: size of map area
3915 * Returns: 0 for success, -Exxx on failure
3917 * This function checks that @kaddr is a valid vmalloc'ed area,
3918 * and that it is big enough to cover the range starting at
3919 * @uaddr in @vma. Will return failure if that criteria isn't
3922 * Similar to remap_pfn_range() (see mm/memory.c)
3924 int remap_vmalloc_range_partial(struct vm_area_struct
*vma
, unsigned long uaddr
,
3925 void *kaddr
, unsigned long pgoff
,
3928 struct vm_struct
*area
;
3930 unsigned long end_index
;
3932 if (check_shl_overflow(pgoff
, PAGE_SHIFT
, &off
))
3935 size
= PAGE_ALIGN(size
);
3937 if (!PAGE_ALIGNED(uaddr
) || !PAGE_ALIGNED(kaddr
))
3940 area
= find_vm_area(kaddr
);
3944 if (!(area
->flags
& (VM_USERMAP
| VM_DMA_COHERENT
)))
3947 if (check_add_overflow(size
, off
, &end_index
) ||
3948 end_index
> get_vm_area_size(area
))
3953 struct page
*page
= vmalloc_to_page(kaddr
);
3956 ret
= vm_insert_page(vma
, uaddr
, page
);
3965 vm_flags_set(vma
, VM_DONTEXPAND
| VM_DONTDUMP
);
3971 * remap_vmalloc_range - map vmalloc pages to userspace
3972 * @vma: vma to cover (map full range of vma)
3973 * @addr: vmalloc memory
3974 * @pgoff: number of pages into addr before first page to map
3976 * Returns: 0 for success, -Exxx on failure
3978 * This function checks that addr is a valid vmalloc'ed area, and
3979 * that it is big enough to cover the vma. Will return failure if
3980 * that criteria isn't met.
3982 * Similar to remap_pfn_range() (see mm/memory.c)
3984 int remap_vmalloc_range(struct vm_area_struct
*vma
, void *addr
,
3985 unsigned long pgoff
)
3987 return remap_vmalloc_range_partial(vma
, vma
->vm_start
,
3989 vma
->vm_end
- vma
->vm_start
);
3991 EXPORT_SYMBOL(remap_vmalloc_range
);
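/*
 * Illustrative usage sketch (not part of the original file; foo_buf and the
 * file_operations wiring are hypothetical). A driver exposes a
 * vmalloc_user() buffer to userspace from its ->mmap() handler:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */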
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}
/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: vmap_area if it is found. If there is no such area
 *   the first highest(reverse order) vmap_area is returned
 *   i.e. va->va_start < addr && va->va_end < addr or NULL
 *   if there are no areas before @addr.
 */
4017 static struct vmap_area
*
4018 pvm_find_va_enclose_addr(unsigned long addr
)
4020 struct vmap_area
*va
, *tmp
;
4023 n
= free_vmap_area_root
.rb_node
;
4027 tmp
= rb_entry(n
, struct vmap_area
, rb_node
);
4028 if (tmp
->va_start
<= addr
) {
4030 if (tmp
->va_end
>= addr
)
4043 * pvm_determine_end_from_reverse - find the highest aligned address
4044 * of free block below VMALLOC_END
4046 * in - the VA we start the search(reverse order);
4047 * out - the VA with the highest aligned end address.
4048 * @align: alignment for required highest address
4050 * Returns: determined end address within vmap_area
4052 static unsigned long
4053 pvm_determine_end_from_reverse(struct vmap_area
**va
, unsigned long align
)
4055 unsigned long vmalloc_end
= VMALLOC_END
& ~(align
- 1);
4059 list_for_each_entry_from_reverse((*va
),
4060 &free_vmap_area_list
, list
) {
4061 addr
= min((*va
)->va_end
& ~(align
- 1), vmalloc_end
);
4062 if ((*va
)->va_start
< addr
)
4071 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4072 * @offsets: array containing offset of each area
4073 * @sizes: array containing size of each area
4074 * @nr_vms: the number of areas to allocate
4075 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4077 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4078 * vm_structs on success, %NULL on failure
4080 * Percpu allocator wants to use congruent vm areas so that it can
4081 * maintain the offsets among percpu areas. This function allocates
4082 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4083 * be scattered pretty far, distance between two areas easily going up
4084 * to gigabytes. To avoid interacting with regular vmallocs, these
4085 * areas are allocated from top.
4087 * Despite its complicated look, this allocator is rather simple. It
4088 * does everything top-down and scans free blocks from the end looking
4089 * for matching base. While scanning, if any of the areas do not fit the
4090 * base address is pulled down to fit the area. Scanning is repeated till
4091 * all the areas fit and then all necessary data structures are inserted
4092 * and the result is returned.
4094 struct vm_struct
**pcpu_get_vm_areas(const unsigned long *offsets
,
4095 const size_t *sizes
, int nr_vms
,
4098 const unsigned long vmalloc_start
= ALIGN(VMALLOC_START
, align
);
4099 const unsigned long vmalloc_end
= VMALLOC_END
& ~(align
- 1);
4100 struct vmap_area
**vas
, *va
;
4101 struct vm_struct
**vms
;
4102 int area
, area2
, last_area
, term_area
;
4103 unsigned long base
, start
, size
, end
, last_end
, orig_start
, orig_end
;
4104 bool purged
= false;
4106 /* verify parameters and allocate data structures */
4107 BUG_ON(offset_in_page(align
) || !is_power_of_2(align
));
4108 for (last_area
= 0, area
= 0; area
< nr_vms
; area
++) {
4109 start
= offsets
[area
];
4110 end
= start
+ sizes
[area
];
4112 /* is everything aligned properly? */
4113 BUG_ON(!IS_ALIGNED(offsets
[area
], align
));
4114 BUG_ON(!IS_ALIGNED(sizes
[area
], align
));
4116 /* detect the area with the highest address */
4117 if (start
> offsets
[last_area
])
4120 for (area2
= area
+ 1; area2
< nr_vms
; area2
++) {
4121 unsigned long start2
= offsets
[area2
];
4122 unsigned long end2
= start2
+ sizes
[area2
];
4124 BUG_ON(start2
< end
&& start
< end2
);
4127 last_end
= offsets
[last_area
] + sizes
[last_area
];
4129 if (vmalloc_end
- vmalloc_start
< last_end
) {
4134 vms
= kcalloc(nr_vms
, sizeof(vms
[0]), GFP_KERNEL
);
4135 vas
= kcalloc(nr_vms
, sizeof(vas
[0]), GFP_KERNEL
);
4139 for (area
= 0; area
< nr_vms
; area
++) {
4140 vas
[area
] = kmem_cache_zalloc(vmap_area_cachep
, GFP_KERNEL
);
4141 vms
[area
] = kzalloc(sizeof(struct vm_struct
), GFP_KERNEL
);
4142 if (!vas
[area
] || !vms
[area
])
4146 spin_lock(&free_vmap_area_lock
);
4148 /* start scanning - we scan from the top, begin with the last area */
4149 area
= term_area
= last_area
;
4150 start
= offsets
[area
];
4151 end
= start
+ sizes
[area
];
4153 va
= pvm_find_va_enclose_addr(vmalloc_end
);
4154 base
= pvm_determine_end_from_reverse(&va
, align
) - end
;
4158 * base might have underflowed, add last_end before
4161 if (base
+ last_end
< vmalloc_start
+ last_end
)
4165 * Fitting base has not been found.
4171 * If required width exceeds current VA block, move
4172 * base downwards and then recheck.
4174 if (base
+ end
> va
->va_end
) {
4175 base
= pvm_determine_end_from_reverse(&va
, align
) - end
;
4181 * If this VA does not fit, move base downwards and recheck.
4183 if (base
+ start
< va
->va_start
) {
4184 va
= node_to_va(rb_prev(&va
->rb_node
));
4185 base
= pvm_determine_end_from_reverse(&va
, align
) - end
;
4191 * This area fits, move on to the previous one. If
4192 * the previous one is the terminal one, we're done.
4194 area
= (area
+ nr_vms
- 1) % nr_vms
;
4195 if (area
== term_area
)
4198 start
= offsets
[area
];
4199 end
= start
+ sizes
[area
];
4200 va
= pvm_find_va_enclose_addr(base
+ end
);
4203 /* we've found a fitting base, insert all va's */
4204 for (area
= 0; area
< nr_vms
; area
++) {
4207 start
= base
+ offsets
[area
];
4210 va
= pvm_find_va_enclose_addr(start
);
4211 if (WARN_ON_ONCE(va
== NULL
))
4212 /* It is a BUG(), but trigger recovery instead. */
4215 ret
= adjust_va_to_fit_type(&free_vmap_area_root
,
4216 &free_vmap_area_list
,
4218 if (WARN_ON_ONCE(unlikely(ret
)))
4219 /* It is a BUG(), but trigger recovery instead. */
4222 /* Allocated area. */
4224 va
->va_start
= start
;
4225 va
->va_end
= start
+ size
;
4228 spin_unlock(&free_vmap_area_lock
);
4230 /* populate the kasan shadow space */
4231 for (area
= 0; area
< nr_vms
; area
++) {
4232 if (kasan_populate_vmalloc(vas
[area
]->va_start
, sizes
[area
]))
4233 goto err_free_shadow
;
4236 /* insert all vm's */
4237 spin_lock(&vmap_area_lock
);
4238 for (area
= 0; area
< nr_vms
; area
++) {
4239 insert_vmap_area(vas
[area
], &vmap_area_root
, &vmap_area_list
);
4241 setup_vmalloc_vm_locked(vms
[area
], vas
[area
], VM_ALLOC
,
4244 spin_unlock(&vmap_area_lock
);
4247 * Mark allocated areas as accessible. Do it now as a best-effort
4248 * approach, as they can be mapped outside of vmalloc code.
4249 * With hardware tag-based KASAN, marking is skipped for
4250 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4252 for (area
= 0; area
< nr_vms
; area
++)
4253 vms
[area
]->addr
= kasan_unpoison_vmalloc(vms
[area
]->addr
,
4254 vms
[area
]->size
, KASAN_VMALLOC_PROT_NORMAL
);
4261 * Remove previously allocated areas. There is no
4262 * need in removing these areas from the busy tree,
4263 * because they are inserted only on the final step
4264 * and when pcpu_get_vm_areas() is success.
4267 orig_start
= vas
[area
]->va_start
;
4268 orig_end
= vas
[area
]->va_end
;
4269 va
= merge_or_add_vmap_area_augment(vas
[area
], &free_vmap_area_root
,
4270 &free_vmap_area_list
);
4272 kasan_release_vmalloc(orig_start
, orig_end
,
4273 va
->va_start
, va
->va_end
);
4278 spin_unlock(&free_vmap_area_lock
);
4280 reclaim_and_purge_vmap_areas();
4283 /* Before "retry", check if we recover. */
4284 for (area
= 0; area
< nr_vms
; area
++) {
4288 vas
[area
] = kmem_cache_zalloc(
4289 vmap_area_cachep
, GFP_KERNEL
);
4298 for (area
= 0; area
< nr_vms
; area
++) {
4300 kmem_cache_free(vmap_area_cachep
, vas
[area
]);
4310 spin_lock(&free_vmap_area_lock
);
4312 * We release all the vmalloc shadows, even the ones for regions that
4313 * hadn't been successfully added. This relies on kasan_release_vmalloc
4314 * being able to tolerate this case.
4316 for (area
= 0; area
< nr_vms
; area
++) {
4317 orig_start
= vas
[area
]->va_start
;
4318 orig_end
= vas
[area
]->va_end
;
4319 va
= merge_or_add_vmap_area_augment(vas
[area
], &free_vmap_area_root
,
4320 &free_vmap_area_list
);
4322 kasan_release_vmalloc(orig_start
, orig_end
,
4323 va
->va_start
, va
->va_end
);
4327 spin_unlock(&free_vmap_area_lock
);
4334 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4335 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4336 * @nr_vms: the number of allocated areas
4338 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4340 void pcpu_free_vm_areas(struct vm_struct
**vms
, int nr_vms
)
4344 for (i
= 0; i
< nr_vms
; i
++)
4345 free_vm_area(vms
[i
]);
4348 #endif /* CONFIG_SMP */
4350 #ifdef CONFIG_PRINTK
4351 bool vmalloc_dump_obj(void *object
)
4353 void *objp
= (void *)PAGE_ALIGN((unsigned long)object
);
4355 struct vm_struct
*vm
;
4356 struct vmap_area
*va
;
4358 unsigned int nr_pages
;
4360 if (!spin_trylock(&vmap_area_lock
))
4362 va
= __find_vmap_area((unsigned long)objp
, &vmap_area_root
);
4364 spin_unlock(&vmap_area_lock
);
4370 spin_unlock(&vmap_area_lock
);
4373 addr
= (unsigned long)vm
->addr
;
4374 caller
= vm
->caller
;
4375 nr_pages
= vm
->nr_pages
;
4376 spin_unlock(&vmap_area_lock
);
4377 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4378 nr_pages
, addr
, caller
);
4383 #ifdef CONFIG_PROC_FS
4384 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
4385 __acquires(&vmap_purge_lock
)
4386 __acquires(&vmap_area_lock
)
4388 mutex_lock(&vmap_purge_lock
);
4389 spin_lock(&vmap_area_lock
);
4391 return seq_list_start(&vmap_area_list
, *pos
);
4394 static void *s_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
4396 return seq_list_next(p
, &vmap_area_list
, pos
);
4399 static void s_stop(struct seq_file
*m
, void *p
)
4400 __releases(&vmap_area_lock
)
4401 __releases(&vmap_purge_lock
)
4403 spin_unlock(&vmap_area_lock
);
4404 mutex_unlock(&vmap_purge_lock
);
4407 static void show_numa_info(struct seq_file
*m
, struct vm_struct
*v
)
4409 if (IS_ENABLED(CONFIG_NUMA
)) {
4410 unsigned int nr
, *counters
= m
->private;
4411 unsigned int step
= 1U << vm_area_page_order(v
);
4416 if (v
->flags
& VM_UNINITIALIZED
)
4418 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4421 memset(counters
, 0, nr_node_ids
* sizeof(unsigned int));
4423 for (nr
= 0; nr
< v
->nr_pages
; nr
+= step
)
4424 counters
[page_to_nid(v
->pages
[nr
])] += step
;
4425 for_each_node_state(nr
, N_HIGH_MEMORY
)
4427 seq_printf(m
, " N%u=%u", nr
, counters
[nr
]);
4431 static void show_purge_info(struct seq_file
*m
)
4433 struct vmap_area
*va
;
4435 spin_lock(&purge_vmap_area_lock
);
4436 list_for_each_entry(va
, &purge_vmap_area_list
, list
) {
4437 seq_printf(m
, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4438 (void *)va
->va_start
, (void *)va
->va_end
,
4439 va
->va_end
- va
->va_start
);
4441 spin_unlock(&purge_vmap_area_lock
);
4444 static int s_show(struct seq_file
*m
, void *p
)
4446 struct vmap_area
*va
;
4447 struct vm_struct
*v
;
4449 va
= list_entry(p
, struct vmap_area
, list
);
4452 if (va
->flags
& VMAP_RAM
)
4453 seq_printf(m
, "0x%pK-0x%pK %7ld vm_map_ram\n",
4454 (void *)va
->va_start
, (void *)va
->va_end
,
4455 va
->va_end
- va
->va_start
);
4462 seq_printf(m
, "0x%pK-0x%pK %7ld",
4463 v
->addr
, v
->addr
+ v
->size
, v
->size
);
4466 seq_printf(m
, " %pS", v
->caller
);
4469 seq_printf(m
, " pages=%d", v
->nr_pages
);
4472 seq_printf(m
, " phys=%pa", &v
->phys_addr
);
4474 if (v
->flags
& VM_IOREMAP
)
4475 seq_puts(m
, " ioremap");
4477 if (v
->flags
& VM_SPARSE
)
4478 seq_puts(m
, " sparse");
4480 if (v
->flags
& VM_ALLOC
)
4481 seq_puts(m
, " vmalloc");
4483 if (v
->flags
& VM_MAP
)
4484 seq_puts(m
, " vmap");
4486 if (v
->flags
& VM_USERMAP
)
4487 seq_puts(m
, " user");
4489 if (v
->flags
& VM_DMA_COHERENT
)
4490 seq_puts(m
, " dma-coherent");
4492 if (is_vmalloc_addr(v
->pages
))
4493 seq_puts(m
, " vpages");
4495 show_numa_info(m
, v
);
4499 * As a final step, dump "unpurged" areas.
4502 if (list_is_last(&va
->list
, &vmap_area_list
))
4508 static const struct seq_operations vmalloc_op
= {
4515 static int __init
proc_vmalloc_init(void)
4517 if (IS_ENABLED(CONFIG_NUMA
))
4518 proc_create_seq_private("vmallocinfo", 0400, NULL
,
4520 nr_node_ids
* sizeof(unsigned int), NULL
);
4522 proc_create_seq("vmallocinfo", 0400, NULL
, &vmalloc_op
);
4525 module_init(proc_vmalloc_init
);
4529 void __init
vmalloc_init(void)
4531 struct vmap_area
*va
;
4532 struct vm_struct
*tmp
;
4536 * Create the cache for vmap_area objects.
4538 vmap_area_cachep
= KMEM_CACHE(vmap_area
, SLAB_PANIC
);
4540 for_each_possible_cpu(i
) {
4541 struct vmap_block_queue
*vbq
;
4542 struct vfree_deferred
*p
;
4544 vbq
= &per_cpu(vmap_block_queue
, i
);
4545 spin_lock_init(&vbq
->lock
);
4546 INIT_LIST_HEAD(&vbq
->free
);
4547 p
= &per_cpu(vfree_deferred
, i
);
4548 init_llist_head(&p
->list
);
4549 INIT_WORK(&p
->wq
, delayed_vfree_work
);
4550 xa_init(&vbq
->vmap_blocks
);
4553 /* Import existing vmlist entries. */
4554 for (tmp
= vmlist
; tmp
; tmp
= tmp
->next
) {
4555 va
= kmem_cache_zalloc(vmap_area_cachep
, GFP_NOWAIT
);
4556 if (WARN_ON_ONCE(!va
))
4559 va
->va_start
= (unsigned long)tmp
->addr
;
4560 va
->va_end
= va
->va_start
+ tmp
->size
;
4562 insert_vmap_area(va
, &vmap_area_root
, &vmap_area_list
);
4566 * Now we can initialize a free vmap space.
4568 vmap_init_free_space();
4569 vmap_initialized
= true;