/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * I.e. the pte at addr+N*PAGE_SIZE shall point to the pfn corresponding to
 * pages[N].
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
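
/*
 * Example (hypothetical caller, not part of this file): collect the
 * struct page backing each page of a page-aligned vmalloc()ed buffer,
 * e.g. to build a scatterlist from it:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
 */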


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (unlike linear kernel virtual
	 * address debugging, which doesn't need a broadcast TLB flush
	 * and so is a lot faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}


/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
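
/*
 * Worked example (assuming 4KiB pages): with 16 online CPUs,
 * fls(16) == 5, so lazy_max_pages() == 5 * (32MiB / 4KiB) == 40960
 * pages, i.e. up to 160MiB of vmap space may sit lazily unmapped
 * before a purge and global TLB flush is attempted.
 */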

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
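
/*
 * Worked example (assumptions: 64-bit, 4KiB pages, NR_CPUS == 16):
 * VMALLOC_SPACE is guessed at 128GiB, so VMALLOC_PAGES == 32M pages;
 * 32M / 16 / 16 == 131072 bits, which clamps down to
 * VMAP_BBMAP_BITS_MAX (1024), giving VMAP_BLOCK_SIZE == 4MiB per block.
 */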

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
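
/*
 * I.e. the index is the address's distance, in VMAP_BLOCK_SIZE units,
 * from the aligned-down start of the vmalloc area; every address inside
 * a given block maps to the same index, which keys the radix tree above.
 */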

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = VMAP_BBMAP_BITS - vb->free;
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i, j;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			if (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;

				j = find_last_bit(vb->dirty_map,
							VMAP_BBMAP_BITS);
				j = j + 1; /* need exclusive index */

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
 * faster than vmap(). But if you mix long-lived and short-lived objects
 * mapped with vm_map_ram(), it can fragment the address space badly
 * (especially on a 32bit machine), eventually causing mapping failures.
 * Please use this function only for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
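
/*
 * Usage sketch (hypothetical caller): transiently map a small batch of
 * pages, use the mapping, then tear it down with the same count:
 *
 *	void *buf = vm_map_ram(pages, nr_pages, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (buf) {
 *		memcpy(buf, src, nr_pages * PAGE_SIZE);
 *		vm_unmap_ram(buf, nr_pages);
 *	}
 */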

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
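
/*
 * Typical pairing (sketch, mirroring what vmap_page_range() above
 * does): the caller handles the cache flush itself, e.g.
 *
 *	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	flush_cache_vmap(addr, addr + size);
 */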

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
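
/*
 * Example (sketch of what vmap() further below effectively does):
 * reserve a range of vmalloc address space with get_vm_area(), then
 * back it with caller-supplied pages:
 *
 *	struct vm_struct *area = get_vm_area(count << PAGE_SHIFT, VM_MAP);
 *	if (area && map_vm_area(area, PAGE_KERNEL, pages))
 *		vunmap(area->addr);	// mapping failed, release the area
 */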

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
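
/*
 * Usage sketch (hypothetical caller): stitch separately allocated
 * order-0 pages into one virtually contiguous buffer, then release the
 * mapping (but not the pages themselves) with vunmap():
 *
 *	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (buf) {
 *		...
 *		vunmap(buf);
 *	}
 */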
1559
2dca6999
DM
1560static void *__vmalloc_node(unsigned long size, unsigned long align,
1561 gfp_t gfp_mask, pgprot_t prot,
5e6cafc8 1562 int node, const void *caller);
e31d9eb5 1563static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3722e13c 1564 pgprot_t prot, int node)
1da177e4 1565{
22943ab1 1566 const int order = 0;
1da177e4
LT
1567 struct page **pages;
1568 unsigned int nr_pages, array_size, i;
930f036b
DR
1569 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1570 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1da177e4 1571
762216ab 1572 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1da177e4
LT
1573 array_size = (nr_pages * sizeof(struct page *));
1574
1575 area->nr_pages = nr_pages;
1576 /* Please note that the recursion is strictly bounded. */
8757d5fa 1577 if (array_size > PAGE_SIZE) {
976d6dfb 1578 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
3722e13c 1579 PAGE_KERNEL, node, area->caller);
8757d5fa 1580 area->flags |= VM_VPAGES;
286e1ea3 1581 } else {
976d6dfb 1582 pages = kmalloc_node(array_size, nested_gfp, node);
286e1ea3 1583 }
1da177e4
LT
1584 area->pages = pages;
1585 if (!area->pages) {
1586 remove_vm_area(area->addr);
1587 kfree(area);
1588 return NULL;
1589 }
1da177e4
LT
1590
1591 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
1592 struct page *page;
1593
4b90951c 1594 if (node == NUMA_NO_NODE)
930f036b 1595 page = alloc_page(alloc_mask);
930fc45a 1596 else
930f036b 1597 page = alloc_pages_node(node, alloc_mask, order);
bf53d6f8
CL
1598
1599 if (unlikely(!page)) {
1da177e4
LT
1600 /* Successfully allocated i pages, free them in __vunmap() */
1601 area->nr_pages = i;
1602 goto fail;
1603 }
bf53d6f8 1604 area->pages[i] = page;
660654f9
ED
1605 if (gfp_mask & __GFP_WAIT)
1606 cond_resched();
1da177e4
LT
1607 }
1608
f6f8ed47 1609 if (map_vm_area(area, prot, pages))
1da177e4
LT
1610 goto fail;
1611 return area->addr;
1612
1613fail:
3ee9a4f0
JP
1614 warn_alloc_failed(gfp_mask, order,
1615 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
22943ab1 1616 (area->nr_pages*PAGE_SIZE), area->size);
1da177e4
LT
1617 vfree(area->addr);
1618 return NULL;
1619}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
1709
e1ca7788
DY
1710static inline void *__vmalloc_node_flags(unsigned long size,
1711 int node, gfp_t flags)
1712{
1713 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1714 node, __builtin_return_address(0));
1715}
1716
1da177e4
LT
1717/**
1718 * vmalloc - allocate virtually contiguous memory
1da177e4 1719 * @size: allocation size
1da177e4
LT
1720 * Allocate enough pages to cover @size from the page level
1721 * allocator and map them into contiguous kernel virtual space.
1722 *
c1c8897f 1723 * For tight control over page level allocator and protection flags
1da177e4
LT
1724 * use __vmalloc() instead.
1725 */
1726void *vmalloc(unsigned long size)
1727{
00ef2d2f
DR
1728 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1729 GFP_KERNEL | __GFP_HIGHMEM);
1da177e4 1730}
1da177e4
LT
1731EXPORT_SYMBOL(vmalloc);
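
/*
 * Example (sketch): the canonical allocate/use/free pairing for large,
 * virtually contiguous kernel buffers:
 *
 *	struct foo *table = vmalloc(nr_entries * sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 *
 * (struct foo and nr_entries are hypothetical, for illustration only.)
 */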
1732
e1ca7788
DY
1733/**
1734 * vzalloc - allocate virtually contiguous memory with zero fill
1735 * @size: allocation size
1736 * Allocate enough pages to cover @size from the page level
1737 * allocator and map them into contiguous kernel virtual space.
1738 * The memory allocated is set to zero.
1739 *
1740 * For tight control over page level allocator and protection flags
1741 * use __vmalloc() instead.
1742 */
1743void *vzalloc(unsigned long size)
1744{
00ef2d2f 1745 return __vmalloc_node_flags(size, NUMA_NO_NODE,
e1ca7788
DY
1746 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1747}
1748EXPORT_SYMBOL(vzalloc);
1749
83342314 1750/**
ead04089
REB
1751 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1752 * @size: allocation size
83342314 1753 *
ead04089
REB
1754 * The resulting memory area is zeroed so it can be mapped to userspace
1755 * without leaking data.
83342314
NP
1756 */
1757void *vmalloc_user(unsigned long size)
1758{
1759 struct vm_struct *area;
1760 void *ret;
1761
2dca6999
DM
1762 ret = __vmalloc_node(size, SHMLBA,
1763 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
00ef2d2f
DR
1764 PAGE_KERNEL, NUMA_NO_NODE,
1765 __builtin_return_address(0));
2b4ac44e 1766 if (ret) {
db64fe02 1767 area = find_vm_area(ret);
2b4ac44e 1768 area->flags |= VM_USERMAP;
2b4ac44e 1769 }
83342314
NP
1770 return ret;
1771}
1772EXPORT_SYMBOL(vmalloc_user);
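/*
 * Illustrative sketch (hypothetical name): vmalloc_user() is the allocator
 * of choice for a buffer that will later be handed to userspace via
 * remap_vmalloc_range(); the VM_USERMAP flag set above is what makes that
 * remap legal.
 */
static void *example_alloc_mmap_buffer(unsigned long size)
{
	return vmalloc_user(PAGE_ALIGN(size));
}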
1773
930fc45a
CL
1774/**
1775 * vmalloc_node - allocate memory on a specific node
930fc45a 1776 * @size: allocation size
d44e0780 1777 * @node: numa node
930fc45a
CL
1778 *
1779 * Allocate enough pages to cover @size from the page level
1780 * allocator and map them into contiguous kernel virtual space.
1781 *
c1c8897f 1782 * For tight control over page level allocator and protection flags
930fc45a
CL
1783 * use __vmalloc() instead.
1784 */
1785void *vmalloc_node(unsigned long size, int node)
1786{
2dca6999 1787 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
23016969 1788 node, __builtin_return_address(0));
930fc45a
CL
1789}
1790EXPORT_SYMBOL(vmalloc_node);
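/*
 * Illustrative sketch: allocating a device's working buffer on the device's
 * local NUMA node. dev_to_node() is a standard helper; the surrounding
 * names are hypothetical.
 */
static void *example_alloc_near_device(struct device *dev, unsigned long size)
{
	return vmalloc_node(size, dev_to_node(dev));
}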
1791
e1ca7788
DY
1792/**
1793 * vzalloc_node - allocate memory on a specific node with zero fill
1794 * @size: allocation size
1795 * @node: numa node
1796 *
1797 * Allocate enough pages to cover @size from the page level
1798 * allocator and map them into contiguous kernel virtual space.
1799 * The memory allocated is set to zero.
1800 *
1801 * For tight control over page level allocator and protection flags
1802 * use __vmalloc_node() instead.
1803 */
1804void *vzalloc_node(unsigned long size, int node)
1805{
1806 return __vmalloc_node_flags(size, node,
1807 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1808}
1809EXPORT_SYMBOL(vzalloc_node);
1810
4dc3b16b
PP
1811#ifndef PAGE_KERNEL_EXEC
1812# define PAGE_KERNEL_EXEC PAGE_KERNEL
1813#endif
1814
1da177e4
LT
1815/**
1816 * vmalloc_exec - allocate virtually contiguous, executable memory
1da177e4
LT
1817 * @size: allocation size
1818 *
1819 * Kernel-internal function to allocate enough pages to cover @size
1820 * from the page level allocator and map them into contiguous and
1821 * executable kernel virtual space.
1822 *
c1c8897f 1823 * For tight control over page level allocator and protection flags
1da177e4
LT
1824 * use __vmalloc() instead.
1825 */
1826
1da177e4
LT
1827void *vmalloc_exec(unsigned long size)
1828{
2dca6999 1829 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
00ef2d2f 1830 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4
LT
1831}
1832
0d08e0d3 1833#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
7ac674f5 1834#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
0d08e0d3 1835#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
7ac674f5 1836#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
0d08e0d3
AK
1837#else
1838#define GFP_VMALLOC32 GFP_KERNEL
1839#endif
1840
1da177e4
LT
1841/**
1842 * vmalloc_32 - allocate virtually contiguous memory (32-bit addressable)
1da177e4
LT
1843 * @size: allocation size
1844 *
1845 * Allocate enough 32-bit physically addressable pages to cover @size from the
1846 * page level allocator and map them into contiguous kernel virtual space.
1847 */
1848void *vmalloc_32(unsigned long size)
1849{
2dca6999 1850 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
00ef2d2f 1851 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4 1852}
1da177e4
LT
1853EXPORT_SYMBOL(vmalloc_32);
1854
83342314 1855/**
ead04089 1856 * vmalloc_32_user - allocate zeroed virtually contiguous 32-bit memory
83342314 1857 * @size: allocation size
ead04089
REB
1858 *
1859 * The resulting memory area is 32-bit addressable and zeroed so it can be
1860 * mapped to userspace without leaking data.
83342314
NP
1861 */
1862void *vmalloc_32_user(unsigned long size)
1863{
1864 struct vm_struct *area;
1865 void *ret;
1866
2dca6999 1867 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
00ef2d2f 1868 NUMA_NO_NODE, __builtin_return_address(0));
2b4ac44e 1869 if (ret) {
db64fe02 1870 area = find_vm_area(ret);
2b4ac44e 1871 area->flags |= VM_USERMAP;
2b4ac44e 1872 }
83342314
NP
1873 return ret;
1874}
1875EXPORT_SYMBOL(vmalloc_32_user);
1876
d0107eb0
KH
1877/*
1878 * Small helper routine: copy contents from addr to buf, one page at a
1879 * time. If a page is not present, the buffer is zero-filled instead.
1880 */
1881
1882static int aligned_vread(char *buf, char *addr, unsigned long count)
1883{
1884 struct page *p;
1885 int copied = 0;
1886
1887 while (count) {
1888 unsigned long offset, length;
1889
1890 offset = (unsigned long)addr & ~PAGE_MASK;
1891 length = PAGE_SIZE - offset;
1892 if (length > count)
1893 length = count;
1894 p = vmalloc_to_page(addr);
1895 /*
1896 * To access this _mapped_ area safely we would need a lock,
1897 * but taking a lock here would add vmalloc()/vfree() overhead
1898 * to this rarely used _debug_ interface. Instead, we use
1899 * kmap_atomic() and accept the small overhead in this access
1900 * function.
1901 */
1902 if (p) {
1903 /*
1904 * we can expect USER0 is not used (see vread/vwrite's
1905 * function description)
1906 */
9b04c5fe 1907 void *map = kmap_atomic(p);
d0107eb0 1908 memcpy(buf, map + offset, length);
9b04c5fe 1909 kunmap_atomic(map);
d0107eb0
KH
1910 } else
1911 memset(buf, 0, length);
1912
1913 addr += length;
1914 buf += length;
1915 copied += length;
1916 count -= length;
1917 }
1918 return copied;
1919}
1920
1921static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1922{
1923 struct page *p;
1924 int copied = 0;
1925
1926 while (count) {
1927 unsigned long offset, length;
1928
1929 offset = (unsigned long)addr & ~PAGE_MASK;
1930 length = PAGE_SIZE - offset;
1931 if (length > count)
1932 length = count;
1933 p = vmalloc_to_page(addr);
1934 /*
1935 * To access this _mapped_ area safely we would need a lock,
1936 * but taking a lock here would add vmalloc()/vfree() overhead
1937 * to this rarely used _debug_ interface. Instead, we use
1938 * kmap_atomic() and accept the small overhead in this access
1939 * function.
1940 */
1941 if (p) {
1942 /*
1943 * we can expect USER0 is not used (see vread/vwrite's
1944 * function description)
1945 */
9b04c5fe 1946 void *map = kmap_atomic(p);
d0107eb0 1947 memcpy(map + offset, buf, length);
9b04c5fe 1948 kunmap_atomic(map);
d0107eb0
KH
1949 }
1950 addr += length;
1951 buf += length;
1952 copied += length;
1953 count -= length;
1954 }
1955 return copied;
1956}
1957
1958/**
1959 * vread() - read vmalloc area in a safe way.
1960 * @buf: buffer for reading data
1961 * @addr: vm address.
1962 * @count: number of bytes to be read.
1963 *
1964 * Returns the number of bytes by which @addr and @buf should be
1965 * advanced (the same number as @count). Returns 0 if [addr...addr+count)
1966 * does not intersect any live vmalloc area.
1967 *
1968 * This function checks that @addr is a valid vmalloc'ed area and
1969 * copies data from that area to the given buffer. If the memory range
1970 * [addr...addr+count) includes some valid address, data is copied to
1971 * the proper part of @buf. Memory holes are zero-filled in @buf.
1972 * An IOREMAP area is treated as a memory hole and no copy is done.
1973 *
1974 * If [addr...addr+count) does not intersect any live vm_struct
a8e5202d 1975 * area, returns 0. @buf should be a kernel buffer.
d0107eb0
KH
1976 *
1977 * Note: In normal operation, vread() is never necessary because the
1978 * caller should know the vmalloc() area is valid and can use memcpy().
1979 * This is for routines which have to access the vmalloc area without
1980 * any prior information, such as /dev/kmem.
1981 *
1982 */
1983
1da177e4
LT
1984long vread(char *buf, char *addr, unsigned long count)
1985{
e81ce85f
JK
1986 struct vmap_area *va;
1987 struct vm_struct *vm;
1da177e4 1988 char *vaddr, *buf_start = buf;
d0107eb0 1989 unsigned long buflen = count;
1da177e4
LT
1990 unsigned long n;
1991
1992 /* Don't allow overflow */
1993 if ((unsigned long) addr + count < count)
1994 count = -(unsigned long) addr;
1995
e81ce85f
JK
1996 spin_lock(&vmap_area_lock);
1997 list_for_each_entry(va, &vmap_area_list, list) {
1998 if (!count)
1999 break;
2000
2001 if (!(va->flags & VM_VM_AREA))
2002 continue;
2003
2004 vm = va->vm;
2005 vaddr = (char *) vm->addr;
762216ab 2006 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2007 continue;
2008 while (addr < vaddr) {
2009 if (count == 0)
2010 goto finished;
2011 *buf = '\0';
2012 buf++;
2013 addr++;
2014 count--;
2015 }
762216ab 2016 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2017 if (n > count)
2018 n = count;
e81ce85f 2019 if (!(vm->flags & VM_IOREMAP))
d0107eb0
KH
2020 aligned_vread(buf, addr, n);
2021 else /* IOREMAP area is treated as memory hole */
2022 memset(buf, 0, n);
2023 buf += n;
2024 addr += n;
2025 count -= n;
1da177e4
LT
2026 }
2027finished:
e81ce85f 2028 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2029
2030 if (buf == buf_start)
2031 return 0;
2032 /* zero-fill memory holes */
2033 if (buf != buf_start + buflen)
2034 memset(buf, 0, buflen - (buf - buf_start));
2035
2036 return buflen;
1da177e4
LT
2037}
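/*
 * Illustrative sketch: a /dev/kmem-style reader bounces the data through a
 * kernel buffer with vread() because the vmalloc area may contain holes
 * that a plain memcpy() would fault on. Hypothetical names; @count is
 * assumed small enough for kmalloc().
 */
static long example_read_vmalloc(char __user *ubuf, char *kaddr,
				 unsigned long count)
{
	char *kbuf;
	long ret;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = vread(kbuf, kaddr, count);
	if (ret > 0 && copy_to_user(ubuf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;
}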
2038
d0107eb0
KH
2039/**
2040 * vwrite() - write vmalloc area in a safe way.
2041 * @buf: buffer for source data
2042 * @addr: vm address.
2043 * @count: number of bytes to be written.
2044 *
2045 * Returns the number of bytes by which @addr and @buf should be
2046 * advanced (the same number as @count).
2047 * If [addr...addr+count) does not intersect any valid
2048 * vmalloc area, returns 0.
2049 *
2050 * This function checks that @addr is a valid vmalloc'ed area and
2051 * copies data from the buffer to that area. If the memory range
2052 * [addr...addr+count) includes some valid address, data is copied from
2053 * the proper part of @buf. Nothing is written to memory holes.
2054 * An IOREMAP area is treated as a memory hole and no copy is done.
2055 *
2056 * If [addr...addr+count) does not intersect any live vm_struct
a8e5202d 2057 * area, returns 0. @buf should be a kernel buffer.
d0107eb0
KH
2058 *
2059 * Note: In normal operation, vwrite() is never necessary because the
2060 * caller should know the vmalloc() area is valid and can use memcpy().
2061 * This is for routines which have to access the vmalloc area without
2062 * any prior information, such as /dev/kmem.
d0107eb0
KH
2063 */
2064
1da177e4
LT
2065long vwrite(char *buf, char *addr, unsigned long count)
2066{
e81ce85f
JK
2067 struct vmap_area *va;
2068 struct vm_struct *vm;
d0107eb0
KH
2069 char *vaddr;
2070 unsigned long n, buflen;
2071 int copied = 0;
1da177e4
LT
2072
2073 /* Don't allow overflow */
2074 if ((unsigned long) addr + count < count)
2075 count = -(unsigned long) addr;
d0107eb0 2076 buflen = count;
1da177e4 2077
e81ce85f
JK
2078 spin_lock(&vmap_area_lock);
2079 list_for_each_entry(va, &vmap_area_list, list) {
2080 if (!count)
2081 break;
2082
2083 if (!(va->flags & VM_VM_AREA))
2084 continue;
2085
2086 vm = va->vm;
2087 vaddr = (char *) vm->addr;
762216ab 2088 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2089 continue;
2090 while (addr < vaddr) {
2091 if (count == 0)
2092 goto finished;
2093 buf++;
2094 addr++;
2095 count--;
2096 }
762216ab 2097 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2098 if (n > count)
2099 n = count;
e81ce85f 2100 if (!(vm->flags & VM_IOREMAP)) {
d0107eb0
KH
2101 aligned_vwrite(buf, addr, n);
2102 copied++;
2103 }
2104 buf += n;
2105 addr += n;
2106 count -= n;
1da177e4
LT
2107 }
2108finished:
e81ce85f 2109 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2110 if (!copied)
2111 return 0;
2112 return buflen;
1da177e4 2113}
83342314
NP
2114
2115/**
e69e9d4a
HD
2116 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2117 * @vma: vma to cover
2118 * @uaddr: target user address to start at
2119 * @kaddr: virtual address of vmalloc kernel memory
2120 * @size: size of map area
7682486b
RD
2121 *
2122 * Returns: 0 for success, -Exxx on failure
83342314 2123 *
e69e9d4a
HD
2124 * This function checks that @kaddr is a valid vmalloc'ed area,
2125 * and that it is big enough to cover the range starting at
2126 * @uaddr in @vma. Will return failure if those criteria aren't
2127 * met.
83342314 2128 *
72fd4a35 2129 * Similar to remap_pfn_range() (see mm/memory.c)
83342314 2130 */
e69e9d4a
HD
2131int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2132 void *kaddr, unsigned long size)
83342314
NP
2133{
2134 struct vm_struct *area;
83342314 2135
e69e9d4a
HD
2136 size = PAGE_ALIGN(size);
2137
2138 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
83342314
NP
2139 return -EINVAL;
2140
e69e9d4a 2141 area = find_vm_area(kaddr);
83342314 2142 if (!area)
db64fe02 2143 return -EINVAL;
83342314
NP
2144
2145 if (!(area->flags & VM_USERMAP))
db64fe02 2146 return -EINVAL;
83342314 2147
e69e9d4a 2148 if (kaddr + size > area->addr + area->size)
db64fe02 2149 return -EINVAL;
83342314 2150
83342314 2151 do {
e69e9d4a 2152 struct page *page = vmalloc_to_page(kaddr);
db64fe02
NP
2153 int ret;
2154
83342314
NP
2155 ret = vm_insert_page(vma, uaddr, page);
2156 if (ret)
2157 return ret;
2158
2159 uaddr += PAGE_SIZE;
e69e9d4a
HD
2160 kaddr += PAGE_SIZE;
2161 size -= PAGE_SIZE;
2162 } while (size > 0);
83342314 2163
314e51b9 2164 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
83342314 2165
db64fe02 2166 return 0;
83342314 2167}
e69e9d4a
HD
2168EXPORT_SYMBOL(remap_vmalloc_range_partial);
2169
2170/**
2171 * remap_vmalloc_range - map vmalloc pages to userspace
2172 * @vma: vma to cover (map full range of vma)
2173 * @addr: vmalloc memory
2174 * @pgoff: number of pages into addr before first page to map
2175 *
2176 * Returns: 0 for success, -Exxx on failure
2177 *
2178 * This function checks that addr is a valid vmalloc'ed area, and
2179 * that it is big enough to cover the vma. Will return failure if
2180 * those criteria aren't met.
2181 *
2182 * Similar to remap_pfn_range() (see mm/memory.c)
2183 */
2184int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2185 unsigned long pgoff)
2186{
2187 return remap_vmalloc_range_partial(vma, vma->vm_start,
2188 addr + (pgoff << PAGE_SHIFT),
2189 vma->vm_end - vma->vm_start);
2190}
83342314
NP
2191EXPORT_SYMBOL(remap_vmalloc_range);
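/*
 * Illustrative sketch: the usual pattern in a driver's mmap handler,
 * exporting a buffer that was allocated earlier with vmalloc_user().
 * 'example_buf' and the handler name are hypothetical.
 */
static void *example_buf;	/* allocated earlier with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}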
2192
1eeb66a1
CH
2193/*
2194 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2195 * have one.
2196 */
3b32123d 2197void __weak vmalloc_sync_all(void)
1eeb66a1
CH
2198{
2199}
5f4352fb
JF
2200
2201
2f569afd 2202static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
5f4352fb 2203{
cd12909c
DV
2204 pte_t ***p = data;
2205
2206 if (p) {
2207 *(*p) = pte;
2208 (*p)++;
2209 }
5f4352fb
JF
2210 return 0;
2211}
2212
2213/**
2214 * alloc_vm_area - allocate a range of kernel address space
2215 * @size: size of the area
cd12909c 2216 * @ptes: returns the PTEs for the address space
7682486b
RD
2217 *
2218 * Returns: NULL on failure, vm_struct on success
5f4352fb
JF
2219 *
2220 * This function reserves a range of kernel address space, and
2221 * allocates pagetables to map that range. No actual mappings
cd12909c
DV
2222 * are created.
2223 *
2224 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2225 * allocated for the VM area are returned.
5f4352fb 2226 */
cd12909c 2227struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
5f4352fb
JF
2228{
2229 struct vm_struct *area;
2230
23016969
CL
2231 area = get_vm_area_caller(size, VM_IOREMAP,
2232 __builtin_return_address(0));
5f4352fb
JF
2233 if (area == NULL)
2234 return NULL;
2235
2236 /*
2237 * This ensures that page tables are constructed for this region
2238 * of kernel virtual address space and mapped into init_mm.
2239 */
2240 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
cd12909c 2241 size, f, ptes ? &ptes : NULL)) {
5f4352fb
JF
2242 free_vm_area(area);
2243 return NULL;
2244 }
2245
5f4352fb
JF
2246 return area;
2247}
2248EXPORT_SYMBOL_GPL(alloc_vm_area);
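/*
 * Illustrative sketch in the style of Xen grant-table code: reserve kernel
 * address space with no backing pages, then install mappings through the
 * returned PTEs later. Hypothetical names; the caller provides a ptes[]
 * array with one entry per page.
 */
static struct vm_struct *example_reserve_pages(unsigned int nr_pages,
					       pte_t **ptes)
{
	struct vm_struct *area;

	area = alloc_vm_area(nr_pages * PAGE_SIZE, ptes);
	if (!area)
		return NULL;

	/* ... point ptes[0..nr_pages-1] at the desired frames ... */
	return area;
}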
2249
2250void free_vm_area(struct vm_struct *area)
2251{
2252 struct vm_struct *ret;
2253 ret = remove_vm_area(area->addr);
2254 BUG_ON(ret != area);
2255 kfree(area);
2256}
2257EXPORT_SYMBOL_GPL(free_vm_area);
a10aa579 2258
4f8b02b4 2259#ifdef CONFIG_SMP
ca23e405
TH
2260static struct vmap_area *node_to_va(struct rb_node *n)
2261{
2262 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2263}
2264
2265/**
2266 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2267 * @end: target address
2268 * @pnext: out arg for the next vmap_area
2269 * @pprev: out arg for the previous vmap_area
2270 *
2271 * Returns: %true if either or both of next and prev are found,
2272 * %false if no vmap_area exists
2273 *
2274 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
2275 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
2276 */
2277static bool pvm_find_next_prev(unsigned long end,
2278 struct vmap_area **pnext,
2279 struct vmap_area **pprev)
2280{
2281 struct rb_node *n = vmap_area_root.rb_node;
2282 struct vmap_area *va = NULL;
2283
2284 while (n) {
2285 va = rb_entry(n, struct vmap_area, rb_node);
2286 if (end < va->va_end)
2287 n = n->rb_left;
2288 else if (end > va->va_end)
2289 n = n->rb_right;
2290 else
2291 break;
2292 }
2293
2294 if (!va)
2295 return false;
2296
2297 if (va->va_end > end) {
2298 *pnext = va;
2299 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2300 } else {
2301 *pprev = va;
2302 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2303 }
2304 return true;
2305}
2306
2307/**
2308 * pvm_determine_end - find the highest aligned address between two vmap_areas
2309 * @pnext: in/out arg for the next vmap_area
2310 * @pprev: in/out arg for the previous vmap_area
2311 * @align: alignment
2312 *
2313 * Returns: determined end address
2314 *
2315 * Find the highest aligned address between *@pnext and *@pprev below
2316 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
2317 * down address is between the end addresses of the two vmap_areas.
2318 *
2319 * Please note that the address returned by this function may fall
2320 * inside *@pnext vmap_area. The caller is responsible for checking
2321 * that.
2322 */
2323static unsigned long pvm_determine_end(struct vmap_area **pnext,
2324 struct vmap_area **pprev,
2325 unsigned long align)
2326{
2327 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2328 unsigned long addr;
2329
2330 if (*pnext)
2331 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2332 else
2333 addr = vmalloc_end;
2334
2335 while (*pprev && (*pprev)->va_end > addr) {
2336 *pnext = *pprev;
2337 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2338 }
2339
2340 return addr;
2341}
2342
2343/**
2344 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2345 * @offsets: array containing offset of each area
2346 * @sizes: array containing size of each area
2347 * @nr_vms: the number of areas to allocate
2348 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
ca23e405
TH
2349 *
2350 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2351 * vm_structs on success, %NULL on failure
2352 *
2353 * Percpu allocator wants to use congruent vm areas so that it can
2354 * maintain the offsets among percpu areas. This function allocates
ec3f64fc
DR
2355 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
2356 * be scattered pretty far apart; the distance between two areas can
2357 * easily reach gigabytes. To avoid interacting with regular vmallocs,
2358 * these areas are allocated from the top down.
ca23e405
TH
2359 *
2360 * Despite its complicated look, this allocator is rather simple. It
2361 * does everything top-down and scans areas from the end looking for
2362 * a matching slot. While scanning, if any of the areas overlaps with
2363 * an existing vmap_area, the base address is pulled down to fit the
2364 * area. Scanning is repeated until all the areas fit and then all
2365 * necessary data structures are inserted and the result is returned.
2366 */
2367struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2368 const size_t *sizes, int nr_vms,
ec3f64fc 2369 size_t align)
ca23e405
TH
2370{
2371 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2372 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2373 struct vmap_area **vas, *prev, *next;
2374 struct vm_struct **vms;
2375 int area, area2, last_area, term_area;
2376 unsigned long base, start, end, last_end;
2377 bool purged = false;
2378
ca23e405
TH
2379 /* verify parameters and allocate data structures */
2380 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2381 for (last_area = 0, area = 0; area < nr_vms; area++) {
2382 start = offsets[area];
2383 end = start + sizes[area];
2384
2385 /* is everything aligned properly? */
2386 BUG_ON(!IS_ALIGNED(offsets[area], align));
2387 BUG_ON(!IS_ALIGNED(sizes[area], align));
2388
2389 /* detect the area with the highest address */
2390 if (start > offsets[last_area])
2391 last_area = area;
2392
2393 for (area2 = 0; area2 < nr_vms; area2++) {
2394 unsigned long start2 = offsets[area2];
2395 unsigned long end2 = start2 + sizes[area2];
2396
2397 if (area2 == area)
2398 continue;
2399
2400 BUG_ON(start2 >= start && start2 < end);
2401 BUG_ON(end2 <= end && end2 > start);
2402 }
2403 }
2404 last_end = offsets[last_area] + sizes[last_area];
2405
2406 if (vmalloc_end - vmalloc_start < last_end) {
2407 WARN_ON(true);
2408 return NULL;
2409 }
2410
4d67d860
TM
2411 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2412 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
ca23e405 2413 if (!vas || !vms)
f1db7afd 2414 goto err_free2;
ca23e405
TH
2415
2416 for (area = 0; area < nr_vms; area++) {
ec3f64fc
DR
2417 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2418 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
ca23e405
TH
2419 if (!vas[area] || !vms[area])
2420 goto err_free;
2421 }
2422retry:
2423 spin_lock(&vmap_area_lock);
2424
2425 /* start scanning - we scan from the top, begin with the last area */
2426 area = term_area = last_area;
2427 start = offsets[area];
2428 end = start + sizes[area];
2429
2430 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2431 base = vmalloc_end - last_end;
2432 goto found;
2433 }
2434 base = pvm_determine_end(&next, &prev, align) - end;
2435
2436 while (true) {
2437 BUG_ON(next && next->va_end <= base + end);
2438 BUG_ON(prev && prev->va_end > base + end);
2439
2440 /*
2441 * base might have underflowed, add last_end before
2442 * comparing.
2443 */
2444 if (base + last_end < vmalloc_start + last_end) {
2445 spin_unlock(&vmap_area_lock);
2446 if (!purged) {
2447 purge_vmap_area_lazy();
2448 purged = true;
2449 goto retry;
2450 }
2451 goto err_free;
2452 }
2453
2454 /*
2455 * If next overlaps, move base downwards so that it's
2456 * right below next and then recheck.
2457 */
2458 if (next && next->va_start < base + end) {
2459 base = pvm_determine_end(&next, &prev, align) - end;
2460 term_area = area;
2461 continue;
2462 }
2463
2464 /*
2465 * If prev overlaps, shift down next and prev and move
2466 * base so that it's right below new next and then
2467 * recheck.
2468 */
2469 if (prev && prev->va_end > base + start) {
2470 next = prev;
2471 prev = node_to_va(rb_prev(&next->rb_node));
2472 base = pvm_determine_end(&next, &prev, align) - end;
2473 term_area = area;
2474 continue;
2475 }
2476
2477 /*
2478 * This area fits, move on to the previous one. If
2479 * the previous one is the terminal one, we're done.
2480 */
2481 area = (area + nr_vms - 1) % nr_vms;
2482 if (area == term_area)
2483 break;
2484 start = offsets[area];
2485 end = start + sizes[area];
2486 pvm_find_next_prev(base + end, &next, &prev);
2487 }
2488found:
2489 /* we've found a fitting base, insert all va's */
2490 for (area = 0; area < nr_vms; area++) {
2491 struct vmap_area *va = vas[area];
2492
2493 va->va_start = base + offsets[area];
2494 va->va_end = va->va_start + sizes[area];
2495 __insert_vmap_area(va);
2496 }
2497
2498 vmap_area_pcpu_hole = base + offsets[last_area];
2499
2500 spin_unlock(&vmap_area_lock);
2501
2502 /* insert all vm's */
2503 for (area = 0; area < nr_vms; area++)
3645cb4a
ZY
2504 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2505 pcpu_get_vm_areas);
ca23e405
TH
2506
2507 kfree(vas);
2508 return vms;
2509
2510err_free:
2511 for (area = 0; area < nr_vms; area++) {
f1db7afd
KC
2512 kfree(vas[area]);
2513 kfree(vms[area]);
ca23e405 2514 }
f1db7afd 2515err_free2:
ca23e405
TH
2516 kfree(vas);
2517 kfree(vms);
2518 return NULL;
2519}
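/*
 * Illustrative sketch of the calling convention (mm/percpu.c is the only
 * real caller): request two congruent single-page areas whose offsets stay
 * 1MB apart. All values here are hypothetical.
 */
static struct vm_struct **example_pcpu_areas(void)
{
	static const unsigned long offsets[] = { 0, 1UL << 20 };
	static const size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };

	return pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
}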
2520
2521/**
2522 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2523 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2524 * @nr_vms: the number of allocated areas
2525 *
2526 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2527 */
2528void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2529{
2530 int i;
2531
2532 for (i = 0; i < nr_vms; i++)
2533 free_vm_area(vms[i]);
2534 kfree(vms);
2535}
4f8b02b4 2536#endif /* CONFIG_SMP */
a10aa579
CL
2537
2538#ifdef CONFIG_PROC_FS
2539static void *s_start(struct seq_file *m, loff_t *pos)
d4033afd 2540 __acquires(&vmap_area_lock)
a10aa579
CL
2541{
2542 loff_t n = *pos;
d4033afd 2543 struct vmap_area *va;
a10aa579 2544
d4033afd
JK
2545 spin_lock(&vmap_area_lock);
2546 va = list_entry((&vmap_area_list)->next, typeof(*va), list);
2547 while (n > 0 && &va->list != &vmap_area_list) {
a10aa579 2548 n--;
d4033afd 2549 va = list_entry(va->list.next, typeof(*va), list);
a10aa579 2550 }
d4033afd
JK
2551 if (!n && &va->list != &vmap_area_list)
2552 return va;
a10aa579
CL
2553
2554 return NULL;
2555
2556}
2557
2558static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2559{
d4033afd 2560 struct vmap_area *va = p, *next;
a10aa579
CL
2561
2562 ++*pos;
d4033afd
JK
2563 next = list_entry(va->list.next, typeof(*va), list);
2564 if (&next->list != &vmap_area_list)
2565 return next;
2566
2567 return NULL;
a10aa579
CL
2568}
2569
2570static void s_stop(struct seq_file *m, void *p)
d4033afd 2571 __releases(&vmap_area_lock)
a10aa579 2572{
d4033afd 2573 spin_unlock(&vmap_area_lock);
a10aa579
CL
2574}
2575
a47a126a
ED
2576static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2577{
e5adfffc 2578 if (IS_ENABLED(CONFIG_NUMA)) {
a47a126a
ED
2579 unsigned int nr, *counters = m->private;
2580
2581 if (!counters)
2582 return;
2583
af12346c
WL
2584 if (v->flags & VM_UNINITIALIZED)
2585 return;
7e5b528b
DV
2586 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2587 smp_rmb();
af12346c 2588
a47a126a
ED
2589 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2590
2591 for (nr = 0; nr < v->nr_pages; nr++)
2592 counters[page_to_nid(v->pages[nr])]++;
2593
2594 for_each_node_state(nr, N_HIGH_MEMORY)
2595 if (counters[nr])
2596 seq_printf(m, " N%u=%u", nr, counters[nr]);
2597 }
2598}
2599
a10aa579
CL
2600static int s_show(struct seq_file *m, void *p)
2601{
d4033afd
JK
2602 struct vmap_area *va = p;
2603 struct vm_struct *v;
2604
c2ce8c14
WL
2605 /*
2606 * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
2607 * area is being torn down or belongs to a vm_map_ram allocation.
2608 */
2609 if (!(va->flags & VM_VM_AREA))
d4033afd 2610 return 0;
d4033afd
JK
2611
2612 v = va->vm;
a10aa579 2613
45ec1690 2614 seq_printf(m, "0x%pK-0x%pK %7ld",
a10aa579
CL
2615 v->addr, v->addr + v->size, v->size);
2616
62c70bce
JP
2617 if (v->caller)
2618 seq_printf(m, " %pS", v->caller);
23016969 2619
a10aa579
CL
2620 if (v->nr_pages)
2621 seq_printf(m, " pages=%d", v->nr_pages);
2622
2623 if (v->phys_addr)
ffa71f33 2624 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
a10aa579
CL
2625
2626 if (v->flags & VM_IOREMAP)
f4527c90 2627 seq_puts(m, " ioremap");
a10aa579
CL
2628
2629 if (v->flags & VM_ALLOC)
f4527c90 2630 seq_puts(m, " vmalloc");
a10aa579
CL
2631
2632 if (v->flags & VM_MAP)
f4527c90 2633 seq_puts(m, " vmap");
a10aa579
CL
2634
2635 if (v->flags & VM_USERMAP)
f4527c90 2636 seq_puts(m, " user");
a10aa579
CL
2637
2638 if (v->flags & VM_VPAGES)
f4527c90 2639 seq_puts(m, " vpages");
a10aa579 2640
a47a126a 2641 show_numa_info(m, v);
a10aa579
CL
2642 seq_putc(m, '\n');
2643 return 0;
2644}
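/*
 * Illustrative output: one /proc/vmallocinfo line as produced by s_show()
 * above. The addresses, caller, and counts are made up; 266240 bytes is
 * 65 pages (64 backing pages plus the guard page), split across two nodes:
 *
 *   0xffffc90000d0b000-0xffffc90000d4c000  266240 alloc_large_system_hash+0x171/0x239 pages=64 vmalloc N0=32 N1=32
 */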
2645
5f6a6a9c 2646static const struct seq_operations vmalloc_op = {
a10aa579
CL
2647 .start = s_start,
2648 .next = s_next,
2649 .stop = s_stop,
2650 .show = s_show,
2651};
5f6a6a9c
AD
2652
2653static int vmalloc_open(struct inode *inode, struct file *file)
2654{
703394c1
RJ
2655 if (IS_ENABLED(CONFIG_NUMA))
2656 return seq_open_private(file, &vmalloc_op,
2657 nr_node_ids * sizeof(unsigned int));
2658 else
2659 return seq_open(file, &vmalloc_op);
5f6a6a9c
AD
2660}
2661
2662static const struct file_operations proc_vmalloc_operations = {
2663 .open = vmalloc_open,
2664 .read = seq_read,
2665 .llseek = seq_lseek,
2666 .release = seq_release_private,
2667};
2668
2669static int __init proc_vmalloc_init(void)
2670{
2671 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2672 return 0;
2673}
2674module_init(proc_vmalloc_init);
db3808c1
JK
2675
2676void get_vmalloc_info(struct vmalloc_info *vmi)
2677{
f98782dd 2678 struct vmap_area *va;
db3808c1
JK
2679 unsigned long free_area_size;
2680 unsigned long prev_end;
2681
2682 vmi->used = 0;
f98782dd 2683 vmi->largest_chunk = 0;
db3808c1 2684
f98782dd 2685 prev_end = VMALLOC_START;
db3808c1 2686
474750ab 2687 rcu_read_lock();
db3808c1 2688
f98782dd
JK
2689 if (list_empty(&vmap_area_list)) {
2690 vmi->largest_chunk = VMALLOC_TOTAL;
2691 goto out;
2692 }
db3808c1 2693
474750ab 2694 list_for_each_entry_rcu(va, &vmap_area_list, list) {
f98782dd 2695 unsigned long addr = va->va_start;
db3808c1 2696
f98782dd
JK
2697 /*
2698 * Some archs keep another range for modules in vmalloc space
2699 */
2700 if (addr < VMALLOC_START)
2701 continue;
2702 if (addr >= VMALLOC_END)
2703 break;
db3808c1 2704
f98782dd
JK
2705 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2706 continue;
db3808c1 2707
f98782dd 2708 vmi->used += (va->va_end - va->va_start);
db3808c1 2709
f98782dd
JK
2710 free_area_size = addr - prev_end;
2711 if (vmi->largest_chunk < free_area_size)
2712 vmi->largest_chunk = free_area_size;
db3808c1 2713
f98782dd 2714 prev_end = va->va_end;
db3808c1 2715 }
f98782dd
JK
2716
2717 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2718 vmi->largest_chunk = VMALLOC_END - prev_end;
2719
2720out:
474750ab 2721 rcu_read_unlock();
db3808c1 2722}
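/*
 * Illustrative sketch: how a consumer in the style of fs/proc/meminfo.c
 * would report the summary that get_vmalloc_info() fills in.
 */
static void example_report_vmalloc(struct seq_file *m)
{
	struct vmalloc_info vmi;

	get_vmalloc_info(&vmi);
	seq_printf(m, "VmallocUsed:  %8lu kB\n", vmi.used >> 10);
	seq_printf(m, "VmallocChunk: %8lu kB\n", vmi.largest_chunk >> 10);
}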
a10aa579
CL
2723#endif
2724