// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

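/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above are meant to be used. Assuming "page0"/"page1" are pages the
 * caller already allocated and "addr" is page-aligned KVA reserved
 * elsewhere, the pte loop in vmap_pte_range() installs one pte per
 * array slot:
 *
 *	struct page *pages[2] = { page0, page1 };
 *
 *	if (vmap_page_range(addr, addr + 2 * PAGE_SIZE,
 *			    PAGE_KERNEL, pages) < 0)
 *		return -ENOMEM;
 *
 * On success the return value is 2 (the number of ptes set), and the
 * pte at addr + N * PAGE_SIZE points to pages[N]. "page0", "page1"
 * and "addr" are hypothetical placeholders.
 */
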
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

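/*
 * Illustrative sketch (not part of the original file): recovering the
 * backing page of a vmalloc'ed buffer, e.g. when building an sg list.
 * "buf" is a hypothetical pointer obtained from vmalloc():
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	struct page *page = vmalloc_to_page(buf + PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 *
 * Here "page" is the second page backing "buf", and
 * pfn == page_to_pfn(page).
 */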

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

#define VM_LAZY_FREE	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when a node is removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size,
	compute_subtree_max_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

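/*
 * Illustrative sketch (not part of the original file): for a free tree
 * holding areas of 2, 4 and 8 pages with the 4-page area at the root,
 * compute_subtree_max_size(root) is max3(4, 2, 8) == 8 pages. A search
 * for a 6-page block can therefore skip any subtree whose
 * subtree_max_size is below 6 without visiting its nodes. The concrete
 * sizes are made up for illustration.
 */
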
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link, for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the BUG() if the new area partially or fully
		 * overlaps an existing one.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * up to the parent levels once the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end, or
 * when a VA is newly inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Note that this does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * only its subtree_max_size is updated, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 */
static __always_inline void
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);
			return;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

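/*
 * Illustrative sketch (not part of the original file): suppose
 * [4MB..8MB) and [12MB..16MB) are already free and [8MB..12MB) is
 * released. The first check above extends the next free area down to
 * 8MB (making it [8MB..16MB)); the second then extends the previous
 * area up to 16MB and frees the now-merged middle object, leaving a
 * single free area [4MB..16MB). The addresses are made up.
 */
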
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

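/*
 * Illustrative sketch (not part of the original file): why the overflow
 * check above is needed. With nva_start_addr == ULONG_MAX - PAGE_SIZE
 * and size == 2 * PAGE_SIZE, "nva_start_addr + size" wraps around zero
 * and, without the check, could compare as "<= va->va_end" even though
 * the request cannot possibly fit. The values are made up.
 */
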
/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request described by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * only once due to "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

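/*
 * Illustrative sketch (not part of the original file): for a free area
 * va spanning [0x1000..0x5000), classify_va_fit_type() returns:
 *
 *	classify_va_fit_type(va, 0x1000, 0x4000) == FL_FIT_TYPE
 *	classify_va_fit_type(va, 0x1000, 0x1000) == LE_FIT_TYPE
 *	classify_va_fit_type(va, 0x4000, 0x1000) == RE_FIT_TYPE
 *	classify_va_fit_type(va, 0x2000, 0x1000) == NE_FIT_TYPE
 *
 * Only the NE_FIT_TYPE case needs a second vmap_area object, which is
 * what ne_fit_preload_node exists for. The numbers are made up.
 */
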
static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason is
			 * that it most likely never ends up with NE_FIT_TYPE
			 * splitting. In case of percpu allocations, offsets
			 * and sizes are aligned to a fixed align request,
			 * i.e. RE_FIT_TYPE and FL_FIT_TYPE are its main
			 * fitting cases.
			 *
			 * There are a few exceptions though; for example the
			 * first allocation (early boot-up), when we have "one"
			 * big free space that has to be split.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	va = kmem_cache_alloc_node(vmap_area_cachep,
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object to ensure
	 * that we have it available when fit type of free area is
	 * NE_FIT_TYPE.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable under
	 * low memory condition and high memory pressure.
	 *
	 * Even if it fails we do not really care about that. Just proceed
	 * as it is. "overflow" path will refill the cache we allocate from.
	 */
	preempt_disable();
	if (!__this_cpu_read(ne_fit_preload_node)) {
		preempt_enable();
		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
		preempt_disable();

		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
			if (pva)
				kmem_cache_free(vmap_area_cachep, pva);
		}
	}

	spin_lock(&vmap_area_lock);
	preempt_enable();

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);

	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

static void __free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	unlink_va(va, &vmap_area_root);

	/*
	 * Merge VA with its neighbors, otherwise just add it.
	 */
	merge_or_add_vmap_area(va,
		&free_vmap_area_root, &free_vmap_area_list);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
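
/*
 * Illustrative sketch (not part of the original file): with 4KB pages,
 * 32UL * 1024 * 1024 / PAGE_SIZE == 8192 pages (32MB) per log step. On
 * a 16-CPU machine fls(16) == 5, so up to 5 * 8192 pages == 160MB of
 * lazily freed space may accumulate before a purge plus TLB flush is
 * forced.
 */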

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_all();

	/*
	 * TODO: calculate the flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		__free_vmap_area(va);
		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

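/*
 * Illustrative sketch (not part of the original file): on 64-bit with
 * 4KB pages and NR_CPUS == 64, VMALLOC_PAGES is 32M pages, so
 * VMALLOC_PAGES / 64 / 16 == 32768, which VMAP_MIN() clamps down to
 * VMAP_BBMAP_BITS_MAX (1024). VMAP_BLOCK_SIZE is then 1024 * 4KB ==
 * 4MB, matching the "4MB with 4K pages" note above.
 */
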
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
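
/*
 * Illustrative sketch (not part of the original file): assuming
 * VMALLOC_START is VMAP_BLOCK_SIZE-aligned, every address inside one
 * block maps to the same index:
 *
 *	addr_to_vb_idx(VMALLOC_START)                       == 0
 *	addr_to_vb_idx(VMALLOC_START + VMAP_BLOCK_SIZE - 1) == 0
 *	addr_to_vb_idx(VMALLOC_START + VMAP_BLOCK_SIZE)     == 1
 *
 * This is what lets vb_free() look up the owning vmap_block in the
 * radix tree from a bare pointer.
 */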

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range((unsigned long)addr,
				       (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(mem, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good. But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine). You could see failures in
 * the end. Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
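
/*
 * Illustrative sketch (not part of the original file): a typical
 * short-lived mapping, as recommended above. "pages", "src" and "nr"
 * are hypothetical; nr <= VMAP_MAX_ALLOC takes the fast per-cpu
 * vmap-block path, larger counts fall back to alloc_vmap_area():
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (va) {
 *		memcpy(va, src, nr * PAGE_SIZE);
 *		vm_unmap_ram(va, nr);
 *	}
 */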

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

f0aa6617
TH
1826/**
1827 * vm_area_register_early - register vmap area early during boot
1828 * @vm: vm_struct to register
c0c0a293 1829 * @align: requested alignment
f0aa6617
TH
1830 *
1831 * This function is used to register kernel vm area before
1832 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1833 * proper values on entry and other fields should be zero. On return,
1834 * vm->addr contains the allocated address.
1835 *
1836 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1837 */
c0c0a293 1838void __init vm_area_register_early(struct vm_struct *vm, size_t align)
f0aa6617
TH
1839{
1840 static size_t vm_init_off __initdata;
c0c0a293
TH
1841 unsigned long addr;
1842
1843 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1844 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
f0aa6617 1845
c0c0a293 1846 vm->addr = (void *)addr;
f0aa6617 1847
be9b7335 1848 vm_area_add_early(vm);
f0aa6617
TH
1849}
1850
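/*
 * Illustrative sketch: how boot code (e.g. percpu first-chunk setup)
 * reserves vmalloc space before vmalloc_init() runs. The vm_struct and
 * its size here are hypothetical.
 */
static struct vm_struct example_early_vm __initdata = {
	.flags	= VM_ALLOC,
	.size	= PAGE_SIZE * 16,
};

static void __init example_reserve_early(void)
{
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the reserved address */
}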
68ad4a33
URS
1851static void vmap_init_free_space(void)
1852{
1853 unsigned long vmap_start = 1;
1854 const unsigned long vmap_end = ULONG_MAX;
1855 struct vmap_area *busy, *free;
1856
1857 /*
1858 * B F B B B F
1859 * -|-----|.....|-----|-----|-----|.....|-
1860 * | The KVA space |
1861 * |<--------------------------------->|
1862 */
1863 list_for_each_entry(busy, &vmap_area_list, list) {
1864 if (busy->va_start - vmap_start > 0) {
1865 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1866 if (!WARN_ON_ONCE(!free)) {
1867 free->va_start = vmap_start;
1868 free->va_end = busy->va_start;
1869
1870 insert_vmap_area_augment(free, NULL,
1871 &free_vmap_area_root,
1872 &free_vmap_area_list);
1873 }
1874 }
1875
1876 vmap_start = busy->va_end;
1877 }
1878
1879 if (vmap_end - vmap_start > 0) {
1880 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1881 if (!WARN_ON_ONCE(!free)) {
1882 free->va_start = vmap_start;
1883 free->va_end = vmap_end;
1884
1885 insert_vmap_area_augment(free, NULL,
1886 &free_vmap_area_root,
1887 &free_vmap_area_list);
1888 }
1889 }
1890}
1891
db64fe02
NP
1892void __init vmalloc_init(void)
1893{
822c18f2
IK
1894 struct vmap_area *va;
1895 struct vm_struct *tmp;
db64fe02
NP
1896 int i;
1897
68ad4a33
URS
1898 /*
1899 * Create the cache for vmap_area objects.
1900 */
1901 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1902
db64fe02
NP
1903 for_each_possible_cpu(i) {
1904 struct vmap_block_queue *vbq;
32fcfd40 1905 struct vfree_deferred *p;
db64fe02
NP
1906
1907 vbq = &per_cpu(vmap_block_queue, i);
1908 spin_lock_init(&vbq->lock);
1909 INIT_LIST_HEAD(&vbq->free);
32fcfd40
AV
1910 p = &per_cpu(vfree_deferred, i);
1911 init_llist_head(&p->list);
1912 INIT_WORK(&p->wq, free_work);
db64fe02 1913 }
9b463334 1914
822c18f2
IK
1915 /* Import existing vmlist entries. */
1916 for (tmp = vmlist; tmp; tmp = tmp->next) {
68ad4a33
URS
1917 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1918 if (WARN_ON_ONCE(!va))
1919 continue;
1920
dbda591d 1921 va->flags = VM_VM_AREA;
822c18f2
IK
1922 va->va_start = (unsigned long)tmp->addr;
1923 va->va_end = va->va_start + tmp->size;
dbda591d 1924 va->vm = tmp;
68ad4a33 1925 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
822c18f2 1926 }
ca23e405 1927
68ad4a33
URS
1928 /*
1929 * Now we can initialize a free vmap space.
1930 */
1931 vmap_init_free_space();
9b463334 1932 vmap_initialized = true;
db64fe02
NP
1933}
1934
8fc48985
TH
1935/**
1936 * map_kernel_range_noflush - map kernel VM area with the specified pages
1937 * @addr: start of the VM area to map
1938 * @size: size of the VM area to map
1939 * @prot: page protection flags to use
1940 * @pages: pages to map
1941 *
 1942 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 1943 * specify should have been allocated using get_vm_area() and its
1944 * friends.
1945 *
1946 * NOTE:
1947 * This function does NOT do any cache flushing. The caller is
1948 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1949 * before calling this function.
1950 *
1951 * RETURNS:
1952 * The number of pages mapped on success, -errno on failure.
1953 */
1954int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1955 pgprot_t prot, struct page **pages)
1956{
1957 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1958}
1959
1960/**
1961 * unmap_kernel_range_noflush - unmap kernel VM area
1962 * @addr: start of the VM area to unmap
1963 * @size: size of the VM area to unmap
1964 *
 1965 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 1966 * specify should have been allocated using get_vm_area() and its
1967 * friends.
1968 *
1969 * NOTE:
1970 * This function does NOT do any cache flushing. The caller is
 1971 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1972 * before calling this function and flush_tlb_kernel_range() after.
1973 */
1974void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1975{
1976 vunmap_page_range(addr, addr + size);
1977}
81e88fdc 1978EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
8fc48985
TH
1979
1980/**
1981 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1982 * @addr: start of the VM area to unmap
1983 * @size: size of the VM area to unmap
1984 *
 1985 * Similar to unmap_kernel_range_noflush() but flushes the virtual
 1986 * cache before the unmapping and the TLB after.
1987 */
db64fe02
NP
1988void unmap_kernel_range(unsigned long addr, unsigned long size)
1989{
1990 unsigned long end = addr + size;
f6fcba70
TH
1991
1992 flush_cache_vunmap(addr, end);
db64fe02
NP
1993 vunmap_page_range(addr, end);
1994 flush_tlb_kernel_range(addr, end);
1995}
93ef6d6c 1996EXPORT_SYMBOL_GPL(unmap_kernel_range);
db64fe02 1997
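/*
 * Illustrative sketch: the explicit cache/TLB maintenance a caller of
 * the *_noflush variant owes, mirroring unmap_kernel_range() above.
 * addr/size are assumed to describe an area from get_vm_area().
 */
static void example_unmap_with_flushes(unsigned long addr, unsigned long size)
{
	flush_cache_vunmap(addr, addr + size);		/* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */
}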
f6f8ed47 1998int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
db64fe02
NP
1999{
2000 unsigned long addr = (unsigned long)area->addr;
762216ab 2001 unsigned long end = addr + get_vm_area_size(area);
db64fe02
NP
2002 int err;
2003
f6f8ed47 2004 err = vmap_page_range(addr, end, prot, pages);
db64fe02 2005
f6f8ed47 2006 return err > 0 ? 0 : err;
db64fe02
NP
2007}
2008EXPORT_SYMBOL_GPL(map_vm_area);
2009
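/*
 * Illustrative sketch: reserving a region with get_vm_area() and
 * populating it with map_vm_area(), which is roughly what vmap() does
 * further below. The caller is assumed to supply nr valid pages.
 */
static void *example_map_pages(struct page **pages, unsigned int nr)
{
	struct vm_struct *area;

	area = get_vm_area((unsigned long)nr << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;	/* release later with vunmap() */
}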
f5252e00 2010static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
5e6cafc8 2011 unsigned long flags, const void *caller)
cf88c790 2012{
c69480ad 2013 spin_lock(&vmap_area_lock);
cf88c790
TH
2014 vm->flags = flags;
2015 vm->addr = (void *)va->va_start;
2016 vm->size = va->va_end - va->va_start;
2017 vm->caller = caller;
db1aecaf 2018 va->vm = vm;
cf88c790 2019 va->flags |= VM_VM_AREA;
c69480ad 2020 spin_unlock(&vmap_area_lock);
f5252e00 2021}
cf88c790 2022
20fc02b4 2023static void clear_vm_uninitialized_flag(struct vm_struct *vm)
f5252e00 2024{
d4033afd 2025 /*
20fc02b4 2026 * Before removing VM_UNINITIALIZED,
d4033afd
JK
2027 * we should make sure that vm has proper values.
2028 * Pair with smp_rmb() in show_numa_info().
2029 */
2030 smp_wmb();
20fc02b4 2031 vm->flags &= ~VM_UNINITIALIZED;
cf88c790
TH
2032}
2033
db64fe02 2034static struct vm_struct *__get_vm_area_node(unsigned long size,
2dca6999 2035 unsigned long align, unsigned long flags, unsigned long start,
5e6cafc8 2036 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
db64fe02 2037{
0006526d 2038 struct vmap_area *va;
db64fe02 2039 struct vm_struct *area;
1da177e4 2040
52fd24ca 2041 BUG_ON(in_interrupt());
1da177e4 2042 size = PAGE_ALIGN(size);
31be8309
OH
2043 if (unlikely(!size))
2044 return NULL;
1da177e4 2045
252e5c6e 2046 if (flags & VM_IOREMAP)
2047 align = 1ul << clamp_t(int, get_count_order_long(size),
2048 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2049
cf88c790 2050 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1da177e4
LT
2051 if (unlikely(!area))
2052 return NULL;
2053
71394fe5
AR
2054 if (!(flags & VM_NO_GUARD))
2055 size += PAGE_SIZE;
1da177e4 2056
db64fe02
NP
2057 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2058 if (IS_ERR(va)) {
2059 kfree(area);
2060 return NULL;
1da177e4 2061 }
1da177e4 2062
d82b1d85 2063 setup_vmalloc_vm(area, va, flags, caller);
f5252e00 2064
1da177e4 2065 return area;
1da177e4
LT
2066}
2067
930fc45a
CL
2068struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2069 unsigned long start, unsigned long end)
2070{
00ef2d2f
DR
2071 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2072 GFP_KERNEL, __builtin_return_address(0));
930fc45a 2073}
5992b6da 2074EXPORT_SYMBOL_GPL(__get_vm_area);
930fc45a 2075
c2968612
BH
2076struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2077 unsigned long start, unsigned long end,
5e6cafc8 2078 const void *caller)
c2968612 2079{
00ef2d2f
DR
2080 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2081 GFP_KERNEL, caller);
c2968612
BH
2082}
2083
1da177e4 2084/**
92eac168
MR
2085 * get_vm_area - reserve a contiguous kernel virtual area
2086 * @size: size of the area
2087 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
1da177e4 2088 *
92eac168
MR
2089 * Search an area of @size in the kernel virtual mapping area,
 2090 * and reserve it for our purposes. Returns the area descriptor
2091 * on success or %NULL on failure.
a862f68a
MR
2092 *
2093 * Return: the area descriptor on success or %NULL on failure.
1da177e4
LT
2094 */
2095struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2096{
2dca6999 2097 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
00ef2d2f
DR
2098 NUMA_NO_NODE, GFP_KERNEL,
2099 __builtin_return_address(0));
23016969
CL
2100}
2101
2102struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
5e6cafc8 2103 const void *caller)
23016969 2104{
2dca6999 2105 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
00ef2d2f 2106 NUMA_NO_NODE, GFP_KERNEL, caller);
1da177e4
LT
2107}
2108
e9da6e99 2109/**
92eac168
MR
 2110 * find_vm_area - find a contiguous kernel virtual area
2111 * @addr: base address
e9da6e99 2112 *
92eac168
MR
2113 * Search for the kernel VM area starting at @addr, and return it.
2114 * It is up to the caller to do all required locking to keep the returned
2115 * pointer valid.
a862f68a
MR
2116 *
 2117 * Return: pointer to the found area or %NULL on failure
e9da6e99
MS
2118 */
2119struct vm_struct *find_vm_area(const void *addr)
83342314 2120{
db64fe02 2121 struct vmap_area *va;
83342314 2122
db64fe02
NP
2123 va = find_vmap_area((unsigned long)addr);
2124 if (va && va->flags & VM_VM_AREA)
db1aecaf 2125 return va->vm;
1da177e4 2126
1da177e4 2127 return NULL;
1da177e4
LT
2128}
2129
7856dfeb 2130/**
92eac168
MR
 2131 * remove_vm_area - find and remove a contiguous kernel virtual area
2132 * @addr: base address
7856dfeb 2133 *
92eac168
MR
2134 * Search for the kernel VM area starting at @addr, and remove it.
2135 * This function returns the found VM area, but using it is NOT safe
2136 * on SMP machines, except for its size or flags.
a862f68a
MR
2137 *
 2138 * Return: pointer to the found area or %NULL on failure
7856dfeb 2139 */
b3bdda02 2140struct vm_struct *remove_vm_area(const void *addr)
7856dfeb 2141{
db64fe02
NP
2142 struct vmap_area *va;
2143
5803ed29
CH
2144 might_sleep();
2145
db64fe02
NP
2146 va = find_vmap_area((unsigned long)addr);
2147 if (va && va->flags & VM_VM_AREA) {
db1aecaf 2148 struct vm_struct *vm = va->vm;
f5252e00 2149
c69480ad
JK
2150 spin_lock(&vmap_area_lock);
2151 va->vm = NULL;
2152 va->flags &= ~VM_VM_AREA;
78c72746 2153 va->flags |= VM_LAZY_FREE;
c69480ad
JK
2154 spin_unlock(&vmap_area_lock);
2155
a5af5aa8 2156 kasan_free_shadow(vm);
dd32c279 2157 free_unmap_vmap_area(va);
dd32c279 2158
db64fe02
NP
2159 return vm;
2160 }
2161 return NULL;
7856dfeb
AK
2162}
2163
868b104d
RE
2164static inline void set_area_direct_map(const struct vm_struct *area,
2165 int (*set_direct_map)(struct page *page))
2166{
2167 int i;
2168
2169 for (i = 0; i < area->nr_pages; i++)
2170 if (page_address(area->pages[i]))
2171 set_direct_map(area->pages[i]);
2172}
2173
2174/* Handle removing and resetting vm mappings related to the vm_struct. */
2175static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2176{
868b104d
RE
2177 unsigned long start = ULONG_MAX, end = 0;
2178 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
31e67340 2179 int flush_dmap = 0;
868b104d
RE
2180 int i;
2181
868b104d
RE
2182 remove_vm_area(area->addr);
2183
2184 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2185 if (!flush_reset)
2186 return;
2187
2188 /*
2189 * If not deallocating pages, just do the flush of the VM area and
2190 * return.
2191 */
2192 if (!deallocate_pages) {
2193 vm_unmap_aliases();
2194 return;
2195 }
2196
2197 /*
2198 * If execution gets here, flush the vm mapping and reset the direct
2199 * map. Find the start and end range of the direct mappings to make sure
2200 * the vm_unmap_aliases() flush includes the direct map.
2201 */
2202 for (i = 0; i < area->nr_pages; i++) {
8e41f872
RE
2203 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2204 if (addr) {
868b104d 2205 start = min(addr, start);
8e41f872 2206 end = max(addr + PAGE_SIZE, end);
31e67340 2207 flush_dmap = 1;
868b104d
RE
2208 }
2209 }
2210
2211 /*
2212 * Set direct map to something invalid so that it won't be cached if
2213 * there are any accesses after the TLB flush, then flush the TLB and
2214 * reset the direct map permissions to the default.
2215 */
2216 set_area_direct_map(area, set_direct_map_invalid_noflush);
31e67340 2217 _vm_unmap_aliases(start, end, flush_dmap);
868b104d
RE
2218 set_area_direct_map(area, set_direct_map_default_noflush);
2219}
2220
b3bdda02 2221static void __vunmap(const void *addr, int deallocate_pages)
1da177e4
LT
2222{
2223 struct vm_struct *area;
2224
2225 if (!addr)
2226 return;
2227
e69e9d4a 2228 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
ab15d9b4 2229 addr))
1da177e4 2230 return;
1da177e4 2231
6ade2032 2232 area = find_vm_area(addr);
1da177e4 2233 if (unlikely(!area)) {
4c8573e2 2234 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1da177e4 2235 addr);
1da177e4
LT
2236 return;
2237 }
2238
05e3ff95
CP
2239 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2240 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
9a11b49a 2241
868b104d
RE
2242 vm_remove_mappings(area, deallocate_pages);
2243
1da177e4
LT
2244 if (deallocate_pages) {
2245 int i;
2246
2247 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
2248 struct page *page = area->pages[i];
2249
2250 BUG_ON(!page);
4949148a 2251 __free_pages(page, 0);
1da177e4 2252 }
97105f0a 2253 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
1da177e4 2254
244d63ee 2255 kvfree(area->pages);
1da177e4
LT
2256 }
2257
2258 kfree(area);
2259 return;
2260}
bf22e37a
AR
2261
2262static inline void __vfree_deferred(const void *addr)
2263{
2264 /*
2265 * Use raw_cpu_ptr() because this can be called from preemptible
2266 * context. Preemption is absolutely fine here, because the llist_add()
2267 * implementation is lockless, so it works even if we are adding to
 2268 * another cpu's list. schedule_work() should be fine with this too.
2269 */
2270 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2271
2272 if (llist_add((struct llist_node *)addr, &p->list))
2273 schedule_work(&p->wq);
2274}
2275
2276/**
92eac168
MR
2277 * vfree_atomic - release memory allocated by vmalloc()
2278 * @addr: memory base address
bf22e37a 2279 *
92eac168
MR
2280 * This one is just like vfree() but can be called in any atomic context
2281 * except NMIs.
bf22e37a
AR
2282 */
2283void vfree_atomic(const void *addr)
2284{
2285 BUG_ON(in_nmi());
2286
2287 kmemleak_free(addr);
2288
2289 if (!addr)
2290 return;
2291 __vfree_deferred(addr);
2292}
2293
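/*
 * Illustrative sketch: releasing a vmalloc'ed object from atomic
 * context, e.g. an RCU callback. struct example_obj is hypothetical;
 * it must be the base of the vmalloc allocation for this to be valid.
 */
struct example_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void example_rcu_reclaim(struct rcu_head *head)
{
	struct example_obj *obj = container_of(head, struct example_obj, rcu);

	vfree_atomic(obj);	/* defers the real unmap to a workqueue */
}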
c67dc624
RP
2294static void __vfree(const void *addr)
2295{
2296 if (unlikely(in_interrupt()))
2297 __vfree_deferred(addr);
2298 else
2299 __vunmap(addr, 1);
2300}
2301
1da177e4 2302/**
92eac168
MR
2303 * vfree - release memory allocated by vmalloc()
2304 * @addr: memory base address
1da177e4 2305 *
92eac168
MR
2306 * Free the virtually continuous memory area starting at @addr, as
2307 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2308 * NULL, no operation is performed.
1da177e4 2309 *
92eac168
MR
2310 * Must not be called in NMI context (strictly speaking, only if we don't
2311 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 2312 * conventions for vfree() arch-dependent would be a really bad idea)
c9fcee51 2313 *
92eac168 2314 * May sleep if called *not* from interrupt context.
3ca4ea3a 2315 *
92eac168 2316 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
1da177e4 2317 */
b3bdda02 2318void vfree(const void *addr)
1da177e4 2319{
32fcfd40 2320 BUG_ON(in_nmi());
89219d37
CM
2321
2322 kmemleak_free(addr);
2323
a8dda165
AR
2324 might_sleep_if(!in_interrupt());
2325
32fcfd40
AV
2326 if (!addr)
2327 return;
c67dc624
RP
2328
2329 __vfree(addr);
1da177e4 2330}
1da177e4
LT
2331EXPORT_SYMBOL(vfree);
2332
2333/**
92eac168
MR
2334 * vunmap - release virtual mapping obtained by vmap()
2335 * @addr: memory base address
1da177e4 2336 *
92eac168
MR
2337 * Free the virtually contiguous memory area starting at @addr,
2338 * which was created from the page array passed to vmap().
1da177e4 2339 *
92eac168 2340 * Must not be called in interrupt context.
1da177e4 2341 */
b3bdda02 2342void vunmap(const void *addr)
1da177e4
LT
2343{
2344 BUG_ON(in_interrupt());
34754b69 2345 might_sleep();
32fcfd40
AV
2346 if (addr)
2347 __vunmap(addr, 0);
1da177e4 2348}
1da177e4
LT
2349EXPORT_SYMBOL(vunmap);
2350
2351/**
92eac168
MR
2352 * vmap - map an array of pages into virtually contiguous space
2353 * @pages: array of page pointers
2354 * @count: number of pages to map
2355 * @flags: vm_area->flags
2356 * @prot: page protection for the mapping
2357 *
2358 * Maps @count pages from @pages into contiguous kernel virtual
2359 * space.
a862f68a
MR
2360 *
2361 * Return: the address of the area or %NULL on failure
1da177e4
LT
2362 */
2363void *vmap(struct page **pages, unsigned int count,
92eac168 2364 unsigned long flags, pgprot_t prot)
1da177e4
LT
2365{
2366 struct vm_struct *area;
65ee03c4 2367 unsigned long size; /* In bytes */
1da177e4 2368
34754b69
PZ
2369 might_sleep();
2370
ca79b0c2 2371 if (count > totalram_pages())
1da177e4
LT
2372 return NULL;
2373
65ee03c4
GJM
2374 size = (unsigned long)count << PAGE_SHIFT;
2375 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1da177e4
LT
2376 if (!area)
2377 return NULL;
23016969 2378
f6f8ed47 2379 if (map_vm_area(area, prot, pages)) {
1da177e4
LT
2380 vunmap(area->addr);
2381 return NULL;
2382 }
2383
2384 return area->addr;
2385}
1da177e4
LT
2386EXPORT_SYMBOL(vmap);
2387
8594a21c
MH
2388static void *__vmalloc_node(unsigned long size, unsigned long align,
2389 gfp_t gfp_mask, pgprot_t prot,
2390 int node, const void *caller);
e31d9eb5 2391static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3722e13c 2392 pgprot_t prot, int node)
1da177e4
LT
2393{
2394 struct page **pages;
2395 unsigned int nr_pages, array_size, i;
930f036b 2396 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
704b862f
LA
2397 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2398 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2399 0 :
2400 __GFP_HIGHMEM;
1da177e4 2401
762216ab 2402 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1da177e4
LT
2403 array_size = (nr_pages * sizeof(struct page *));
2404
2405 area->nr_pages = nr_pages;
2406 /* Please note that the recursion is strictly bounded. */
8757d5fa 2407 if (array_size > PAGE_SIZE) {
704b862f 2408 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
3722e13c 2409 PAGE_KERNEL, node, area->caller);
286e1ea3 2410 } else {
976d6dfb 2411 pages = kmalloc_node(array_size, nested_gfp, node);
286e1ea3 2412 }
1da177e4
LT
2413 area->pages = pages;
2414 if (!area->pages) {
2415 remove_vm_area(area->addr);
2416 kfree(area);
2417 return NULL;
2418 }
1da177e4
LT
2419
2420 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
2421 struct page *page;
2422
4b90951c 2423 if (node == NUMA_NO_NODE)
704b862f 2424 page = alloc_page(alloc_mask|highmem_mask);
930fc45a 2425 else
704b862f 2426 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
bf53d6f8
CL
2427
2428 if (unlikely(!page)) {
1da177e4
LT
2429 /* Successfully allocated i pages, free them in __vunmap() */
2430 area->nr_pages = i;
97105f0a 2431 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
1da177e4
LT
2432 goto fail;
2433 }
bf53d6f8 2434 area->pages[i] = page;
704b862f 2435 if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
660654f9 2436 cond_resched();
1da177e4 2437 }
97105f0a 2438 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
1da177e4 2439
f6f8ed47 2440 if (map_vm_area(area, prot, pages))
1da177e4
LT
2441 goto fail;
2442 return area->addr;
2443
2444fail:
a8e99259 2445 warn_alloc(gfp_mask, NULL,
7877cdcc 2446 "vmalloc: allocation failure, allocated %ld of %ld bytes",
22943ab1 2447 (area->nr_pages*PAGE_SIZE), area->size);
c67dc624 2448 __vfree(area->addr);
1da177e4
LT
2449 return NULL;
2450}
2451
2452/**
92eac168
MR
2453 * __vmalloc_node_range - allocate virtually contiguous memory
2454 * @size: allocation size
2455 * @align: desired alignment
2456 * @start: vm area range start
2457 * @end: vm area range end
2458 * @gfp_mask: flags for the page level allocator
2459 * @prot: protection mask for the allocated pages
2460 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2461 * @node: node to use for allocation or NUMA_NO_NODE
2462 * @caller: caller's return address
2463 *
2464 * Allocate enough pages to cover @size from the page level
2465 * allocator with @gfp_mask flags. Map them into contiguous
2466 * kernel virtual space, using a pagetable protection of @prot.
a862f68a
MR
2467 *
2468 * Return: the address of the area or %NULL on failure
1da177e4 2469 */
d0a21265
DR
2470void *__vmalloc_node_range(unsigned long size, unsigned long align,
2471 unsigned long start, unsigned long end, gfp_t gfp_mask,
cb9e3c29
AR
2472 pgprot_t prot, unsigned long vm_flags, int node,
2473 const void *caller)
1da177e4
LT
2474{
2475 struct vm_struct *area;
89219d37
CM
2476 void *addr;
2477 unsigned long real_size = size;
1da177e4
LT
2478
2479 size = PAGE_ALIGN(size);
ca79b0c2 2480 if (!size || (size >> PAGE_SHIFT) > totalram_pages())
de7d2b56 2481 goto fail;
1da177e4 2482
cb9e3c29
AR
2483 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
2484 vm_flags, start, end, node, gfp_mask, caller);
1da177e4 2485 if (!area)
de7d2b56 2486 goto fail;
1da177e4 2487
3722e13c 2488 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1368edf0 2489 if (!addr)
b82225f3 2490 return NULL;
89219d37 2491
f5252e00 2492 /*
20fc02b4
ZY
2493 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2494 * flag. It means that vm_struct is not fully initialized.
4341fa45 2495 * Now, it is fully initialized, so remove this flag here.
f5252e00 2496 */
20fc02b4 2497 clear_vm_uninitialized_flag(area);
f5252e00 2498
94f4a161 2499 kmemleak_vmalloc(area, size, gfp_mask);
89219d37
CM
2500
2501 return addr;
de7d2b56
JP
2502
2503fail:
a8e99259 2504 warn_alloc(gfp_mask, NULL,
7877cdcc 2505 "vmalloc: allocation failure: %lu bytes", real_size);
de7d2b56 2506 return NULL;
1da177e4
LT
2507}
2508
153178ed
URS
2509/*
 2510 * This is only for performance analysis and stress testing of vmalloc.
 2511 * It is required by the vmalloc test module; do not use it for anything
 2512 * else.
2513 */
2514#ifdef CONFIG_TEST_VMALLOC_MODULE
2515EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2516#endif
2517
d0a21265 2518/**
92eac168
MR
2519 * __vmalloc_node - allocate virtually contiguous memory
2520 * @size: allocation size
2521 * @align: desired alignment
2522 * @gfp_mask: flags for the page level allocator
2523 * @prot: protection mask for the allocated pages
2524 * @node: node to use for allocation or NUMA_NO_NODE
2525 * @caller: caller's return address
a7c3e901 2526 *
92eac168
MR
2527 * Allocate enough pages to cover @size from the page level
2528 * allocator with @gfp_mask flags. Map them into contiguous
2529 * kernel virtual space, using a pagetable protection of @prot.
a7c3e901 2530 *
92eac168
MR
2531 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2532 * and __GFP_NOFAIL are not supported
a7c3e901 2533 *
92eac168
MR
2534 * Any use of gfp flags outside of GFP_KERNEL should be consulted
2535 * with mm people.
a862f68a
MR
2536 *
2537 * Return: pointer to the allocated memory or %NULL on error
d0a21265 2538 */
8594a21c 2539static void *__vmalloc_node(unsigned long size, unsigned long align,
d0a21265 2540 gfp_t gfp_mask, pgprot_t prot,
5e6cafc8 2541 int node, const void *caller)
d0a21265
DR
2542{
2543 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
cb9e3c29 2544 gfp_mask, prot, 0, node, caller);
d0a21265
DR
2545}
2546
930fc45a
CL
2547void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2548{
00ef2d2f 2549 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
23016969 2550 __builtin_return_address(0));
930fc45a 2551}
1da177e4
LT
2552EXPORT_SYMBOL(__vmalloc);
2553
8594a21c
MH
2554static inline void *__vmalloc_node_flags(unsigned long size,
2555 int node, gfp_t flags)
2556{
2557 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2558 node, __builtin_return_address(0));
2559}
2560
2561
2562void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2563 void *caller)
2564{
2565 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2566}
2567
1da177e4 2568/**
92eac168
MR
2569 * vmalloc - allocate virtually contiguous memory
2570 * @size: allocation size
2571 *
2572 * Allocate enough pages to cover @size from the page level
2573 * allocator and map them into contiguous kernel virtual space.
1da177e4 2574 *
92eac168
MR
2575 * For tight control over page level allocator and protection flags
2576 * use __vmalloc() instead.
a862f68a
MR
2577 *
2578 * Return: pointer to the allocated memory or %NULL on error
1da177e4
LT
2579 */
2580void *vmalloc(unsigned long size)
2581{
00ef2d2f 2582 return __vmalloc_node_flags(size, NUMA_NO_NODE,
19809c2d 2583 GFP_KERNEL);
1da177e4 2584}
1da177e4
LT
2585EXPORT_SYMBOL(vmalloc);
2586
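/*
 * Illustrative sketch: the common "try kmalloc, fall back to vmalloc"
 * pattern for large buffers. The kernel provides kvmalloc() for this;
 * it is shown here only to demonstrate the API.
 */
static void *example_big_buffer(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;	/* free with kvfree() */
}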
e1ca7788 2587/**
92eac168
MR
2588 * vzalloc - allocate virtually contiguous memory with zero fill
2589 * @size: allocation size
2590 *
2591 * Allocate enough pages to cover @size from the page level
2592 * allocator and map them into contiguous kernel virtual space.
2593 * The memory allocated is set to zero.
2594 *
2595 * For tight control over page level allocator and protection flags
2596 * use __vmalloc() instead.
a862f68a
MR
2597 *
2598 * Return: pointer to the allocated memory or %NULL on error
e1ca7788
DY
2599 */
2600void *vzalloc(unsigned long size)
2601{
00ef2d2f 2602 return __vmalloc_node_flags(size, NUMA_NO_NODE,
19809c2d 2603 GFP_KERNEL | __GFP_ZERO);
e1ca7788
DY
2604}
2605EXPORT_SYMBOL(vzalloc);
2606
83342314 2607/**
ead04089
REB
2608 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2609 * @size: allocation size
83342314 2610 *
ead04089
REB
2611 * The resulting memory area is zeroed so it can be mapped to userspace
2612 * without leaking data.
a862f68a
MR
2613 *
2614 * Return: pointer to the allocated memory or %NULL on error
83342314
NP
2615 */
2616void *vmalloc_user(unsigned long size)
2617{
bc84c535
RP
2618 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2619 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2620 VM_USERMAP, NUMA_NO_NODE,
2621 __builtin_return_address(0));
83342314
NP
2622}
2623EXPORT_SYMBOL(vmalloc_user);
2624
930fc45a 2625/**
92eac168
MR
2626 * vmalloc_node - allocate memory on a specific node
2627 * @size: allocation size
2628 * @node: numa node
930fc45a 2629 *
92eac168
MR
2630 * Allocate enough pages to cover @size from the page level
2631 * allocator and map them into contiguous kernel virtual space.
930fc45a 2632 *
92eac168
MR
2633 * For tight control over page level allocator and protection flags
2634 * use __vmalloc() instead.
a862f68a
MR
2635 *
2636 * Return: pointer to the allocated memory or %NULL on error
930fc45a
CL
2637 */
2638void *vmalloc_node(unsigned long size, int node)
2639{
19809c2d 2640 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
23016969 2641 node, __builtin_return_address(0));
930fc45a
CL
2642}
2643EXPORT_SYMBOL(vmalloc_node);
2644
e1ca7788
DY
2645/**
2646 * vzalloc_node - allocate memory on a specific node with zero fill
2647 * @size: allocation size
2648 * @node: numa node
2649 *
2650 * Allocate enough pages to cover @size from the page level
2651 * allocator and map them into contiguous kernel virtual space.
2652 * The memory allocated is set to zero.
2653 *
2654 * For tight control over page level allocator and protection flags
2655 * use __vmalloc_node() instead.
a862f68a
MR
2656 *
2657 * Return: pointer to the allocated memory or %NULL on error
e1ca7788
DY
2658 */
2659void *vzalloc_node(unsigned long size, int node)
2660{
2661 return __vmalloc_node_flags(size, node,
19809c2d 2662 GFP_KERNEL | __GFP_ZERO);
e1ca7788
DY
2663}
2664EXPORT_SYMBOL(vzalloc_node);
2665
1da177e4 2666/**
92eac168
MR
2667 * vmalloc_exec - allocate virtually contiguous, executable memory
2668 * @size: allocation size
1da177e4 2669 *
92eac168
MR
2670 * Kernel-internal function to allocate enough pages to cover @size
 2671 * from the page level allocator and map them into contiguous and
2672 * executable kernel virtual space.
1da177e4 2673 *
92eac168
MR
2674 * For tight control over page level allocator and protection flags
2675 * use __vmalloc() instead.
a862f68a
MR
2676 *
2677 * Return: pointer to the allocated memory or %NULL on error
1da177e4 2678 */
1da177e4
LT
2679void *vmalloc_exec(unsigned long size)
2680{
868b104d
RE
2681 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2682 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2683 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4
LT
2684}
2685
0d08e0d3 2686#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
698d0831 2687#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
0d08e0d3 2688#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
698d0831 2689#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
0d08e0d3 2690#else
698d0831
MH
2691/*
2692 * 64b systems should always have either DMA or DMA32 zones. For others
2693 * GFP_DMA32 should do the right thing and use the normal zone.
2694 */
 2695#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
0d08e0d3
AK
2696#endif
2697
1da177e4 2698/**
92eac168
MR
2699 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2700 * @size: allocation size
1da177e4 2701 *
92eac168
MR
2702 * Allocate enough 32bit PA addressable pages to cover @size from the
2703 * page level allocator and map them into contiguous kernel virtual space.
a862f68a
MR
2704 *
2705 * Return: pointer to the allocated memory or %NULL on error
1da177e4
LT
2706 */
2707void *vmalloc_32(unsigned long size)
2708{
2dca6999 2709 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
00ef2d2f 2710 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4 2711}
1da177e4
LT
2712EXPORT_SYMBOL(vmalloc_32);
2713
83342314 2714/**
ead04089 2715 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
92eac168 2716 * @size: allocation size
ead04089
REB
2717 *
2718 * The resulting memory area is 32bit addressable and zeroed so it can be
2719 * mapped to userspace without leaking data.
a862f68a
MR
2720 *
2721 * Return: pointer to the allocated memory or %NULL on error
83342314
NP
2722 */
2723void *vmalloc_32_user(unsigned long size)
2724{
bc84c535
RP
2725 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2726 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2727 VM_USERMAP, NUMA_NO_NODE,
2728 __builtin_return_address(0));
83342314
NP
2729}
2730EXPORT_SYMBOL(vmalloc_32_user);
2731
d0107eb0
KH
2732/*
 2733 * Small helper routine: copy contents to buf from addr.
 2734 * If a page is not present, fill with zeroes.
2735 */
2736
2737static int aligned_vread(char *buf, char *addr, unsigned long count)
2738{
2739 struct page *p;
2740 int copied = 0;
2741
2742 while (count) {
2743 unsigned long offset, length;
2744
891c49ab 2745 offset = offset_in_page(addr);
d0107eb0
KH
2746 length = PAGE_SIZE - offset;
2747 if (length > count)
2748 length = count;
2749 p = vmalloc_to_page(addr);
2750 /*
 2751 * To do safe access to this _mapped_ area, we need a
 2752 * lock. But taking a lock here would add vmalloc()/vfree()
 2753 * overhead to this rarely used _debug_ interface.
 2754 * Instead, we use kmap() and accept a small overhead in
 2755 * this access function.
2756 */
2757 if (p) {
2758 /*
2759 * we can expect USER0 is not used (see vread/vwrite's
2760 * function description)
2761 */
9b04c5fe 2762 void *map = kmap_atomic(p);
d0107eb0 2763 memcpy(buf, map + offset, length);
9b04c5fe 2764 kunmap_atomic(map);
d0107eb0
KH
2765 } else
2766 memset(buf, 0, length);
2767
2768 addr += length;
2769 buf += length;
2770 copied += length;
2771 count -= length;
2772 }
2773 return copied;
2774}
2775
2776static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2777{
2778 struct page *p;
2779 int copied = 0;
2780
2781 while (count) {
2782 unsigned long offset, length;
2783
891c49ab 2784 offset = offset_in_page(addr);
d0107eb0
KH
2785 length = PAGE_SIZE - offset;
2786 if (length > count)
2787 length = count;
2788 p = vmalloc_to_page(addr);
2789 /*
 2790 * To do safe access to this _mapped_ area, we need a
 2791 * lock. But taking a lock here would add vmalloc()/vfree()
 2792 * overhead to this rarely used _debug_ interface.
 2793 * Instead, we use kmap() and accept a small overhead in
 2794 * this access function.
2795 */
2796 if (p) {
2797 /*
2798 * we can expect USER0 is not used (see vread/vwrite's
2799 * function description)
2800 */
9b04c5fe 2801 void *map = kmap_atomic(p);
d0107eb0 2802 memcpy(map + offset, buf, length);
9b04c5fe 2803 kunmap_atomic(map);
d0107eb0
KH
2804 }
2805 addr += length;
2806 buf += length;
2807 copied += length;
2808 count -= length;
2809 }
2810 return copied;
2811}
2812
2813/**
92eac168
MR
2814 * vread() - read vmalloc area in a safe way.
2815 * @buf: buffer for reading data
2816 * @addr: vm address.
2817 * @count: number of bytes to be read.
2818 *
92eac168
MR
 2819 * This function checks that addr is a valid vmalloc'ed area, and
 2820 * copies data from that area to a given buffer. If the given memory range
 2821 * of [addr...addr+count) includes some valid address, data is copied to
 2822 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
 2823 * An IOREMAP area is treated as a memory hole and no copy is done.
 2824 *
 2825 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 2826 * returns 0. @buf should be a kernel buffer.
 2827 *
 2828 * Note: In usual ops, vread() is never necessary because the caller
 2829 * should know the vmalloc() area is valid and can use memcpy().
 2830 * This is for routines which have to access the vmalloc area without
 d9009d67 2831 * any other information, such as /dev/kmem.
a862f68a
MR
2832 *
2833 * Return: number of bytes for which addr and buf should be increased
2834 * (same number as @count) or %0 if [addr...addr+count) doesn't
2835 * include any intersection with valid vmalloc area
d0107eb0 2836 */
1da177e4
LT
2837long vread(char *buf, char *addr, unsigned long count)
2838{
e81ce85f
JK
2839 struct vmap_area *va;
2840 struct vm_struct *vm;
1da177e4 2841 char *vaddr, *buf_start = buf;
d0107eb0 2842 unsigned long buflen = count;
1da177e4
LT
2843 unsigned long n;
2844
2845 /* Don't allow overflow */
2846 if ((unsigned long) addr + count < count)
2847 count = -(unsigned long) addr;
2848
e81ce85f
JK
2849 spin_lock(&vmap_area_lock);
2850 list_for_each_entry(va, &vmap_area_list, list) {
2851 if (!count)
2852 break;
2853
2854 if (!(va->flags & VM_VM_AREA))
2855 continue;
2856
2857 vm = va->vm;
2858 vaddr = (char *) vm->addr;
762216ab 2859 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2860 continue;
2861 while (addr < vaddr) {
2862 if (count == 0)
2863 goto finished;
2864 *buf = '\0';
2865 buf++;
2866 addr++;
2867 count--;
2868 }
762216ab 2869 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2870 if (n > count)
2871 n = count;
e81ce85f 2872 if (!(vm->flags & VM_IOREMAP))
d0107eb0
KH
2873 aligned_vread(buf, addr, n);
2874 else /* IOREMAP area is treated as memory hole */
2875 memset(buf, 0, n);
2876 buf += n;
2877 addr += n;
2878 count -= n;
1da177e4
LT
2879 }
2880finished:
e81ce85f 2881 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2882
2883 if (buf == buf_start)
2884 return 0;
2885 /* zero-fill memory holes */
2886 if (buf != buf_start + buflen)
2887 memset(buf, 0, buflen - (buf - buf_start));
2888
2889 return buflen;
1da177e4
LT
2890}
2891
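/*
 * Illustrative sketch: snapshotting a vmalloc'ed region through
 * vread(), which tolerates holes (they read back as zeroes). The
 * buffer sizes are arbitrary.
 */
static bool example_vread_roundtrip(void)
{
	char *area = vmalloc(PAGE_SIZE);
	char buf[16];

	if (!area)
		return false;

	memset(area, 0xaa, PAGE_SIZE);
	vread(buf, area, sizeof(buf));	/* safe even if parts are unmapped */
	vfree(area);
	return buf[0] == (char)0xaa;
}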
d0107eb0 2892/**
92eac168
MR
2893 * vwrite() - write vmalloc area in a safe way.
2894 * @buf: buffer for source data
2895 * @addr: vm address.
 2896 * @count: number of bytes to be written.
2897 *
92eac168
MR
 2898 * This function checks that addr is a valid vmalloc'ed area, and
 2899 * copies data from a buffer to the given addr. If the specified range of
 2900 * [addr...addr+count) includes some valid address, data is copied from
 2901 * the proper area of @buf. If there are memory holes, nothing is copied
 2902 * to them. An IOREMAP area is treated as a memory hole and no copy is done.
 2903 *
 2904 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 2905 * returns 0. @buf should be a kernel buffer.
 2906 *
 2907 * Note: In usual ops, vwrite() is never necessary because the caller
 2908 * should know the vmalloc() area is valid and can use memcpy().
 2909 * This is for routines which have to access the vmalloc area without
 d9009d67 2910 * any other information, such as /dev/kmem.
a862f68a
MR
2911 *
2912 * Return: number of bytes for which addr and buf should be
2913 * increased (same number as @count) or %0 if [addr...addr+count)
2914 * doesn't include any intersection with valid vmalloc area
d0107eb0 2915 */
1da177e4
LT
2916long vwrite(char *buf, char *addr, unsigned long count)
2917{
e81ce85f
JK
2918 struct vmap_area *va;
2919 struct vm_struct *vm;
d0107eb0
KH
2920 char *vaddr;
2921 unsigned long n, buflen;
2922 int copied = 0;
1da177e4
LT
2923
2924 /* Don't allow overflow */
2925 if ((unsigned long) addr + count < count)
2926 count = -(unsigned long) addr;
d0107eb0 2927 buflen = count;
1da177e4 2928
e81ce85f
JK
2929 spin_lock(&vmap_area_lock);
2930 list_for_each_entry(va, &vmap_area_list, list) {
2931 if (!count)
2932 break;
2933
2934 if (!(va->flags & VM_VM_AREA))
2935 continue;
2936
2937 vm = va->vm;
2938 vaddr = (char *) vm->addr;
762216ab 2939 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2940 continue;
2941 while (addr < vaddr) {
2942 if (count == 0)
2943 goto finished;
2944 buf++;
2945 addr++;
2946 count--;
2947 }
762216ab 2948 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2949 if (n > count)
2950 n = count;
e81ce85f 2951 if (!(vm->flags & VM_IOREMAP)) {
d0107eb0
KH
2952 aligned_vwrite(buf, addr, n);
2953 copied++;
2954 }
2955 buf += n;
2956 addr += n;
2957 count -= n;
1da177e4
LT
2958 }
2959finished:
e81ce85f 2960 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2961 if (!copied)
2962 return 0;
2963 return buflen;
1da177e4 2964}
83342314
NP
2965
2966/**
92eac168
MR
2967 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2968 * @vma: vma to cover
2969 * @uaddr: target user address to start at
2970 * @kaddr: virtual address of vmalloc kernel memory
2971 * @size: size of map area
7682486b 2972 *
92eac168 2973 * Returns: 0 for success, -Exxx on failure
83342314 2974 *
92eac168
MR
2975 * This function checks that @kaddr is a valid vmalloc'ed area,
2976 * and that it is big enough to cover the range starting at
2977 * @uaddr in @vma. Will return failure if that criteria isn't
2978 * met.
83342314 2979 *
92eac168 2980 * Similar to remap_pfn_range() (see mm/memory.c)
83342314 2981 */
e69e9d4a
HD
2982int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2983 void *kaddr, unsigned long size)
83342314
NP
2984{
2985 struct vm_struct *area;
83342314 2986
e69e9d4a
HD
2987 size = PAGE_ALIGN(size);
2988
2989 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
83342314
NP
2990 return -EINVAL;
2991
e69e9d4a 2992 area = find_vm_area(kaddr);
83342314 2993 if (!area)
db64fe02 2994 return -EINVAL;
83342314
NP
2995
2996 if (!(area->flags & VM_USERMAP))
db64fe02 2997 return -EINVAL;
83342314 2998
401592d2 2999 if (kaddr + size > area->addr + get_vm_area_size(area))
db64fe02 3000 return -EINVAL;
83342314 3001
83342314 3002 do {
e69e9d4a 3003 struct page *page = vmalloc_to_page(kaddr);
db64fe02
NP
3004 int ret;
3005
83342314
NP
3006 ret = vm_insert_page(vma, uaddr, page);
3007 if (ret)
3008 return ret;
3009
3010 uaddr += PAGE_SIZE;
e69e9d4a
HD
3011 kaddr += PAGE_SIZE;
3012 size -= PAGE_SIZE;
3013 } while (size > 0);
83342314 3014
314e51b9 3015 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
83342314 3016
db64fe02 3017 return 0;
83342314 3018}
e69e9d4a
HD
3019EXPORT_SYMBOL(remap_vmalloc_range_partial);
3020
3021/**
92eac168
MR
3022 * remap_vmalloc_range - map vmalloc pages to userspace
3023 * @vma: vma to cover (map full range of vma)
3024 * @addr: vmalloc memory
3025 * @pgoff: number of pages into addr before first page to map
e69e9d4a 3026 *
92eac168 3027 * Returns: 0 for success, -Exxx on failure
e69e9d4a 3028 *
92eac168
MR
3029 * This function checks that addr is a valid vmalloc'ed area, and
3030 * that it is big enough to cover the vma. Will return failure if
3031 * that criteria isn't met.
e69e9d4a 3032 *
92eac168 3033 * Similar to remap_pfn_range() (see mm/memory.c)
e69e9d4a
HD
3034 */
3035int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3036 unsigned long pgoff)
3037{
3038 return remap_vmalloc_range_partial(vma, vma->vm_start,
3039 addr + (pgoff << PAGE_SHIFT),
3040 vma->vm_end - vma->vm_start);
3041}
83342314
NP
3042EXPORT_SYMBOL(remap_vmalloc_range);
3043
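/*
 * Illustrative sketch: a driver .mmap handler exposing a buffer that
 * was allocated with vmalloc_user() (which sets VM_USERMAP, required
 * by the check above). file->private_data is a hypothetical stash.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* from vmalloc_user() */

	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}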
1eeb66a1
CH
3044/*
3045 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3046 * have one.
3f8fd02b
JR
3047 *
3048 * The purpose of this function is to make sure the vmalloc area
3049 * mappings are identical in all page-tables in the system.
1eeb66a1 3050 */
3b32123d 3051void __weak vmalloc_sync_all(void)
1eeb66a1
CH
3052{
3053}
5f4352fb
JF
3054
3055
8b1e0f81 3056static int f(pte_t *pte, unsigned long addr, void *data)
5f4352fb 3057{
cd12909c
DV
3058 pte_t ***p = data;
3059
3060 if (p) {
3061 *(*p) = pte;
3062 (*p)++;
3063 }
5f4352fb
JF
3064 return 0;
3065}
3066
3067/**
92eac168
MR
3068 * alloc_vm_area - allocate a range of kernel address space
3069 * @size: size of the area
3070 * @ptes: returns the PTEs for the address space
7682486b 3071 *
92eac168 3072 * Returns: NULL on failure, vm_struct on success
5f4352fb 3073 *
92eac168
MR
3074 * This function reserves a range of kernel address space, and
3075 * allocates pagetables to map that range. No actual mappings
3076 * are created.
cd12909c 3077 *
92eac168
MR
3078 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3079 * allocated for the VM area are returned.
5f4352fb 3080 */
cd12909c 3081struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
5f4352fb
JF
3082{
3083 struct vm_struct *area;
3084
23016969
CL
3085 area = get_vm_area_caller(size, VM_IOREMAP,
3086 __builtin_return_address(0));
5f4352fb
JF
3087 if (area == NULL)
3088 return NULL;
3089
3090 /*
3091 * This ensures that page tables are constructed for this region
3092 * of kernel virtual address space and mapped into init_mm.
3093 */
3094 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
cd12909c 3095 size, f, ptes ? &ptes : NULL)) {
5f4352fb
JF
3096 free_vm_area(area);
3097 return NULL;
3098 }
3099
5f4352fb
JF
3100 return area;
3101}
3102EXPORT_SYMBOL_GPL(alloc_vm_area);
3103
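/*
 * Illustrative sketch: reserving address space plus page tables and
 * capturing the PTE pointers, the pattern used by paravirt guests
 * such as Xen. Sizing the ptes[] array is the caller's job.
 */
static struct vm_struct *example_reserve_with_ptes(size_t size, pte_t **ptes)
{
	struct vm_struct *area = alloc_vm_area(size, ptes);

	if (!area)
		return NULL;
	/* install PFNs through ptes[], e.g. via a hypercall */
	return area;	/* release later with free_vm_area() */
}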
3104void free_vm_area(struct vm_struct *area)
3105{
3106 struct vm_struct *ret;
3107 ret = remove_vm_area(area->addr);
3108 BUG_ON(ret != area);
3109 kfree(area);
3110}
3111EXPORT_SYMBOL_GPL(free_vm_area);
a10aa579 3112
4f8b02b4 3113#ifdef CONFIG_SMP
ca23e405
TH
3114static struct vmap_area *node_to_va(struct rb_node *n)
3115{
4583e773 3116 return rb_entry_safe(n, struct vmap_area, rb_node);
ca23e405
TH
3117}
3118
3119/**
68ad4a33
URS
3120 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3121 * @addr: target address
ca23e405 3122 *
68ad4a33
URS
 3123 * Returns: the vmap_area if it is found. If there is no such area,
 3124 * the closest preceding vmap_area (in reverse order) is returned,
 3125 * i.e. one with va->va_start < addr && va->va_end < addr, or NULL
 3126 * if there are no areas before @addr.
ca23e405 3127 */
68ad4a33
URS
3128static struct vmap_area *
3129pvm_find_va_enclose_addr(unsigned long addr)
ca23e405 3130{
68ad4a33
URS
3131 struct vmap_area *va, *tmp;
3132 struct rb_node *n;
3133
3134 n = free_vmap_area_root.rb_node;
3135 va = NULL;
ca23e405
TH
3136
3137 while (n) {
68ad4a33
URS
3138 tmp = rb_entry(n, struct vmap_area, rb_node);
3139 if (tmp->va_start <= addr) {
3140 va = tmp;
3141 if (tmp->va_end >= addr)
3142 break;
3143
ca23e405 3144 n = n->rb_right;
68ad4a33
URS
3145 } else {
3146 n = n->rb_left;
3147 }
ca23e405
TH
3148 }
3149
68ad4a33 3150 return va;
ca23e405
TH
3151}
3152
3153/**
68ad4a33
URS
 3154 * pvm_determine_end_from_reverse - find the highest aligned address
 3155 * of a free block below VMALLOC_END
 3156 * @va: in - the VA we start the search from (reverse order);
 3157 *      out - the VA with the highest aligned end address
 3158 * @align: alignment for the returned end address
ca23e405 3159 *
68ad4a33 3160 * Returns: determined end address within vmap_area
ca23e405 3161 */
68ad4a33
URS
3162static unsigned long
3163pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
ca23e405 3164{
68ad4a33 3165 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
ca23e405
TH
3166 unsigned long addr;
3167
68ad4a33
URS
3168 if (likely(*va)) {
3169 list_for_each_entry_from_reverse((*va),
3170 &free_vmap_area_list, list) {
3171 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3172 if ((*va)->va_start < addr)
3173 return addr;
3174 }
ca23e405
TH
3175 }
3176
68ad4a33 3177 return 0;
ca23e405
TH
3178}
3179
3180/**
3181 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3182 * @offsets: array containing offset of each area
3183 * @sizes: array containing size of each area
3184 * @nr_vms: the number of areas to allocate
3185 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
ca23e405
TH
3186 *
3187 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3188 * vm_structs on success, %NULL on failure
3189 *
3190 * Percpu allocator wants to use congruent vm areas so that it can
3191 * maintain the offsets among percpu areas. This function allocates
ec3f64fc
DR
 3192 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 3193 * be scattered pretty far apart, with the distance between two areas
 3194 * easily reaching gigabytes. To avoid interacting with regular
 3195 * vmallocs, these areas are allocated from the top.
ca23e405 3196 *
68ad4a33
URS
3197 * Despite its complicated look, this allocator is rather simple. It
3198 * does everything top-down and scans free blocks from the end looking
 3199 * for a matching base. While scanning, if any of the areas do not fit,
 3200 * the base address is pulled down to fit the area. Scanning is repeated till
3201 * all the areas fit and then all necessary data structures are inserted
3202 * and the result is returned.
ca23e405
TH
3203 */
3204struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3205 const size_t *sizes, int nr_vms,
ec3f64fc 3206 size_t align)
ca23e405
TH
3207{
3208 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3209 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
68ad4a33 3210 struct vmap_area **vas, *va;
ca23e405
TH
3211 struct vm_struct **vms;
3212 int area, area2, last_area, term_area;
68ad4a33 3213 unsigned long base, start, size, end, last_end;
ca23e405 3214 bool purged = false;
68ad4a33 3215 enum fit_type type;
ca23e405 3216
ca23e405 3217 /* verify parameters and allocate data structures */
891c49ab 3218 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
ca23e405
TH
3219 for (last_area = 0, area = 0; area < nr_vms; area++) {
3220 start = offsets[area];
3221 end = start + sizes[area];
3222
3223 /* is everything aligned properly? */
3224 BUG_ON(!IS_ALIGNED(offsets[area], align));
3225 BUG_ON(!IS_ALIGNED(sizes[area], align));
3226
3227 /* detect the area with the highest address */
3228 if (start > offsets[last_area])
3229 last_area = area;
3230
c568da28 3231 for (area2 = area + 1; area2 < nr_vms; area2++) {
ca23e405
TH
3232 unsigned long start2 = offsets[area2];
3233 unsigned long end2 = start2 + sizes[area2];
3234
c568da28 3235 BUG_ON(start2 < end && start < end2);
ca23e405
TH
3236 }
3237 }
3238 last_end = offsets[last_area] + sizes[last_area];
3239
3240 if (vmalloc_end - vmalloc_start < last_end) {
3241 WARN_ON(true);
3242 return NULL;
3243 }
3244
4d67d860
TM
3245 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3246 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
ca23e405 3247 if (!vas || !vms)
f1db7afd 3248 goto err_free2;
ca23e405
TH
3249
3250 for (area = 0; area < nr_vms; area++) {
68ad4a33 3251 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
ec3f64fc 3252 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
ca23e405
TH
3253 if (!vas[area] || !vms[area])
3254 goto err_free;
3255 }
3256retry:
3257 spin_lock(&vmap_area_lock);
3258
3259 /* start scanning - we scan from the top, begin with the last area */
3260 area = term_area = last_area;
3261 start = offsets[area];
3262 end = start + sizes[area];
3263
68ad4a33
URS
3264 va = pvm_find_va_enclose_addr(vmalloc_end);
3265 base = pvm_determine_end_from_reverse(&va, align) - end;
ca23e405
TH
3266
3267 while (true) {
ca23e405
TH
3268 /*
3269 * base might have underflowed, add last_end before
3270 * comparing.
3271 */
68ad4a33
URS
3272 if (base + last_end < vmalloc_start + last_end)
3273 goto overflow;
ca23e405
TH
3274
3275 /*
68ad4a33 3276 * Fitting base has not been found.
ca23e405 3277 */
68ad4a33
URS
3278 if (va == NULL)
3279 goto overflow;
ca23e405
TH
3280
3281 /*
68ad4a33 3282 * If this VA does not fit, move base downwards and recheck.
ca23e405 3283 */
68ad4a33
URS
3284 if (base + start < va->va_start || base + end > va->va_end) {
3285 va = node_to_va(rb_prev(&va->rb_node));
3286 base = pvm_determine_end_from_reverse(&va, align) - end;
ca23e405
TH
3287 term_area = area;
3288 continue;
3289 }
3290
3291 /*
3292 * This area fits, move on to the previous one. If
3293 * the previous one is the terminal one, we're done.
3294 */
3295 area = (area + nr_vms - 1) % nr_vms;
3296 if (area == term_area)
3297 break;
68ad4a33 3298
ca23e405
TH
3299 start = offsets[area];
3300 end = start + sizes[area];
68ad4a33 3301 va = pvm_find_va_enclose_addr(base + end);
ca23e405 3302 }
68ad4a33 3303
ca23e405
TH
3304 /* we've found a fitting base, insert all va's */
3305 for (area = 0; area < nr_vms; area++) {
68ad4a33 3306 int ret;
ca23e405 3307
68ad4a33
URS
3308 start = base + offsets[area];
3309 size = sizes[area];
ca23e405 3310
68ad4a33
URS
3311 va = pvm_find_va_enclose_addr(start);
3312 if (WARN_ON_ONCE(va == NULL))
3313 /* It is a BUG(), but trigger recovery instead. */
3314 goto recovery;
3315
3316 type = classify_va_fit_type(va, start, size);
3317 if (WARN_ON_ONCE(type == NOTHING_FIT))
3318 /* It is a BUG(), but trigger recovery instead. */
3319 goto recovery;
3320
3321 ret = adjust_va_to_fit_type(va, start, size, type);
3322 if (unlikely(ret))
3323 goto recovery;
3324
3325 /* Allocated area. */
3326 va = vas[area];
3327 va->va_start = start;
3328 va->va_end = start + size;
3329
3330 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
3331 }
ca23e405
TH
3332
3333 spin_unlock(&vmap_area_lock);
3334
3335 /* insert all vm's */
3336 for (area = 0; area < nr_vms; area++)
3645cb4a
ZY
3337 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
3338 pcpu_get_vm_areas);
ca23e405
TH
3339
3340 kfree(vas);
3341 return vms;
3342
68ad4a33
URS
3343recovery:
3344 /* Remove previously inserted areas. */
3345 while (area--) {
3346 __free_vmap_area(vas[area]);
3347 vas[area] = NULL;
3348 }
3349
3350overflow:
3351 spin_unlock(&vmap_area_lock);
3352 if (!purged) {
3353 purge_vmap_area_lazy();
3354 purged = true;
3355
3356 /* Before "retry", check if we recover. */
3357 for (area = 0; area < nr_vms; area++) {
3358 if (vas[area])
3359 continue;
3360
3361 vas[area] = kmem_cache_zalloc(
3362 vmap_area_cachep, GFP_KERNEL);
3363 if (!vas[area])
3364 goto err_free;
3365 }
3366
3367 goto retry;
3368 }
3369
ca23e405
TH
3370err_free:
3371 for (area = 0; area < nr_vms; area++) {
68ad4a33
URS
3372 if (vas[area])
3373 kmem_cache_free(vmap_area_cachep, vas[area]);
3374
f1db7afd 3375 kfree(vms[area]);
ca23e405 3376 }
f1db7afd 3377err_free2:
ca23e405
TH
3378 kfree(vas);
3379 kfree(vms);
3380 return NULL;
3381}
3382
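/*
 * Illustrative sketch: reserving two congruent areas the way the
 * percpu allocator does for its chunks. The offsets and sizes are
 * made up; each must be aligned to @align and the areas must not
 * overlap. Free the result with pcpu_free_vm_areas().
 */
static struct vm_struct **example_pcpu_areas(void)
{
	static const unsigned long offsets[] = { 0, 4UL << 20 };
	static const size_t sizes[] = { 1UL << 20, 1UL << 20 };

	return pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
}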
3383/**
3384 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3385 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3386 * @nr_vms: the number of allocated areas
3387 *
3388 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3389 */
3390void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3391{
3392 int i;
3393
3394 for (i = 0; i < nr_vms; i++)
3395 free_vm_area(vms[i]);
3396 kfree(vms);
3397}
4f8b02b4 3398#endif /* CONFIG_SMP */
a10aa579
CL
3399
3400#ifdef CONFIG_PROC_FS
3401static void *s_start(struct seq_file *m, loff_t *pos)
d4033afd 3402 __acquires(&vmap_area_lock)
a10aa579 3403{
d4033afd 3404 spin_lock(&vmap_area_lock);
3f500069 3405 return seq_list_start(&vmap_area_list, *pos);
a10aa579
CL
3406}
3407
3408static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3409{
3f500069 3410 return seq_list_next(p, &vmap_area_list, pos);
a10aa579
CL
3411}
3412
3413static void s_stop(struct seq_file *m, void *p)
d4033afd 3414 __releases(&vmap_area_lock)
a10aa579 3415{
d4033afd 3416 spin_unlock(&vmap_area_lock);
a10aa579
CL
3417}
3418
a47a126a
ED
3419static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3420{
e5adfffc 3421 if (IS_ENABLED(CONFIG_NUMA)) {
a47a126a
ED
3422 unsigned int nr, *counters = m->private;
3423
3424 if (!counters)
3425 return;
3426
af12346c
WL
3427 if (v->flags & VM_UNINITIALIZED)
3428 return;
7e5b528b
DV
3429 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3430 smp_rmb();
af12346c 3431
a47a126a
ED
3432 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3433
3434 for (nr = 0; nr < v->nr_pages; nr++)
3435 counters[page_to_nid(v->pages[nr])]++;
3436
3437 for_each_node_state(nr, N_HIGH_MEMORY)
3438 if (counters[nr])
3439 seq_printf(m, " N%u=%u", nr, counters[nr]);
3440 }
3441}
3442
a10aa579
CL
3443static int s_show(struct seq_file *m, void *p)
3444{
3f500069 3445 struct vmap_area *va;
d4033afd
JK
3446 struct vm_struct *v;
3447
3f500069 3448 va = list_entry(p, struct vmap_area, list);
3449
c2ce8c14
WL
3450 /*
 3451 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
 3452 * vmap area is being torn down or is a vm_map_ram allocation.
3453 */
78c72746
YX
3454 if (!(va->flags & VM_VM_AREA)) {
3455 seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
3456 (void *)va->va_start, (void *)va->va_end,
3457 va->va_end - va->va_start,
3458 va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
3459
d4033afd 3460 return 0;
78c72746 3461 }
d4033afd
JK
3462
3463 v = va->vm;
a10aa579 3464
45ec1690 3465 seq_printf(m, "0x%pK-0x%pK %7ld",
a10aa579
CL
3466 v->addr, v->addr + v->size, v->size);
3467
62c70bce
JP
3468 if (v->caller)
3469 seq_printf(m, " %pS", v->caller);
23016969 3470
a10aa579
CL
3471 if (v->nr_pages)
3472 seq_printf(m, " pages=%d", v->nr_pages);
3473
3474 if (v->phys_addr)
199eaa05 3475 seq_printf(m, " phys=%pa", &v->phys_addr);
a10aa579
CL
3476
3477 if (v->flags & VM_IOREMAP)
f4527c90 3478 seq_puts(m, " ioremap");
a10aa579
CL
3479
3480 if (v->flags & VM_ALLOC)
f4527c90 3481 seq_puts(m, " vmalloc");
a10aa579
CL
3482
3483 if (v->flags & VM_MAP)
f4527c90 3484 seq_puts(m, " vmap");
a10aa579
CL
3485
3486 if (v->flags & VM_USERMAP)
f4527c90 3487 seq_puts(m, " user");
a10aa579 3488
244d63ee 3489 if (is_vmalloc_addr(v->pages))
f4527c90 3490 seq_puts(m, " vpages");
a10aa579 3491
a47a126a 3492 show_numa_info(m, v);
a10aa579
CL
3493 seq_putc(m, '\n');
3494 return 0;
3495}
3496
5f6a6a9c 3497static const struct seq_operations vmalloc_op = {
a10aa579
CL
3498 .start = s_start,
3499 .next = s_next,
3500 .stop = s_stop,
3501 .show = s_show,
3502};
5f6a6a9c 3503
5f6a6a9c
AD
3504static int __init proc_vmalloc_init(void)
3505{
fddda2b7 3506 if (IS_ENABLED(CONFIG_NUMA))
0825a6f9 3507 proc_create_seq_private("vmallocinfo", 0400, NULL,
44414d82
CH
3508 &vmalloc_op,
3509 nr_node_ids * sizeof(unsigned int), NULL);
fddda2b7 3510 else
0825a6f9 3511 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
5f6a6a9c
AD
3512 return 0;
3513}
3514module_init(proc_vmalloc_init);
db3808c1 3515
a10aa579 3516#endif