// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 * specify should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is responsible
 * for calling flush_cache_vunmap() on to-be-unmapped areas before calling
 * this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	unsigned long next;
	pgd_t *pgd;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 * specify should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is responsible for
 * calling flush_cache_vmap() on to-be-mapped areas before calling this
 * function.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	unsigned long end = addr + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(start, size, prot, pages);
	flush_cache_vmap(start, start + size);
	return ret;
}
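
/*
 * Example: a minimal sketch of how a caller typically pairs these helpers
 * (hypothetical code, error handling elided; "pages" is an array of
 * PFN_UP(size) already-allocated pages):
 *
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	map_kernel_range(addr, get_vm_area_size(area), PAGE_KERNEL, pages);
 *	...
 *	unmap_kernel_range(addr, get_vm_area_size(area));
 *	free_vm_area(area);
 *
 * map_kernel_range() already performs flush_cache_vmap(); users of the
 * _noflush variants must do the cache/TLB maintenance themselves.
 */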

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put them in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
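
/*
 * Example: a sketch of walking the pages backing a vmalloc'ed buffer
 * (hypothetical code; assumes "buf" came from vmalloc() and "size" is
 * its length):
 *
 *	void *p;
 *
 *	for (p = buf; p < buf + size; p += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(p);
 *		...
 *	}
 *
 * Note this only works for addresses for which is_vmalloc_addr() (or
 * is_vmalloc_or_module_addr()) is true; linear-map addresses should use
 * virt_to_page() instead.
 */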


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK       0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK    0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * size of its sub-tree, right or left. Therefore it is possible
 * to find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when a node is removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the addresses of the parent node
 * and its left or right link for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the BUG() if there are partial (left/right)
		 * or full overlaps.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * That is because we populate the tree from the bottom
		 * up to the parent levels once the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Keep this list address-sorted */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels starting from the VA point. The propagation must be done when
 * the VA size is modified by changing its va_start/va_end, or when a
 * new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree (free path);
 * - After VA has been shrunk (allocation path);
 * - After VA has been increased (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1,
 * only its subtree_max_size is updated, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new free
 * area is inserted. If VA has been merged, it is freed.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can overflow due to a big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request described by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
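
/*
 * Worked example (illustrative): suppose the free tree holds blocks of
 * sizes 2, 4 and 8, with the block of size 4 at the root:
 *
 *          4  (subtree_max_size = 8)
 *         / \
 *  (2)   2   8  (8)
 *
 * A request for size 3 first checks the left subtree: its max size is 2,
 * which is too small, so the root itself is tried via is_within_this_va().
 * If the root block satisfies the vstart/align constraints it is returned;
 * otherwise the search descends into the right subtree, whose max size (8)
 * is large enough. The subtree_max_size values are what allow whole
 * subtrees to be skipped without visiting their nodes.
 */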

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
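
/*
 * Concrete example (illustrative): for a free VA spanning
 * [0x1000, 0x9000) and an allocation of size 0x2000:
 *
 *	nva_start_addr == 0x1000 -> LE_FIT_TYPE (left edge is consumed)
 *	nva_start_addr == 0x7000 -> RE_FIT_TYPE (right edge is consumed)
 *	nva_start_addr == 0x4000 -> NE_FIT_TYPE (VA is split in two)
 *	nva_start_addr == 0x1000 with size 0x8000 -> FL_FIT_TYPE (exact fit)
 *
 * Only NE_FIT_TYPE requires a second vmap_area object for the remainder,
 * which is why that case is backed by the per-CPU preload below.
 */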

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason
			 * is that it most likely never ends up with
			 * NE_FIT_TYPE splitting. In case of percpu
			 * allocations, offsets and sizes are aligned to a
			 * fixed align request, i.e. RE_FIT_TYPE and
			 * FL_FIT_TYPE are its main fitting cases.
			 *
			 * There are a few exceptions though: for example
			 * the first allocation (early boot up), when we
			 * have "one" big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular
			 * "vmap" allocations, if "this" current CPU was
			 * not preloaded. See the comment in
			 * alloc_vmap_area() for why. If so, GFP_NOWAIT is
			 * used instead to get an extra object for the
			 * split. That is rare and most of the time does
			 * not occur.
			 *
			 * What happens if an allocation fails? Basically,
			 * an "overflow" path is triggered to purge lazily
			 * freed areas to free some memory, then the "retry"
			 * path is triggered to repeat one more time. See
			 * more details in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area().
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when the fit type of a free area is NE_FIT_TYPE. Please note,
	 * it does not guarantee that an allocation occurs on a CPU that
	 * is preloaded; instead we minimize the cases when it is not.
	 * That can happen because of CPU migration, since there is a
	 * race until the below spinlock is taken.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable under
	 * low memory conditions and high memory pressure. In the rare
	 * case, if not preloaded, GFP_NOWAIT is used.
	 *
	 * Set "pva" to NULL here, because of the "retry" path.
	 */
	pva = NULL;

	if (!this_cpu_read(ne_fit_preload_node))
		/*
		 * Even if it fails we do not really care about that.
		 * Just proceed as it is. If needed, the "overflow" path
		 * will refill the cache we allocate from.
		 */
		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(&free_vmap_area_lock);

	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
		kmem_cache_free(vmap_area_cachep, pva);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}
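
/*
 * The preload-then-publish pattern above, in short (illustrative):
 *
 *	pva = NULL;
 *	if (!this_cpu_read(ne_fit_preload_node))
 *		pva = kmem_cache_alloc_node(...);	// may sleep, permissive mask
 *	spin_lock(&free_vmap_area_lock);
 *	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
 *		kmem_cache_free(vmap_area_cachep, pva);	// lost the race, drop it
 *
 * The cmpxchg closes the window where the task migrates to another CPU
 * (or another task refills the slot) between the read and the lock: the
 * extra object is published only if the per-CPU slot is still empty,
 * otherwise it is returned to the cache.
 */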

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
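
/*
 * Example: a sketch of a purge notifier (hypothetical subsystem code).
 * The callback runs from the vmap allocation "overflow" path above and
 * should add the number of pages it released to *freed:
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_cache_shrink();	// hypothetical helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_vmap_purge };
 *	...
 *	register_vmap_purge_notifier(&my_nb);
 */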

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
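
/*
 * Worked numbers (illustrative, 4K pages): each fls() step contributes
 * 32MB worth of lazily-freed space, so:
 *
 *	1 CPU   -> fls(1)  = 1 -> 1 * 32MB =  32MB ->  8192 pages
 *	16 CPUs -> fls(16) = 5 -> 5 * 32MB = 160MB -> 40960 pages
 *	64 CPUs -> fls(64) = 7 -> 7 * 32MB = 224MB -> 57344 pages
 *
 * i.e. the threshold grows with log2(nr_cpus), not linearly.
 */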

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants the vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_unmappings();

	/*
	 * TODO: calculate the flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area(va, &free_vmap_area_root,
					    &free_vmap_area_list);

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
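
/*
 * Worked numbers (illustrative, 64-bit with 4K pages and NR_CPUS = 64):
 *
 *	VMALLOC_PAGES   = 128GB / 4KB              = 32M pages
 *	per-CPU share   = 32M / 64 / 16            = 32768 bits
 *	VMAP_BBMAP_BITS = clamp(32768, 128, 1024)  = 1024
 *	VMAP_BLOCK_SIZE = 1024 * 4KB               = 4MB
 *
 * So each vmap block covers 4MB, while individual vb_alloc() requests are
 * capped at VMAP_MAX_ALLOC pages (256KB here).
 */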

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
 *	this block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
 * @order: how many 2^order pages should be occupied in the newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants, since
		 * get_order(0) returns a funny result. Just warn and
		 * terminate early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);

	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;

	vb_idx = addr_to_vb_idx(addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	unmap_kernel_range_noflush(addr, size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
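
/*
 * Example: a sketch of a typical reason to call this (hypothetical
 * driver code changing page attributes before handing pages to a device):
 *
 *	vm_unmap_aliases();		// kill stale vmap TLB aliases
 *	set_memory_uc(addr, nr_pages);	// now safe to change attributes
 *
 * Without the flush, a CPU could still reach the pages through an old
 * lazily-unmapped vmap alias that carries the previous (cached) attributes.
 */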

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
 * faster than vmap(). But if you mix long-lived and short-lived objects
 * with vm_map_ram(), it can consume lots of address space through
 * fragmentation (especially on a 32bit machine) and you may eventually see
 * failures. Please use this function for short-lived objects only.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	kasan_unpoison_vmalloc(mem, size);

	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
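
/*
 * Example: a minimal vm_map_ram()/vm_unmap_ram() round trip (hypothetical
 * code; "pages" is an array of "count" already-allocated pages):
 *
 *	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, count << PAGE_SHIFT);
 *	vm_unmap_ram(va, count);
 *
 * Pass the same "count" to vm_unmap_ram(); partial unmaps are not
 * supported. Mappings always use PAGE_KERNEL protection.
 */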
1869
1870 static struct vm_struct *vmlist __initdata;
1871
1872 /**
1873 * vm_area_add_early - add vmap area early during boot
1874 * @vm: vm_struct to add
1875 *
1876 * This function is used to add fixed kernel vm area to vmlist before
1877 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1878 * should contain proper values and the other fields should be zero.
1879 *
1880 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1881 */
1882 void __init vm_area_add_early(struct vm_struct *vm)
1883 {
1884 struct vm_struct *tmp, **p;
1885
1886 BUG_ON(vmap_initialized);
1887 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1888 if (tmp->addr >= vm->addr) {
1889 BUG_ON(tmp->addr < vm->addr + vm->size);
1890 break;
1891 } else
1892 BUG_ON(tmp->addr + tmp->size > vm->addr);
1893 }
1894 vm->next = *p;
1895 *p = vm;
1896 }
1897
1898 /**
1899 * vm_area_register_early - register vmap area early during boot
1900 * @vm: vm_struct to register
1901 * @align: requested alignment
1902 *
1903 * This function is used to register kernel vm area before
1904 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1905 * proper values on entry and other fields should be zero. On return,
1906 * vm->addr contains the allocated address.
1907 *
1908 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1909 */
1910 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1911 {
1912 static size_t vm_init_off __initdata;
1913 unsigned long addr;
1914
1915 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1916 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1917
1918 vm->addr = (void *)addr;
1919
1920 vm_area_add_early(vm);
1921 }
1922
1923 static void vmap_init_free_space(void)
1924 {
1925 unsigned long vmap_start = 1;
1926 const unsigned long vmap_end = ULONG_MAX;
1927 struct vmap_area *busy, *free;
1928
1929 /*
1930 * B F B B B F
1931 * -|-----|.....|-----|-----|-----|.....|-
1932 * | The KVA space |
1933 * |<--------------------------------->|
1934 */
1935 list_for_each_entry(busy, &vmap_area_list, list) {
1936 if (busy->va_start - vmap_start > 0) {
1937 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1938 if (!WARN_ON_ONCE(!free)) {
1939 free->va_start = vmap_start;
1940 free->va_end = busy->va_start;
1941
1942 insert_vmap_area_augment(free, NULL,
1943 &free_vmap_area_root,
1944 &free_vmap_area_list);
1945 }
1946 }
1947
1948 vmap_start = busy->va_end;
1949 }
1950
1951 if (vmap_end - vmap_start > 0) {
1952 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1953 if (!WARN_ON_ONCE(!free)) {
1954 free->va_start = vmap_start;
1955 free->va_end = vmap_end;
1956
1957 insert_vmap_area_augment(free, NULL,
1958 &free_vmap_area_root,
1959 &free_vmap_area_list);
1960 }
1961 }
1962 }
1963
1964 void __init vmalloc_init(void)
1965 {
1966 struct vmap_area *va;
1967 struct vm_struct *tmp;
1968 int i;
1969
1970 /*
1971 * Create the cache for vmap_area objects.
1972 */
1973 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1974
1975 for_each_possible_cpu(i) {
1976 struct vmap_block_queue *vbq;
1977 struct vfree_deferred *p;
1978
1979 vbq = &per_cpu(vmap_block_queue, i);
1980 spin_lock_init(&vbq->lock);
1981 INIT_LIST_HEAD(&vbq->free);
1982 p = &per_cpu(vfree_deferred, i);
1983 init_llist_head(&p->list);
1984 INIT_WORK(&p->wq, free_work);
1985 }
1986
1987 /* Import existing vmlist entries. */
1988 for (tmp = vmlist; tmp; tmp = tmp->next) {
1989 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1990 if (WARN_ON_ONCE(!va))
1991 continue;
1992
1993 va->va_start = (unsigned long)tmp->addr;
1994 va->va_end = va->va_start + tmp->size;
1995 va->vm = tmp;
1996 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1997 }
1998
1999 /*
2000 * Now we can initialize a free vmap space.
2001 */
2002 vmap_init_free_space();
2003 vmap_initialized = true;
2004 }
2005
2006 /**
2007 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2008 * @addr: start of the VM area to unmap
2009 * @size: size of the VM area to unmap
2010 *
2011 * Similar to unmap_kernel_range_noflush() but flushes the virtual cache
2012 * before the unmapping and the TLB after.
2013 */
2014 void unmap_kernel_range(unsigned long addr, unsigned long size)
2015 {
2016 unsigned long end = addr + size;
2017
2018 flush_cache_vunmap(addr, end);
2019 unmap_kernel_range_noflush(addr, size);
2020 flush_tlb_kernel_range(addr, end);
2021 }
2022
2023 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2024 struct vmap_area *va, unsigned long flags, const void *caller)
2025 {
2026 vm->flags = flags;
2027 vm->addr = (void *)va->va_start;
2028 vm->size = va->va_end - va->va_start;
2029 vm->caller = caller;
2030 va->vm = vm;
2031 }
2032
2033 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2034 unsigned long flags, const void *caller)
2035 {
2036 spin_lock(&vmap_area_lock);
2037 setup_vmalloc_vm_locked(vm, va, flags, caller);
2038 spin_unlock(&vmap_area_lock);
2039 }
2040
2041 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2042 {
2043 /*
2044 * Before removing VM_UNINITIALIZED,
2045 * we should make sure that vm has proper values.
2046 * Pair with smp_rmb() in show_numa_info().
2047 */
2048 smp_wmb();
2049 vm->flags &= ~VM_UNINITIALIZED;
2050 }
2051
2052 static struct vm_struct *__get_vm_area_node(unsigned long size,
2053 unsigned long align, unsigned long flags, unsigned long start,
2054 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2055 {
2056 struct vmap_area *va;
2057 struct vm_struct *area;
2058 unsigned long requested_size = size;
2059
2060 BUG_ON(in_interrupt());
2061 size = PAGE_ALIGN(size);
2062 if (unlikely(!size))
2063 return NULL;
2064
2065 if (flags & VM_IOREMAP)
2066 align = 1ul << clamp_t(int, get_count_order_long(size),
2067 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2068
2069 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2070 if (unlikely(!area))
2071 return NULL;
2072
2073 if (!(flags & VM_NO_GUARD))
2074 size += PAGE_SIZE;
2075
2076 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2077 if (IS_ERR(va)) {
2078 kfree(area);
2079 return NULL;
2080 }
2081
2082 kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2083
2084 setup_vmalloc_vm(area, va, flags, caller);
2085
2086 return area;
2087 }
2088
2089 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2090 unsigned long start, unsigned long end,
2091 const void *caller)
2092 {
2093 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2094 GFP_KERNEL, caller);
2095 }
2096
2097 /**
2098 * get_vm_area - reserve a contiguous kernel virtual area
2099 * @size: size of the area
2100 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
2101 *
2102 * Search an area of @size in the kernel virtual mapping area,
2103 * and reserve it for our purposes.
2105 *
2106 * Return: the area descriptor on success or %NULL on failure.
2107 */
2108 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2109 {
2110 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2111 NUMA_NO_NODE, GFP_KERNEL,
2112 __builtin_return_address(0));
2113 }
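
/*
 * Example (illustrative sketch): get_vm_area() only reserves kernel
 * virtual address space; nothing is mapped until the caller does so.
 * The reservation is dropped with free_vm_area() (defined later in
 * this file). SZ_64K is from <linux/sizes.h>.
 *
 *	struct vm_struct *area = get_vm_area(SZ_64K, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... map something into [area->addr, area->addr + area->size) ...
 *	free_vm_area(area);
 */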
2114
2115 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2116 const void *caller)
2117 {
2118 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2119 NUMA_NO_NODE, GFP_KERNEL, caller);
2120 }
2121
2122 /**
2123 * find_vm_area - find a contiguous kernel virtual area
2124 * @addr: base address
2125 *
2126 * Search for the kernel VM area starting at @addr, and return it.
2127 * It is up to the caller to do all required locking to keep the returned
2128 * pointer valid.
2129 *
2130 * Return: pointer to the found area or %NULL on failure
2131 */
2132 struct vm_struct *find_vm_area(const void *addr)
2133 {
2134 struct vmap_area *va;
2135
2136 va = find_vmap_area((unsigned long)addr);
2137 if (!va)
2138 return NULL;
2139
2140 return va->vm;
2141 }
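
/*
 * Example (illustrative sketch): looking up the vm_struct behind a
 * mapped address, e.g. to report its size. As noted above, the caller
 * must guarantee the area is not freed concurrently. "addr" is
 * hypothetical.
 *
 *	struct vm_struct *area = find_vm_area(addr);
 *
 *	if (area)
 *		pr_debug("addr %p belongs to a %lu byte area\n",
 *			 addr, area->size);
 */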
2142
2143 /**
2144 * remove_vm_area - find and remove a contiguous kernel virtual area
2145 * @addr: base address
2146 *
2147 * Search for the kernel VM area starting at @addr, and remove it.
2148 * This function returns the found VM area, but using it is NOT safe
2149 * on SMP machines, except for its size or flags.
2150 *
2151 * Return: pointer to the found area or %NULL on failure
2152 */
2153 struct vm_struct *remove_vm_area(const void *addr)
2154 {
2155 struct vmap_area *va;
2156
2157 might_sleep();
2158
2159 spin_lock(&vmap_area_lock);
2160 va = __find_vmap_area((unsigned long)addr);
2161 if (va && va->vm) {
2162 struct vm_struct *vm = va->vm;
2163
2164 va->vm = NULL;
2165 spin_unlock(&vmap_area_lock);
2166
2167 kasan_free_shadow(vm);
2168 free_unmap_vmap_area(va);
2169
2170 return vm;
2171 }
2172
2173 spin_unlock(&vmap_area_lock);
2174 return NULL;
2175 }
2176
2177 static inline void set_area_direct_map(const struct vm_struct *area,
2178 int (*set_direct_map)(struct page *page))
2179 {
2180 int i;
2181
2182 for (i = 0; i < area->nr_pages; i++)
2183 if (page_address(area->pages[i]))
2184 set_direct_map(area->pages[i]);
2185 }
2186
2187 /* Handle removing and resetting vm mappings related to the vm_struct. */
2188 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2189 {
2190 unsigned long start = ULONG_MAX, end = 0;
2191 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2192 int flush_dmap = 0;
2193 int i;
2194
2195 remove_vm_area(area->addr);
2196
2197 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2198 if (!flush_reset)
2199 return;
2200
2201 /*
2202 * If not deallocating pages, just do the flush of the VM area and
2203 * return.
2204 */
2205 if (!deallocate_pages) {
2206 vm_unmap_aliases();
2207 return;
2208 }
2209
2210 /*
2211 * If execution gets here, flush the vm mapping and reset the direct
2212 * map. Find the start and end range of the direct mappings to make sure
2213 * the vm_unmap_aliases() flush includes the direct map.
2214 */
2215 for (i = 0; i < area->nr_pages; i++) {
2216 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2217 if (addr) {
2218 start = min(addr, start);
2219 end = max(addr + PAGE_SIZE, end);
2220 flush_dmap = 1;
2221 }
2222 }
2223
2224 /*
2225 * Set direct map to something invalid so that it won't be cached if
2226 * there are any accesses after the TLB flush, then flush the TLB and
2227 * reset the direct map permissions to the default.
2228 */
2229 set_area_direct_map(area, set_direct_map_invalid_noflush);
2230 _vm_unmap_aliases(start, end, flush_dmap);
2231 set_area_direct_map(area, set_direct_map_default_noflush);
2232 }
2233
2234 static void __vunmap(const void *addr, int deallocate_pages)
2235 {
2236 struct vm_struct *area;
2237
2238 if (!addr)
2239 return;
2240
2241 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2242 addr))
2243 return;
2244
2245 area = find_vm_area(addr);
2246 if (unlikely(!area)) {
2247 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2248 addr);
2249 return;
2250 }
2251
2252 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2253 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2254
2255 kasan_poison_vmalloc(area->addr, area->size);
2256
2257 vm_remove_mappings(area, deallocate_pages);
2258
2259 if (deallocate_pages) {
2260 int i;
2261
2262 for (i = 0; i < area->nr_pages; i++) {
2263 struct page *page = area->pages[i];
2264
2265 BUG_ON(!page);
2266 __free_pages(page, 0);
2267 }
2268 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2269
2270 kvfree(area->pages);
2271 }
2272
2273 kfree(area);
2274 return;
2275 }
2276
2277 static inline void __vfree_deferred(const void *addr)
2278 {
2279 /*
2280 * Use raw_cpu_ptr() because this can be called from preemptible
2281 * context. Preemption is absolutely fine here, because the llist_add()
2282 * implementation is lockless, so it works even if we are adding to
2283 * another cpu's list. schedule_work() should be fine with this too.
2284 */
2285 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2286
2287 if (llist_add((struct llist_node *)addr, &p->list))
2288 schedule_work(&p->wq);
2289 }
2290
2291 /**
2292 * vfree_atomic - release memory allocated by vmalloc()
2293 * @addr: memory base address
2294 *
2295 * This one is just like vfree() but can be called in any atomic context
2296 * except NMIs.
2297 */
2298 void vfree_atomic(const void *addr)
2299 {
2300 BUG_ON(in_nmi());
2301
2302 kmemleak_free(addr);
2303
2304 if (!addr)
2305 return;
2306 __vfree_deferred(addr);
2307 }
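
/*
 * Example (illustrative sketch): freeing from a context that must not
 * sleep, e.g. a hypothetical timer callback. Plain vfree() may sleep
 * when called outside interrupt context, so vfree_atomic() defers the
 * actual free to a workqueue instead. "my_buffer" is hypothetical.
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		vfree_atomic(my_buffer);
 *		my_buffer = NULL;
 *	}
 */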
2308
2309 static void __vfree(const void *addr)
2310 {
2311 if (unlikely(in_interrupt()))
2312 __vfree_deferred(addr);
2313 else
2314 __vunmap(addr, 1);
2315 }
2316
2317 /**
2318 * vfree - release memory allocated by vmalloc()
2319 * @addr: memory base address
2320 *
2321 * Free the virtually contiguous memory area starting at @addr, as
2322 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2323 * NULL, no operation is performed.
2324 *
2325 * Must not be called in NMI context (strictly speaking, only if we don't
2326 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2327 * conventions for vfree() arch-dependent would be a really bad idea)
2328 *
2329 * May sleep if called *not* from interrupt context.
2330 *
2331 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2332 */
2333 void vfree(const void *addr)
2334 {
2335 BUG_ON(in_nmi());
2336
2337 kmemleak_free(addr);
2338
2339 might_sleep_if(!in_interrupt());
2340
2341 if (!addr)
2342 return;
2343
2344 __vfree(addr);
2345 }
2346 EXPORT_SYMBOL(vfree);
2347
2348 /**
2349 * vunmap - release virtual mapping obtained by vmap()
2350 * @addr: memory base address
2351 *
2352 * Free the virtually contiguous memory area starting at @addr,
2353 * which was created from the page array passed to vmap().
2354 *
2355 * Must not be called in interrupt context.
2356 */
2357 void vunmap(const void *addr)
2358 {
2359 BUG_ON(in_interrupt());
2360 might_sleep();
2361 if (addr)
2362 __vunmap(addr, 0);
2363 }
2364 EXPORT_SYMBOL(vunmap);
2365
2366 /**
2367 * vmap - map an array of pages into virtually contiguous space
2368 * @pages: array of page pointers
2369 * @count: number of pages to map
2370 * @flags: vm_area->flags
2371 * @prot: page protection for the mapping
2372 *
2373 * Maps @count pages from @pages into contiguous kernel virtual
2374 * space.
2375 *
2376 * Return: the address of the area or %NULL on failure
2377 */
2378 void *vmap(struct page **pages, unsigned int count,
2379 unsigned long flags, pgprot_t prot)
2380 {
2381 struct vm_struct *area;
2382 unsigned long size; /* In bytes */
2383
2384 might_sleep();
2385
2386 if (count > totalram_pages())
2387 return NULL;
2388
2389 size = (unsigned long)count << PAGE_SHIFT;
2390 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2391 if (!area)
2392 return NULL;
2393
2394 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2395 pages) < 0) {
2396 vunmap(area->addr);
2397 return NULL;
2398 }
2399
2400 return area->addr;
2401 }
2402 EXPORT_SYMBOL(vmap);
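
/*
 * Example (illustrative sketch, error handling elided): stitching
 * separately allocated pages into one contiguous kernel mapping.
 * vunmap() tears down only the mapping; each page must still be freed
 * by the caller.
 *
 *	struct page *pages[4];
 *	void *va;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	... use the four pages at va ...
 *	vunmap(va);
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */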
2403
2404 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2405 pgprot_t prot, int node)
2406 {
2407 struct page **pages;
2408 unsigned int nr_pages, array_size, i;
2409 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2410 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2411 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2412 0 :
2413 __GFP_HIGHMEM;
2414
2415 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2416 array_size = (nr_pages * sizeof(struct page *));
2417
2418 /* Please note that the recursion is strictly bounded. */
2419 if (array_size > PAGE_SIZE) {
2420 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2421 node, area->caller);
2422 } else {
2423 pages = kmalloc_node(array_size, nested_gfp, node);
2424 }
2425
2426 if (!pages) {
2427 remove_vm_area(area->addr);
2428 kfree(area);
2429 return NULL;
2430 }
2431
2432 area->pages = pages;
2433 area->nr_pages = nr_pages;
2434
2435 for (i = 0; i < area->nr_pages; i++) {
2436 struct page *page;
2437
2438 if (node == NUMA_NO_NODE)
2439 page = alloc_page(alloc_mask|highmem_mask);
2440 else
2441 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2442
2443 if (unlikely(!page)) {
2444 /* Successfully allocated i pages, free them in __vunmap() */
2445 area->nr_pages = i;
2446 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2447 goto fail;
2448 }
2449 area->pages[i] = page;
2450 if (gfpflags_allow_blocking(gfp_mask))
2451 cond_resched();
2452 }
2453 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2454
2455 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2456 prot, pages) < 0)
2457 goto fail;
2458
2459 return area->addr;
2460
2461 fail:
2462 warn_alloc(gfp_mask, NULL,
2463 "vmalloc: allocation failure, allocated %ld of %ld bytes",
2464 (area->nr_pages*PAGE_SIZE), area->size);
2465 __vfree(area->addr);
2466 return NULL;
2467 }
2468
2469 /**
2470 * __vmalloc_node_range - allocate virtually contiguous memory
2471 * @size: allocation size
2472 * @align: desired alignment
2473 * @start: vm area range start
2474 * @end: vm area range end
2475 * @gfp_mask: flags for the page level allocator
2476 * @prot: protection mask for the allocated pages
2477 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2478 * @node: node to use for allocation or NUMA_NO_NODE
2479 * @caller: caller's return address
2480 *
2481 * Allocate enough pages to cover @size from the page level
2482 * allocator with @gfp_mask flags. Map them into contiguous
2483 * kernel virtual space, using a pagetable protection of @prot.
2484 *
2485 * Return: the address of the area or %NULL on failure
2486 */
2487 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2488 unsigned long start, unsigned long end, gfp_t gfp_mask,
2489 pgprot_t prot, unsigned long vm_flags, int node,
2490 const void *caller)
2491 {
2492 struct vm_struct *area;
2493 void *addr;
2494 unsigned long real_size = size;
2495
2496 size = PAGE_ALIGN(size);
2497 if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2498 goto fail;
2499
2500 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2501 vm_flags, start, end, node, gfp_mask, caller);
2502 if (!area)
2503 goto fail;
2504
2505 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2506 if (!addr)
2507 return NULL;
2508
2509 /*
2510 * In this function, the newly allocated vm_struct has the
2511 * VM_UNINITIALIZED flag set, i.e. it is not fully initialized.
2512 * Now that it is, remove the flag here.
2513 */
2514 clear_vm_uninitialized_flag(area);
2515
2516 kmemleak_vmalloc(area, size, gfp_mask);
2517
2518 return addr;
2519
2520 fail:
2521 warn_alloc(gfp_mask, NULL,
2522 "vmalloc: allocation failure: %lu bytes", real_size);
2523 return NULL;
2524 }
2525
2526 /**
2527 * __vmalloc_node - allocate virtually contiguous memory
2528 * @size: allocation size
2529 * @align: desired alignment
2530 * @gfp_mask: flags for the page level allocator
2531 * @node: node to use for allocation or NUMA_NO_NODE
2532 * @caller: caller's return address
2533 *
2534 * Allocate enough pages to cover @size from the page level allocator with
2535 * @gfp_mask flags. Map them into contiguous kernel virtual space.
2536 *
2537 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2538 * and __GFP_NOFAIL are not supported
2539 *
2540 * Any use of gfp flags outside of GFP_KERNEL should be consulted
2541 * with mm people.
2542 *
2543 * Return: pointer to the allocated memory or %NULL on error
2544 */
2545 void *__vmalloc_node(unsigned long size, unsigned long align,
2546 gfp_t gfp_mask, int node, const void *caller)
2547 {
2548 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2549 gfp_mask, PAGE_KERNEL, 0, node, caller);
2550 }
2551 /*
2552 * This is only for performance analysis and stress testing of vmalloc.
2553 * It is required by the vmalloc test module; do not use it for anything
2554 * else.
2555 */
2556 #ifdef CONFIG_TEST_VMALLOC_MODULE
2557 EXPORT_SYMBOL_GPL(__vmalloc_node);
2558 #endif
2559
2560 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
2561 {
2562 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
2563 __builtin_return_address(0));
2564 }
2565 EXPORT_SYMBOL(__vmalloc);
2566
2567 /**
2568 * vmalloc - allocate virtually contiguous memory
2569 * @size: allocation size
2570 *
2571 * Allocate enough pages to cover @size from the page level
2572 * allocator and map them into contiguous kernel virtual space.
2573 *
2574 * For tight control over page level allocator and protection flags
2575 * use __vmalloc() instead.
2576 *
2577 * Return: pointer to the allocated memory or %NULL on error
2578 */
2579 void *vmalloc(unsigned long size)
2580 {
2581 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
2582 __builtin_return_address(0));
2583 }
2584 EXPORT_SYMBOL(vmalloc);
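
/*
 * Example (illustrative sketch): a large table that only needs to be
 * virtually contiguous. array_size() (from <linux/overflow.h>,
 * included above) guards the multiplication; "nents" is hypothetical.
 *
 *	u32 *table = vmalloc(array_size(nents, sizeof(*table)));
 *
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */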
2585
2586 /**
2587 * vzalloc - allocate virtually contiguous memory with zero fill
2588 * @size: allocation size
2589 *
2590 * Allocate enough pages to cover @size from the page level
2591 * allocator and map them into contiguous kernel virtual space.
2592 * The memory allocated is set to zero.
2593 *
2594 * For tight control over page level allocator and protection flags
2595 * use __vmalloc() instead.
2596 *
2597 * Return: pointer to the allocated memory or %NULL on error
2598 */
2599 void *vzalloc(unsigned long size)
2600 {
2601 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
2602 __builtin_return_address(0));
2603 }
2604 EXPORT_SYMBOL(vzalloc);
2605
2606 /**
2607 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2608 * @size: allocation size
2609 *
2610 * The resulting memory area is zeroed so it can be mapped to userspace
2611 * without leaking data.
2612 *
2613 * Return: pointer to the allocated memory or %NULL on error
2614 */
2615 void *vmalloc_user(unsigned long size)
2616 {
2617 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2618 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2619 VM_USERMAP, NUMA_NO_NODE,
2620 __builtin_return_address(0));
2621 }
2622 EXPORT_SYMBOL(vmalloc_user);
2623
2624 /**
2625 * vmalloc_node - allocate memory on a specific node
2626 * @size: allocation size
2627 * @node: numa node
2628 *
2629 * Allocate enough pages to cover @size from the page level
2630 * allocator and map them into contiguous kernel virtual space.
2631 *
2632 * For tight control over page level allocator and protection flags
2633 * use __vmalloc() instead.
2634 *
2635 * Return: pointer to the allocated memory or %NULL on error
2636 */
2637 void *vmalloc_node(unsigned long size, int node)
2638 {
2639 return __vmalloc_node(size, 1, GFP_KERNEL, node,
2640 __builtin_return_address(0));
2641 }
2642 EXPORT_SYMBOL(vmalloc_node);
2643
2644 /**
2645 * vzalloc_node - allocate memory on a specific node with zero fill
2646 * @size: allocation size
2647 * @node: numa node
2648 *
2649 * Allocate enough pages to cover @size from the page level
2650 * allocator and map them into contiguous kernel virtual space.
2651 * The memory allocated is set to zero.
2652 *
2653 * Return: pointer to the allocated memory or %NULL on error
2654 */
2655 void *vzalloc_node(unsigned long size, int node)
2656 {
2657 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
2658 __builtin_return_address(0));
2659 }
2660 EXPORT_SYMBOL(vzalloc_node);
2661
2662 /**
2663 * vmalloc_exec - allocate virtually contiguous, executable memory
2664 * @size: allocation size
2665 *
2666 * Kernel-internal function to allocate enough pages to cover @size
2667 * from the page level allocator and map them into contiguous and
2668 * executable kernel virtual space.
2669 *
2670 * For tight control over page level allocator and protection flags
2671 * use __vmalloc() instead.
2672 *
2673 * Return: pointer to the allocated memory or %NULL on error
2674 */
2675 void *vmalloc_exec(unsigned long size)
2676 {
2677 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2678 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2679 NUMA_NO_NODE, __builtin_return_address(0));
2680 }
2681
2682 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2683 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2684 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2685 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2686 #else
2687 /*
2688 * 64-bit systems should always have either DMA or DMA32 zones. For others,
2689 * GFP_DMA32 should do the right thing and use the normal zone.
2690 */
2691 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2692 #endif
2693
2694 /**
2695 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2696 * @size: allocation size
2697 *
2698 * Allocate enough 32bit PA addressable pages to cover @size from the
2699 * page level allocator and map them into contiguous kernel virtual space.
2700 *
2701 * Return: pointer to the allocated memory or %NULL on error
2702 */
2703 void *vmalloc_32(unsigned long size)
2704 {
2705 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
2706 __builtin_return_address(0));
2707 }
2708 EXPORT_SYMBOL(vmalloc_32);
2709
2710 /**
2711 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2712 * @size: allocation size
2713 *
2714 * The resulting memory area is 32bit addressable and zeroed so it can be
2715 * mapped to userspace without leaking data.
2716 *
2717 * Return: pointer to the allocated memory or %NULL on error
2718 */
2719 void *vmalloc_32_user(unsigned long size)
2720 {
2721 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2722 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2723 VM_USERMAP, NUMA_NO_NODE,
2724 __builtin_return_address(0));
2725 }
2726 EXPORT_SYMBOL(vmalloc_32_user);
2727
2728 /*
2729 * Small helper routine to copy contents from addr into buf.
2730 * If a page is not present, fill the buffer with zeroes.
2731 */
2732
2733 static int aligned_vread(char *buf, char *addr, unsigned long count)
2734 {
2735 struct page *p;
2736 int copied = 0;
2737
2738 while (count) {
2739 unsigned long offset, length;
2740
2741 offset = offset_in_page(addr);
2742 length = PAGE_SIZE - offset;
2743 if (length > count)
2744 length = count;
2745 p = vmalloc_to_page(addr);
2746 /*
2747 * To do safe access to this _mapped_ area, we need a
2748 * lock. But taking a lock here would add vmalloc()/vfree()
2749 * overhead to this rarely used _debug_ interface. Instead,
2750 * we use kmap() and accept a small overhead in this
2751 * access function.
2752 */
2753 if (p) {
2754 /*
2755 * we can expect USER0 is not used (see vread/vwrite's
2756 * function description)
2757 */
2758 void *map = kmap_atomic(p);
2759 memcpy(buf, map + offset, length);
2760 kunmap_atomic(map);
2761 } else
2762 memset(buf, 0, length);
2763
2764 addr += length;
2765 buf += length;
2766 copied += length;
2767 count -= length;
2768 }
2769 return copied;
2770 }
2771
2772 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2773 {
2774 struct page *p;
2775 int copied = 0;
2776
2777 while (count) {
2778 unsigned long offset, length;
2779
2780 offset = offset_in_page(addr);
2781 length = PAGE_SIZE - offset;
2782 if (length > count)
2783 length = count;
2784 p = vmalloc_to_page(addr);
2785 /*
2786 * To do safe access to this _mapped_ area, we need a
2787 * lock. But taking a lock here would add vmalloc()/vfree()
2788 * overhead to this rarely used _debug_ interface. Instead,
2789 * we use kmap() and accept a small overhead in this
2790 * access function.
2791 */
2792 if (p) {
2793 /*
2794 * we can expect USER0 is not used (see vread/vwrite's
2795 * function description)
2796 */
2797 void *map = kmap_atomic(p);
2798 memcpy(map + offset, buf, length);
2799 kunmap_atomic(map);
2800 }
2801 addr += length;
2802 buf += length;
2803 copied += length;
2804 count -= length;
2805 }
2806 return copied;
2807 }
2808
2809 /**
2810 * vread() - read vmalloc area in a safe way.
2811 * @buf: buffer for reading data
2812 * @addr: vm address.
2813 * @count: number of bytes to be read.
2814 *
2815 * This function checks that addr is a valid vmalloc'ed area, and
2816 * copies data from that area to a given buffer. If the given memory range
2817 * of [addr...addr+count) includes some valid address, data is copied to
2818 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2819 * An IOREMAP area is treated as a memory hole and no copy is done.
2820 *
2821 * If [addr...addr+count) doesn't include any intersection with a live
2822 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2823 *
2824 * Note: In usual operation, vread() is never necessary because the caller
2825 * should know the vmalloc() area is valid and can use memcpy().
2826 * This is for routines which have to access the vmalloc area without
2827 * any prior information, such as /dev/kmem.
2828 *
2829 * Return: number of bytes for which addr and buf should be increased
2830 * (same number as @count) or %0 if [addr...addr+count) doesn't
2831 * include any intersection with valid vmalloc area
2832 */
2833 long vread(char *buf, char *addr, unsigned long count)
2834 {
2835 struct vmap_area *va;
2836 struct vm_struct *vm;
2837 char *vaddr, *buf_start = buf;
2838 unsigned long buflen = count;
2839 unsigned long n;
2840
2841 /* Don't allow overflow */
2842 if ((unsigned long) addr + count < count)
2843 count = -(unsigned long) addr;
2844
2845 spin_lock(&vmap_area_lock);
2846 list_for_each_entry(va, &vmap_area_list, list) {
2847 if (!count)
2848 break;
2849
2850 if (!va->vm)
2851 continue;
2852
2853 vm = va->vm;
2854 vaddr = (char *) vm->addr;
2855 if (addr >= vaddr + get_vm_area_size(vm))
2856 continue;
2857 while (addr < vaddr) {
2858 if (count == 0)
2859 goto finished;
2860 *buf = '\0';
2861 buf++;
2862 addr++;
2863 count--;
2864 }
2865 n = vaddr + get_vm_area_size(vm) - addr;
2866 if (n > count)
2867 n = count;
2868 if (!(vm->flags & VM_IOREMAP))
2869 aligned_vread(buf, addr, n);
2870 else /* IOREMAP area is treated as memory hole */
2871 memset(buf, 0, n);
2872 buf += n;
2873 addr += n;
2874 count -= n;
2875 }
2876 finished:
2877 spin_unlock(&vmap_area_lock);
2878
2879 if (buf == buf_start)
2880 return 0;
2881 /* zero-fill memory holes */
2882 if (buf != buf_start + buflen)
2883 memset(buf, 0, buflen - (buf - buf_start));
2884
2885 return buflen;
2886 }
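
/*
 * Example (illustrative sketch): copying out a possibly sparse vmalloc
 * range; holes and ioremap ranges come back zero-filled, and a return
 * of 0 means nothing in [vaddr, vaddr + len) was a vmalloc area.
 * "vaddr" and "len" are hypothetical.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *	long n;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	n = vread(kbuf, vaddr, len);
 *	if (!n)
 *		pr_debug("no vmalloc area intersects the range\n");
 */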
2887
2888 /**
2889 * vwrite() - write vmalloc area in a safe way.
2890 * @buf: buffer for source data
2891 * @addr: vm address.
2892 * @count: number of bytes to be written.
2893 *
2894 * This function checks that addr is a valid vmalloc'ed area, and
2895 * copies data from a buffer to the given addr. If the specified range
2896 * of [addr...addr+count) includes some valid address, data is copied
2897 * from the proper area of @buf. Memory holes are skipped, and an
2898 * IOREMAP area is treated as a memory hole: no copy is done.
2899 *
2900 * If [addr...addr+count) doesn't include any intersection with a live
2901 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2902 *
2903 * Note: In usual operation, vwrite() is never necessary because the caller
2904 * should know the vmalloc() area is valid and can use memcpy().
2905 * This is for routines which have to access the vmalloc area without
2906 * any prior information, such as /dev/kmem.
2907 *
2908 * Return: number of bytes for which addr and buf should be
2909 * increased (same number as @count) or %0 if [addr...addr+count)
2910 * doesn't include any intersection with valid vmalloc area
2911 */
2912 long vwrite(char *buf, char *addr, unsigned long count)
2913 {
2914 struct vmap_area *va;
2915 struct vm_struct *vm;
2916 char *vaddr;
2917 unsigned long n, buflen;
2918 int copied = 0;
2919
2920 /* Don't allow overflow */
2921 if ((unsigned long) addr + count < count)
2922 count = -(unsigned long) addr;
2923 buflen = count;
2924
2925 spin_lock(&vmap_area_lock);
2926 list_for_each_entry(va, &vmap_area_list, list) {
2927 if (!count)
2928 break;
2929
2930 if (!va->vm)
2931 continue;
2932
2933 vm = va->vm;
2934 vaddr = (char *) vm->addr;
2935 if (addr >= vaddr + get_vm_area_size(vm))
2936 continue;
2937 while (addr < vaddr) {
2938 if (count == 0)
2939 goto finished;
2940 buf++;
2941 addr++;
2942 count--;
2943 }
2944 n = vaddr + get_vm_area_size(vm) - addr;
2945 if (n > count)
2946 n = count;
2947 if (!(vm->flags & VM_IOREMAP)) {
2948 aligned_vwrite(buf, addr, n);
2949 copied++;
2950 }
2951 buf += n;
2952 addr += n;
2953 count -= n;
2954 }
2955 finished:
2956 spin_unlock(&vmap_area_lock);
2957 if (!copied)
2958 return 0;
2959 return buflen;
2960 }
2961
2962 /**
2963 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2964 * @vma: vma to cover
2965 * @uaddr: target user address to start at
2966 * @kaddr: virtual address of vmalloc kernel memory
2967 * @pgoff: offset from @kaddr to start at
2968 * @size: size of map area
2969 *
2970 * Returns: 0 for success, -Exxx on failure
2971 *
2972 * This function checks that @kaddr is a valid vmalloc'ed area,
2973 * and that it is big enough to cover the range starting at
2974 * @uaddr in @vma. Will return failure if those criteria aren't
2975 * met.
2976 *
2977 * Similar to remap_pfn_range() (see mm/memory.c)
2978 */
2979 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2980 void *kaddr, unsigned long pgoff,
2981 unsigned long size)
2982 {
2983 struct vm_struct *area;
2984 unsigned long off;
2985 unsigned long end_index;
2986
2987 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
2988 return -EINVAL;
2989
2990 size = PAGE_ALIGN(size);
2991
2992 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2993 return -EINVAL;
2994
2995 area = find_vm_area(kaddr);
2996 if (!area)
2997 return -EINVAL;
2998
2999 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3000 return -EINVAL;
3001
3002 if (check_add_overflow(size, off, &end_index) ||
3003 end_index > get_vm_area_size(area))
3004 return -EINVAL;
3005 kaddr += off;
3006
3007 do {
3008 struct page *page = vmalloc_to_page(kaddr);
3009 int ret;
3010
3011 ret = vm_insert_page(vma, uaddr, page);
3012 if (ret)
3013 return ret;
3014
3015 uaddr += PAGE_SIZE;
3016 kaddr += PAGE_SIZE;
3017 size -= PAGE_SIZE;
3018 } while (size > 0);
3019
3020 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3021
3022 return 0;
3023 }
3024 EXPORT_SYMBOL(remap_vmalloc_range_partial);
3025
3026 /**
3027 * remap_vmalloc_range - map vmalloc pages to userspace
3028 * @vma: vma to cover (map full range of vma)
3029 * @addr: vmalloc memory
3030 * @pgoff: number of pages into addr before first page to map
3031 *
3032 * Returns: 0 for success, -Exxx on failure
3033 *
3034 * This function checks that addr is a valid vmalloc'ed area, and
3035 * that it is big enough to cover the vma. Will return failure if
3036 * those criteria aren't met.
3037 *
3038 * Similar to remap_pfn_range() (see mm/memory.c)
3039 */
3040 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3041 unsigned long pgoff)
3042 {
3043 return remap_vmalloc_range_partial(vma, vma->vm_start,
3044 addr, pgoff,
3045 vma->vm_end - vma->vm_start);
3046 }
3047 EXPORT_SYMBOL(remap_vmalloc_range);
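
/*
 * Example (illustrative sketch): a hypothetical ->mmap() handler
 * exporting a buffer that was allocated with vmalloc_user() (which
 * sets VM_USERMAP, satisfying the flag check in
 * remap_vmalloc_range_partial() above). "my_buffer" is hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buffer, vma->vm_pgoff);
 *	}
 */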
3048
3049 /*
3050 * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
3051 * not to provide them.
3052 *
3053 * The purpose of this function is to make sure the vmalloc area
3054 * mappings are identical in all page-tables in the system.
3055 */
3056 void __weak vmalloc_sync_mappings(void)
3057 {
3058 }
3059
3060 void __weak vmalloc_sync_unmappings(void)
3061 {
3062 }
3063
3064 static int f(pte_t *pte, unsigned long addr, void *data)
3065 {
3066 pte_t ***p = data;
3067
3068 if (p) {
3069 *(*p) = pte;
3070 (*p)++;
3071 }
3072 return 0;
3073 }
3074
3075 /**
3076 * alloc_vm_area - allocate a range of kernel address space
3077 * @size: size of the area
3078 * @ptes: returns the PTEs for the address space
3079 *
3080 * Returns: NULL on failure, vm_struct on success
3081 *
3082 * This function reserves a range of kernel address space, and
3083 * allocates pagetables to map that range. No actual mappings
3084 * are created.
3085 *
3086 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3087 * allocated for the VM area are returned.
3088 */
3089 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3090 {
3091 struct vm_struct *area;
3092
3093 area = get_vm_area_caller(size, VM_IOREMAP,
3094 __builtin_return_address(0));
3095 if (area == NULL)
3096 return NULL;
3097
3098 /*
3099 * This ensures that page tables are constructed for this region
3100 * of kernel virtual address space and mapped into init_mm.
3101 */
3102 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3103 size, f, ptes ? &ptes : NULL)) {
3104 free_vm_area(area);
3105 return NULL;
3106 }
3107
3108 return area;
3109 }
3110 EXPORT_SYMBOL_GPL(alloc_vm_area);
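
/*
 * Example (illustrative sketch): reserving one page of address space
 * with its page table pre-constructed, then installing a mapping
 * through the returned PTE pointer, as grant-table style users do.
 * The actual PTE installation is elided.
 *
 *	pte_t *ptes[1];
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, ptes);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... install a pte through ptes[0], then use area->addr ...
 *	free_vm_area(area);
 */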
3111
3112 void free_vm_area(struct vm_struct *area)
3113 {
3114 struct vm_struct *ret;
3115 ret = remove_vm_area(area->addr);
3116 BUG_ON(ret != area);
3117 kfree(area);
3118 }
3119 EXPORT_SYMBOL_GPL(free_vm_area);
3120
3121 #ifdef CONFIG_SMP
3122 static struct vmap_area *node_to_va(struct rb_node *n)
3123 {
3124 return rb_entry_safe(n, struct vmap_area, rb_node);
3125 }
3126
3127 /**
3128 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3129 * @addr: target address
3130 *
3131 * Returns: the vmap_area if it is found. If there is no such area,
3132 * the highest vmap_area below @addr is returned instead, i.e. one
3133 * with va->va_start < addr && va->va_end < addr, or NULL if there
3134 * are no areas before @addr.
3135 */
3136 static struct vmap_area *
3137 pvm_find_va_enclose_addr(unsigned long addr)
3138 {
3139 struct vmap_area *va, *tmp;
3140 struct rb_node *n;
3141
3142 n = free_vmap_area_root.rb_node;
3143 va = NULL;
3144
3145 while (n) {
3146 tmp = rb_entry(n, struct vmap_area, rb_node);
3147 if (tmp->va_start <= addr) {
3148 va = tmp;
3149 if (tmp->va_end >= addr)
3150 break;
3151
3152 n = n->rb_right;
3153 } else {
3154 n = n->rb_left;
3155 }
3156 }
3157
3158 return va;
3159 }
3160
3161 /**
3162 * pvm_determine_end_from_reverse - find the highest aligned address
3163 * of a free block below VMALLOC_END
3164 * @va:
3165 * in - the VA we start the search from (reverse order);
3166 * out - the VA with the highest aligned end address.
3167 *
3168 * Returns: the determined end address within the vmap_area, or 0 if none fits
3169 */
3170 static unsigned long
3171 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3172 {
3173 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3174 unsigned long addr;
3175
3176 if (likely(*va)) {
3177 list_for_each_entry_from_reverse((*va),
3178 &free_vmap_area_list, list) {
3179 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3180 if ((*va)->va_start < addr)
3181 return addr;
3182 }
3183 }
3184
3185 return 0;
3186 }
3187
3188 /**
3189 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3190 * @offsets: array containing offset of each area
3191 * @sizes: array containing size of each area
3192 * @nr_vms: the number of areas to allocate
3193 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3194 *
3195 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3196 * vm_structs on success, %NULL on failure
3197 *
3198 * Percpu allocator wants to use congruent vm areas so that it can
3199 * maintain the offsets among percpu areas. This function allocates
3200 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
3201 * be scattered pretty far, distance between two areas easily going up
3202 * to gigabytes. To avoid interacting with regular vmallocs, these
3203 * areas are allocated from top.
3204 *
3205 * Despite its complicated look, this allocator is rather simple. It
3206 * does everything top-down and scans free blocks from the end looking
3207 * for a matching base. While scanning, if any of the areas do not fit,
3208 * the base address is pulled down to fit the area. Scanning is repeated until
3209 * all the areas fit and then all necessary data structures are inserted
3210 * and the result is returned.
3211 */
3212 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3213 const size_t *sizes, int nr_vms,
3214 size_t align)
3215 {
3216 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3217 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3218 struct vmap_area **vas, *va;
3219 struct vm_struct **vms;
3220 int area, area2, last_area, term_area;
3221 unsigned long base, start, size, end, last_end, orig_start, orig_end;
3222 bool purged = false;
3223 enum fit_type type;
3224
3225 /* verify parameters and allocate data structures */
3226 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3227 for (last_area = 0, area = 0; area < nr_vms; area++) {
3228 start = offsets[area];
3229 end = start + sizes[area];
3230
3231 /* is everything aligned properly? */
3232 BUG_ON(!IS_ALIGNED(offsets[area], align));
3233 BUG_ON(!IS_ALIGNED(sizes[area], align));
3234
3235 /* detect the area with the highest address */
3236 if (start > offsets[last_area])
3237 last_area = area;
3238
3239 for (area2 = area + 1; area2 < nr_vms; area2++) {
3240 unsigned long start2 = offsets[area2];
3241 unsigned long end2 = start2 + sizes[area2];
3242
3243 BUG_ON(start2 < end && start < end2);
3244 }
3245 }
3246 last_end = offsets[last_area] + sizes[last_area];
3247
3248 if (vmalloc_end - vmalloc_start < last_end) {
3249 WARN_ON(true);
3250 return NULL;
3251 }
3252
3253 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3254 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3255 if (!vas || !vms)
3256 goto err_free2;
3257
3258 for (area = 0; area < nr_vms; area++) {
3259 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3260 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3261 if (!vas[area] || !vms[area])
3262 goto err_free;
3263 }
3264 retry:
3265 spin_lock(&free_vmap_area_lock);
3266
3267 /* start scanning - we scan from the top, begin with the last area */
3268 area = term_area = last_area;
3269 start = offsets[area];
3270 end = start + sizes[area];
3271
3272 va = pvm_find_va_enclose_addr(vmalloc_end);
3273 base = pvm_determine_end_from_reverse(&va, align) - end;
3274
3275 while (true) {
3276 /*
3277 * base might have underflowed, add last_end before
3278 * comparing.
3279 */
3280 if (base + last_end < vmalloc_start + last_end)
3281 goto overflow;
3282
3283 /*
3284 * Fitting base has not been found.
3285 */
3286 if (va == NULL)
3287 goto overflow;
3288
3289 /*
3290 * If required width exceeds current VA block, move
3291 * base downwards and then recheck.
3292 */
3293 if (base + end > va->va_end) {
3294 base = pvm_determine_end_from_reverse(&va, align) - end;
3295 term_area = area;
3296 continue;
3297 }
3298
3299 /*
3300 * If this VA does not fit, move base downwards and recheck.
3301 */
3302 if (base + start < va->va_start) {
3303 va = node_to_va(rb_prev(&va->rb_node));
3304 base = pvm_determine_end_from_reverse(&va, align) - end;
3305 term_area = area;
3306 continue;
3307 }
3308
3309 /*
3310 * This area fits, move on to the previous one. If
3311 * the previous one is the terminal one, we're done.
3312 */
3313 area = (area + nr_vms - 1) % nr_vms;
3314 if (area == term_area)
3315 break;
3316
3317 start = offsets[area];
3318 end = start + sizes[area];
3319 va = pvm_find_va_enclose_addr(base + end);
3320 }
3321
3322 /* we've found a fitting base, insert all va's */
3323 for (area = 0; area < nr_vms; area++) {
3324 int ret;
3325
3326 start = base + offsets[area];
3327 size = sizes[area];
3328
3329 va = pvm_find_va_enclose_addr(start);
3330 if (WARN_ON_ONCE(va == NULL))
3331 /* It is a BUG(), but trigger recovery instead. */
3332 goto recovery;
3333
3334 type = classify_va_fit_type(va, start, size);
3335 if (WARN_ON_ONCE(type == NOTHING_FIT))
3336 /* It is a BUG(), but trigger recovery instead. */
3337 goto recovery;
3338
3339 ret = adjust_va_to_fit_type(va, start, size, type);
3340 if (unlikely(ret))
3341 goto recovery;
3342
3343 /* Allocated area. */
3344 va = vas[area];
3345 va->va_start = start;
3346 va->va_end = start + size;
3347 }
3348
3349 spin_unlock(&free_vmap_area_lock);
3350
3351 /* populate the kasan shadow space */
3352 for (area = 0; area < nr_vms; area++) {
3353 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3354 goto err_free_shadow;
3355
3356 kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3357 sizes[area]);
3358 }
3359
3360 /* insert all vm's */
3361 spin_lock(&vmap_area_lock);
3362 for (area = 0; area < nr_vms; area++) {
3363 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3364
3365 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3366 pcpu_get_vm_areas);
3367 }
3368 spin_unlock(&vmap_area_lock);
3369
3370 kfree(vas);
3371 return vms;
3372
3373 recovery:
3374 /*
3375 * Remove previously allocated areas. There is no
3376 * need to remove these areas from the busy tree,
3377 * because they are inserted only in the final step,
3378 * when pcpu_get_vm_areas() succeeds.
3379 */
3380 while (area--) {
3381 orig_start = vas[area]->va_start;
3382 orig_end = vas[area]->va_end;
3383 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3384 &free_vmap_area_list);
3385 kasan_release_vmalloc(orig_start, orig_end,
3386 va->va_start, va->va_end);
3387 vas[area] = NULL;
3388 }
3389
3390 overflow:
3391 spin_unlock(&free_vmap_area_lock);
3392 if (!purged) {
3393 purge_vmap_area_lazy();
3394 purged = true;
3395
3396 /* Before "retry", check if we recover. */
3397 for (area = 0; area < nr_vms; area++) {
3398 if (vas[area])
3399 continue;
3400
3401 vas[area] = kmem_cache_zalloc(
3402 vmap_area_cachep, GFP_KERNEL);
3403 if (!vas[area])
3404 goto err_free;
3405 }
3406
3407 goto retry;
3408 }
3409
3410 err_free:
3411 for (area = 0; area < nr_vms; area++) {
3412 if (vas[area])
3413 kmem_cache_free(vmap_area_cachep, vas[area]);
3414
3415 kfree(vms[area]);
3416 }
3417 err_free2:
3418 kfree(vas);
3419 kfree(vms);
3420 return NULL;
3421
3422 err_free_shadow:
3423 spin_lock(&free_vmap_area_lock);
3424 /*
3425 * We release all the vmalloc shadows, even the ones for regions that
3426 * hadn't been successfully added. This relies on kasan_release_vmalloc
3427 * being able to tolerate this case.
3428 */
3429 for (area = 0; area < nr_vms; area++) {
3430 orig_start = vas[area]->va_start;
3431 orig_end = vas[area]->va_end;
3432 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3433 &free_vmap_area_list);
3434 kasan_release_vmalloc(orig_start, orig_end,
3435 va->va_start, va->va_end);
3436 vas[area] = NULL;
3437 kfree(vms[area]);
3438 }
3439 spin_unlock(&free_vmap_area_lock);
3440 kfree(vas);
3441 kfree(vms);
3442 return NULL;
3443 }
3444
3445 /**
3446 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3447 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3448 * @nr_vms: the number of allocated areas
3449 *
3450 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3451 */
3452 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3453 {
3454 int i;
3455
3456 for (i = 0; i < nr_vms; i++)
3457 free_vm_area(vms[i]);
3458 kfree(vms);
3459 }
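
/*
 * Example (illustrative sketch): requesting two congruent areas, as
 * the percpu allocator does. Offsets and sizes must be @align aligned
 * and the [offset, offset + size) ranges must not overlap. SZ_*
 * constants are from <linux/sizes.h>; the values are hypothetical.
 *
 *	const unsigned long offsets[] = { 0, SZ_1M };
 *	const size_t sizes[] = { SZ_64K, SZ_64K };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, 2);
 */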
3460 #endif /* CONFIG_SMP */
3461
3462 #ifdef CONFIG_PROC_FS
3463 static void *s_start(struct seq_file *m, loff_t *pos)
3464 __acquires(&vmap_purge_lock)
3465 __acquires(&vmap_area_lock)
3466 {
3467 mutex_lock(&vmap_purge_lock);
3468 spin_lock(&vmap_area_lock);
3469
3470 return seq_list_start(&vmap_area_list, *pos);
3471 }
3472
3473 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3474 {
3475 return seq_list_next(p, &vmap_area_list, pos);
3476 }
3477
3478 static void s_stop(struct seq_file *m, void *p)
3479 __releases(&vmap_purge_lock)
3480 __releases(&vmap_area_lock)
3481 {
3482 spin_unlock(&vmap_area_lock);
3483 mutex_unlock(&vmap_purge_lock);
3484 }
3485
3486 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3487 {
3488 if (IS_ENABLED(CONFIG_NUMA)) {
3489 unsigned int nr, *counters = m->private;
3490
3491 if (!counters)
3492 return;
3493
3494 if (v->flags & VM_UNINITIALIZED)
3495 return;
3496 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3497 smp_rmb();
3498
3499 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3500
3501 for (nr = 0; nr < v->nr_pages; nr++)
3502 counters[page_to_nid(v->pages[nr])]++;
3503
3504 for_each_node_state(nr, N_HIGH_MEMORY)
3505 if (counters[nr])
3506 seq_printf(m, " N%u=%u", nr, counters[nr]);
3507 }
3508 }
3509
3510 static void show_purge_info(struct seq_file *m)
3511 {
3512 struct llist_node *head;
3513 struct vmap_area *va;
3514
3515 head = READ_ONCE(vmap_purge_list.first);
3516 if (head == NULL)
3517 return;
3518
3519 llist_for_each_entry(va, head, purge_list) {
3520 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3521 (void *)va->va_start, (void *)va->va_end,
3522 va->va_end - va->va_start);
3523 }
3524 }
3525
3526 static int s_show(struct seq_file *m, void *p)
3527 {
3528 struct vmap_area *va;
3529 struct vm_struct *v;
3530
3531 va = list_entry(p, struct vmap_area, list);
3532
3533 /*
3534 * s_show can encounter a race with remove_vm_area(): a NULL ->vm
3535 * means the vmap area is being torn down or is a vm_map_ram allocation.
3536 */
3537 if (!va->vm) {
3538 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3539 (void *)va->va_start, (void *)va->va_end,
3540 va->va_end - va->va_start);
3541
3542 return 0;
3543 }
3544
3545 v = va->vm;
3546
3547 seq_printf(m, "0x%pK-0x%pK %7ld",
3548 v->addr, v->addr + v->size, v->size);
3549
3550 if (v->caller)
3551 seq_printf(m, " %pS", v->caller);
3552
3553 if (v->nr_pages)
3554 seq_printf(m, " pages=%d", v->nr_pages);
3555
3556 if (v->phys_addr)
3557 seq_printf(m, " phys=%pa", &v->phys_addr);
3558
3559 if (v->flags & VM_IOREMAP)
3560 seq_puts(m, " ioremap");
3561
3562 if (v->flags & VM_ALLOC)
3563 seq_puts(m, " vmalloc");
3564
3565 if (v->flags & VM_MAP)
3566 seq_puts(m, " vmap");
3567
3568 if (v->flags & VM_USERMAP)
3569 seq_puts(m, " user");
3570
3571 if (v->flags & VM_DMA_COHERENT)
3572 seq_puts(m, " dma-coherent");
3573
3574 if (is_vmalloc_addr(v->pages))
3575 seq_puts(m, " vpages");
3576
3577 show_numa_info(m, v);
3578 seq_putc(m, '\n');
3579
3580 /*
3581 * As a final step, dump "unpurged" areas. Note,
3582 * that entire "/proc/vmallocinfo" output will not
3583 * be address sorted, because the purge list is not
3584 * sorted.
3585 */
3586 if (list_is_last(&va->list, &vmap_area_list))
3587 show_purge_info(m);
3588
3589 return 0;
3590 }
3591
3592 static const struct seq_operations vmalloc_op = {
3593 .start = s_start,
3594 .next = s_next,
3595 .stop = s_stop,
3596 .show = s_show,
3597 };
3598
3599 static int __init proc_vmalloc_init(void)
3600 {
3601 if (IS_ENABLED(CONFIG_NUMA))
3602 proc_create_seq_private("vmallocinfo", 0400, NULL,
3603 &vmalloc_op,
3604 nr_node_ids * sizeof(unsigned int), NULL);
3605 else
3606 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3607 return 0;
3608 }
3609 module_init(proc_vmalloc_init);
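
/*
 * Illustrative /proc/vmallocinfo line, following the seq_printf()
 * formats in s_show() above (addresses are %pK-hashed in real output;
 * the values below are made up). The 20480-byte size covers four
 * pages plus the guard page added in __get_vm_area_node():
 *
 *	0xffffc90000001000-0xffffc90000006000   20480 some_fn+0x40/0x120 pages=4 vmalloc N0=4
 */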
3610
3611 #endif