1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 1993 Linus Torvalds
4 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7 * Numa awareness, Christoph Lameter, SGI, June 2005
8 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9 */
10
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/memcontrol.h>
35 #include <linux/llist.h>
36 #include <linux/uio.h>
37 #include <linux/bitops.h>
38 #include <linux/rbtree_augmented.h>
39 #include <linux/overflow.h>
40 #include <linux/pgtable.h>
41 #include <linux/hugetlb.h>
42 #include <linux/sched/mm.h>
43 #include <asm/tlbflush.h>
44 #include <asm/shmparam.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/vmalloc.h>
48
49 #include "internal.h"
50 #include "pgalloc-track.h"
51
52 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
53 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
54
55 static int __init set_nohugeiomap(char *str)
56 {
57 ioremap_max_page_shift = PAGE_SHIFT;
58 return 0;
59 }
60 early_param("nohugeiomap", set_nohugeiomap);
61 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
62 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
63 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
64
65 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
66 static bool __ro_after_init vmap_allow_huge = true;
67
68 static int __init set_nohugevmalloc(char *str)
69 {
70 vmap_allow_huge = false;
71 return 0;
72 }
73 early_param("nohugevmalloc", set_nohugevmalloc);
74 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
75 static const bool vmap_allow_huge = false;
76 #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
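/*
 * For example, booting with the command-line options below disables both
 * huge ioremap and huge vmalloc mappings on architectures that otherwise
 * support them, by forcing ioremap_max_page_shift back to PAGE_SHIFT and
 * vmap_allow_huge to false:
 *
 *	... nohugeiomap nohugevmalloc ...
 */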
77
78 bool is_vmalloc_addr(const void *x)
79 {
80 unsigned long addr = (unsigned long)kasan_reset_tag(x);
81
82 return addr >= VMALLOC_START && addr < VMALLOC_END;
83 }
84 EXPORT_SYMBOL(is_vmalloc_addr);
85
86 struct vfree_deferred {
87 struct llist_head list;
88 struct work_struct wq;
89 };
90 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
91
92 /*** Page table manipulation functions ***/
93 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
94 phys_addr_t phys_addr, pgprot_t prot,
95 unsigned int max_page_shift, pgtbl_mod_mask *mask)
96 {
97 pte_t *pte;
98 u64 pfn;
99 unsigned long size = PAGE_SIZE;
100
101 pfn = phys_addr >> PAGE_SHIFT;
102 pte = pte_alloc_kernel_track(pmd, addr, mask);
103 if (!pte)
104 return -ENOMEM;
105 do {
106 BUG_ON(!pte_none(ptep_get(pte)));
107
108 #ifdef CONFIG_HUGETLB_PAGE
109 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
110 if (size != PAGE_SIZE) {
111 pte_t entry = pfn_pte(pfn, prot);
112
113 entry = arch_make_huge_pte(entry, ilog2(size), 0);
114 set_huge_pte_at(&init_mm, addr, pte, entry, size);
115 pfn += PFN_DOWN(size);
116 continue;
117 }
118 #endif
119 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
120 pfn++;
121 } while (pte += PFN_DOWN(size), addr += size, addr != end);
122 *mask |= PGTBL_PTE_MODIFIED;
123 return 0;
124 }
125
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
127 phys_addr_t phys_addr, pgprot_t prot,
128 unsigned int max_page_shift)
129 {
130 if (max_page_shift < PMD_SHIFT)
131 return 0;
132
133 if (!arch_vmap_pmd_supported(prot))
134 return 0;
135
136 if ((end - addr) != PMD_SIZE)
137 return 0;
138
139 if (!IS_ALIGNED(addr, PMD_SIZE))
140 return 0;
141
142 if (!IS_ALIGNED(phys_addr, PMD_SIZE))
143 return 0;
144
145 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
146 return 0;
147
148 return pmd_set_huge(pmd, phys_addr, prot);
149 }
150
151 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
152 phys_addr_t phys_addr, pgprot_t prot,
153 unsigned int max_page_shift, pgtbl_mod_mask *mask)
154 {
155 pmd_t *pmd;
156 unsigned long next;
157
158 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
159 if (!pmd)
160 return -ENOMEM;
161 do {
162 next = pmd_addr_end(addr, end);
163
164 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
165 max_page_shift)) {
166 *mask |= PGTBL_PMD_MODIFIED;
167 continue;
168 }
169
170 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
171 return -ENOMEM;
172 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
173 return 0;
174 }
175
176 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
177 phys_addr_t phys_addr, pgprot_t prot,
178 unsigned int max_page_shift)
179 {
180 if (max_page_shift < PUD_SHIFT)
181 return 0;
182
183 if (!arch_vmap_pud_supported(prot))
184 return 0;
185
186 if ((end - addr) != PUD_SIZE)
187 return 0;
188
189 if (!IS_ALIGNED(addr, PUD_SIZE))
190 return 0;
191
192 if (!IS_ALIGNED(phys_addr, PUD_SIZE))
193 return 0;
194
195 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
196 return 0;
197
198 return pud_set_huge(pud, phys_addr, prot);
199 }
200
201 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
202 phys_addr_t phys_addr, pgprot_t prot,
203 unsigned int max_page_shift, pgtbl_mod_mask *mask)
204 {
205 pud_t *pud;
206 unsigned long next;
207
208 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
209 if (!pud)
210 return -ENOMEM;
211 do {
212 next = pud_addr_end(addr, end);
213
214 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
215 max_page_shift)) {
216 *mask |= PGTBL_PUD_MODIFIED;
217 continue;
218 }
219
220 if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
221 max_page_shift, mask))
222 return -ENOMEM;
223 } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
224 return 0;
225 }
226
227 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
228 phys_addr_t phys_addr, pgprot_t prot,
229 unsigned int max_page_shift)
230 {
231 if (max_page_shift < P4D_SHIFT)
232 return 0;
233
234 if (!arch_vmap_p4d_supported(prot))
235 return 0;
236
237 if ((end - addr) != P4D_SIZE)
238 return 0;
239
240 if (!IS_ALIGNED(addr, P4D_SIZE))
241 return 0;
242
243 if (!IS_ALIGNED(phys_addr, P4D_SIZE))
244 return 0;
245
246 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
247 return 0;
248
249 return p4d_set_huge(p4d, phys_addr, prot);
250 }
251
252 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
253 phys_addr_t phys_addr, pgprot_t prot,
254 unsigned int max_page_shift, pgtbl_mod_mask *mask)
255 {
256 p4d_t *p4d;
257 unsigned long next;
258
259 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
260 if (!p4d)
261 return -ENOMEM;
262 do {
263 next = p4d_addr_end(addr, end);
264
265 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
266 max_page_shift)) {
267 *mask |= PGTBL_P4D_MODIFIED;
268 continue;
269 }
270
271 if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
272 max_page_shift, mask))
273 return -ENOMEM;
274 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
275 return 0;
276 }
277
278 static int vmap_range_noflush(unsigned long addr, unsigned long end,
279 phys_addr_t phys_addr, pgprot_t prot,
280 unsigned int max_page_shift)
281 {
282 pgd_t *pgd;
283 unsigned long start;
284 unsigned long next;
285 int err;
286 pgtbl_mod_mask mask = 0;
287
288 might_sleep();
289 BUG_ON(addr >= end);
290
291 start = addr;
292 pgd = pgd_offset_k(addr);
293 do {
294 next = pgd_addr_end(addr, end);
295 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
296 max_page_shift, &mask);
297 if (err)
298 break;
299 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
300
301 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
302 arch_sync_kernel_mappings(start, end);
303
304 return err;
305 }
306
307 int vmap_page_range(unsigned long addr, unsigned long end,
308 phys_addr_t phys_addr, pgprot_t prot)
309 {
310 int err;
311
312 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
313 ioremap_max_page_shift);
314 flush_cache_vmap(addr, end);
315 if (!err)
316 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
317 ioremap_max_page_shift);
318 return err;
319 }
320
321 int ioremap_page_range(unsigned long addr, unsigned long end,
322 phys_addr_t phys_addr, pgprot_t prot)
323 {
324 struct vm_struct *area;
325
326 area = find_vm_area((void *)addr);
327 if (!area || !(area->flags & VM_IOREMAP)) {
328 WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
329 return -EINVAL;
330 }
331 if (addr != (unsigned long)area->addr ||
332 (void *)end != area->addr + get_vm_area_size(area)) {
333 WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
334 addr, end, (long)area->addr,
335 (long)area->addr + get_vm_area_size(area));
336 return -ERANGE;
337 }
338 return vmap_page_range(addr, end, phys_addr, prot);
339 }
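/*
 * A typical caller is an arch or generic ioremap() implementation, which
 * first reserves a VM_IOREMAP area covering the whole request and only then
 * maps it. A minimal sketch (error and offset handling trimmed, details
 * vary between implementations):
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */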
340
341 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
342 pgtbl_mod_mask *mask)
343 {
344 pte_t *pte;
345
346 pte = pte_offset_kernel(pmd, addr);
347 do {
348 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
349 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
350 } while (pte++, addr += PAGE_SIZE, addr != end);
351 *mask |= PGTBL_PTE_MODIFIED;
352 }
353
354 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
355 pgtbl_mod_mask *mask)
356 {
357 pmd_t *pmd;
358 unsigned long next;
359 int cleared;
360
361 pmd = pmd_offset(pud, addr);
362 do {
363 next = pmd_addr_end(addr, end);
364
365 cleared = pmd_clear_huge(pmd);
366 if (cleared || pmd_bad(*pmd))
367 *mask |= PGTBL_PMD_MODIFIED;
368
369 if (cleared)
370 continue;
371 if (pmd_none_or_clear_bad(pmd))
372 continue;
373 vunmap_pte_range(pmd, addr, next, mask);
374
375 cond_resched();
376 } while (pmd++, addr = next, addr != end);
377 }
378
379 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
380 pgtbl_mod_mask *mask)
381 {
382 pud_t *pud;
383 unsigned long next;
384 int cleared;
385
386 pud = pud_offset(p4d, addr);
387 do {
388 next = pud_addr_end(addr, end);
389
390 cleared = pud_clear_huge(pud);
391 if (cleared || pud_bad(*pud))
392 *mask |= PGTBL_PUD_MODIFIED;
393
394 if (cleared)
395 continue;
396 if (pud_none_or_clear_bad(pud))
397 continue;
398 vunmap_pmd_range(pud, addr, next, mask);
399 } while (pud++, addr = next, addr != end);
400 }
401
402 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
403 pgtbl_mod_mask *mask)
404 {
405 p4d_t *p4d;
406 unsigned long next;
407
408 p4d = p4d_offset(pgd, addr);
409 do {
410 next = p4d_addr_end(addr, end);
411
412 p4d_clear_huge(p4d);
413 if (p4d_bad(*p4d))
414 *mask |= PGTBL_P4D_MODIFIED;
415
416 if (p4d_none_or_clear_bad(p4d))
417 continue;
418 vunmap_pud_range(p4d, addr, next, mask);
419 } while (p4d++, addr = next, addr != end);
420 }
421
422 /*
423 * vunmap_range_noflush is similar to vunmap_range, but does not
424 * flush caches or TLBs.
425 *
426 * The caller is responsible for calling flush_cache_vunmap() before calling
427 * this function, and flush_tlb_kernel_range() after it has returned
428 * successfully (and before the addresses are expected to cause a page fault
429 * or be re-mapped for something else, if TLB flushes are being delayed or
430 * coalesced).
431 *
432 * This is an internal function only. Do not use outside mm/.
433 */
434 void __vunmap_range_noflush(unsigned long start, unsigned long end)
435 {
436 unsigned long next;
437 pgd_t *pgd;
438 unsigned long addr = start;
439 pgtbl_mod_mask mask = 0;
440
441 BUG_ON(addr >= end);
442 pgd = pgd_offset_k(addr);
443 do {
444 next = pgd_addr_end(addr, end);
445 if (pgd_bad(*pgd))
446 mask |= PGTBL_PGD_MODIFIED;
447 if (pgd_none_or_clear_bad(pgd))
448 continue;
449 vunmap_p4d_range(pgd, addr, next, &mask);
450 } while (pgd++, addr = next, addr != end);
451
452 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
453 arch_sync_kernel_mappings(start, end);
454 }
455
456 void vunmap_range_noflush(unsigned long start, unsigned long end)
457 {
458 kmsan_vunmap_range_noflush(start, end);
459 __vunmap_range_noflush(start, end);
460 }
461
462 /**
463 * vunmap_range - unmap kernel virtual addresses
464 * @addr: start of the VM area to unmap
465 * @end: end of the VM area to unmap (non-inclusive)
466 *
467 * Clears any present PTEs in the virtual address range, flushes TLBs and
468 * caches. Any subsequent access to the address before it has been re-mapped
469 * is a kernel bug.
470 */
471 void vunmap_range(unsigned long addr, unsigned long end)
472 {
473 flush_cache_vunmap(addr, end);
474 vunmap_range_noflush(addr, end);
475 flush_tlb_kernel_range(addr, end);
476 }
477
478 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
480 pgtbl_mod_mask *mask)
481 {
482 pte_t *pte;
483
484 /*
485 * nr is a running index into the array which helps higher level
486 * callers keep track of where we're up to.
487 */
488
489 pte = pte_alloc_kernel_track(pmd, addr, mask);
490 if (!pte)
491 return -ENOMEM;
492 do {
493 struct page *page = pages[*nr];
494
495 if (WARN_ON(!pte_none(ptep_get(pte))))
496 return -EBUSY;
497 if (WARN_ON(!page))
498 return -ENOMEM;
499 if (WARN_ON(!pfn_valid(page_to_pfn(page))))
500 return -EINVAL;
501
502 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
503 (*nr)++;
504 } while (pte++, addr += PAGE_SIZE, addr != end);
505 *mask |= PGTBL_PTE_MODIFIED;
506 return 0;
507 }
508
509 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
510 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
511 pgtbl_mod_mask *mask)
512 {
513 pmd_t *pmd;
514 unsigned long next;
515
516 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
517 if (!pmd)
518 return -ENOMEM;
519 do {
520 next = pmd_addr_end(addr, end);
521 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
522 return -ENOMEM;
523 } while (pmd++, addr = next, addr != end);
524 return 0;
525 }
526
527 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
528 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
529 pgtbl_mod_mask *mask)
530 {
531 pud_t *pud;
532 unsigned long next;
533
534 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
535 if (!pud)
536 return -ENOMEM;
537 do {
538 next = pud_addr_end(addr, end);
539 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
540 return -ENOMEM;
541 } while (pud++, addr = next, addr != end);
542 return 0;
543 }
544
545 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
546 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
547 pgtbl_mod_mask *mask)
548 {
549 p4d_t *p4d;
550 unsigned long next;
551
552 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
553 if (!p4d)
554 return -ENOMEM;
555 do {
556 next = p4d_addr_end(addr, end);
557 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
558 return -ENOMEM;
559 } while (p4d++, addr = next, addr != end);
560 return 0;
561 }
562
563 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
564 pgprot_t prot, struct page **pages)
565 {
566 unsigned long start = addr;
567 pgd_t *pgd;
568 unsigned long next;
569 int err = 0;
570 int nr = 0;
571 pgtbl_mod_mask mask = 0;
572
573 BUG_ON(addr >= end);
574 pgd = pgd_offset_k(addr);
575 do {
576 next = pgd_addr_end(addr, end);
577 if (pgd_bad(*pgd))
578 mask |= PGTBL_PGD_MODIFIED;
579 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
580 if (err)
581 return err;
582 } while (pgd++, addr = next, addr != end);
583
584 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
585 arch_sync_kernel_mappings(start, end);
586
587 return 0;
588 }
589
590 /*
591 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
592 * flush caches.
593 *
594 * The caller is responsible for calling flush_cache_vmap() after this
595 * function returns successfully and before the addresses are accessed.
596 *
597 * This is an internal function only. Do not use outside mm/.
598 */
599 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
600 pgprot_t prot, struct page **pages, unsigned int page_shift)
601 {
602 unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
603
604 WARN_ON(page_shift < PAGE_SHIFT);
605
606 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
607 page_shift == PAGE_SHIFT)
608 return vmap_small_pages_range_noflush(addr, end, prot, pages);
609
610 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
611 int err;
612
613 err = vmap_range_noflush(addr, addr + (1UL << page_shift),
614 page_to_phys(pages[i]), prot,
615 page_shift);
616 if (err)
617 return err;
618
619 addr += 1UL << page_shift;
620 }
621
622 return 0;
623 }
624
625 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
626 pgprot_t prot, struct page **pages, unsigned int page_shift)
627 {
628 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
629 page_shift);
630
631 if (ret)
632 return ret;
633 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
634 }
635
636 /**
637 * vmap_pages_range - map pages to a kernel virtual address
638 * @addr: start of the VM area to map
639 * @end: end of the VM area to map (non-inclusive)
640 * @prot: page protection flags to use
641 * @pages: pages to map (always PAGE_SIZE pages)
642 * @page_shift: maximum shift that the pages may be mapped with, @pages must
643 * be aligned and contiguous up to at least this shift.
644 *
645 * RETURNS:
646 * 0 on success, -errno on failure.
647 */
648 static int vmap_pages_range(unsigned long addr, unsigned long end,
649 pgprot_t prot, struct page **pages, unsigned int page_shift)
650 {
651 int err;
652
653 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
654 flush_cache_vmap(addr, end);
655 return err;
656 }
657
658 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
659 unsigned long end)
660 {
661 might_sleep();
662 if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
663 return -EINVAL;
664 if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
665 return -EINVAL;
666 if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
667 return -EINVAL;
668 if ((end - start) >> PAGE_SHIFT > totalram_pages())
669 return -E2BIG;
670 if (start < (unsigned long)area->addr ||
671 (void *)end > area->addr + get_vm_area_size(area))
672 return -ERANGE;
673 return 0;
674 }
675
676 /**
677 * vm_area_map_pages - map pages inside given sparse vm_area
678 * @area: vm_area
679 * @start: start address inside vm_area
680 * @end: end address inside vm_area
681 * @pages: pages to map (always PAGE_SIZE pages)
682 */
683 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
684 unsigned long end, struct page **pages)
685 {
686 int err;
687
688 err = check_sparse_vm_area(area, start, end);
689 if (err)
690 return err;
691
692 return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
693 }
694
695 /**
696 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
697 * @area: vm_area
698 * @start: start address inside vm_area
699 * @end: end address inside vm_area
700 */
701 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
702 unsigned long end)
703 {
704 if (check_sparse_vm_area(area, start, end))
705 return;
706
707 vunmap_range(start, end);
708 }
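/*
 * Both helpers above operate on a VM_SPARSE area, where only parts of the
 * range are backed by pages at any given time. A minimal sketch of a user;
 * the SZ_4M size, "off", "npages" and "pages" below are illustrative, and
 * "off" is assumed to be page-aligned and inside the area:
 *
 *	struct vm_struct *area = get_vm_area(SZ_4M, VM_SPARSE);
 *	unsigned long start = (unsigned long)area->addr + off;
 *	int err;
 *
 *	err = vm_area_map_pages(area, start, start + npages * PAGE_SIZE, pages);
 *	...
 *	vm_area_unmap_pages(area, start, start + npages * PAGE_SIZE);
 */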
709
710 int is_vmalloc_or_module_addr(const void *x)
711 {
712 /*
713 * ARM, x86-64 and sparc64 put modules in a special place,
714 * and fall back on vmalloc() if that fails. Others
715 * just put them in the vmalloc space.
716 */
717 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
718 unsigned long addr = (unsigned long)kasan_reset_tag(x);
719 if (addr >= MODULES_VADDR && addr < MODULES_END)
720 return 1;
721 #endif
722 return is_vmalloc_addr(x);
723 }
724 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
725
726 /*
727 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
728 * return the tail page that corresponds to the base page address, which
729 * matches small vmap mappings.
730 */
731 struct page *vmalloc_to_page(const void *vmalloc_addr)
732 {
733 unsigned long addr = (unsigned long) vmalloc_addr;
734 struct page *page = NULL;
735 pgd_t *pgd = pgd_offset_k(addr);
736 p4d_t *p4d;
737 pud_t *pud;
738 pmd_t *pmd;
739 pte_t *ptep, pte;
740
741 /*
742 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
743 * architectures that do not vmalloc module space
744 */
745 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
746
747 if (pgd_none(*pgd))
748 return NULL;
749 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
750 return NULL; /* XXX: no allowance for huge pgd */
751 if (WARN_ON_ONCE(pgd_bad(*pgd)))
752 return NULL;
753
754 p4d = p4d_offset(pgd, addr);
755 if (p4d_none(*p4d))
756 return NULL;
757 if (p4d_leaf(*p4d))
758 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
759 if (WARN_ON_ONCE(p4d_bad(*p4d)))
760 return NULL;
761
762 pud = pud_offset(p4d, addr);
763 if (pud_none(*pud))
764 return NULL;
765 if (pud_leaf(*pud))
766 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
767 if (WARN_ON_ONCE(pud_bad(*pud)))
768 return NULL;
769
770 pmd = pmd_offset(pud, addr);
771 if (pmd_none(*pmd))
772 return NULL;
773 if (pmd_leaf(*pmd))
774 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
775 if (WARN_ON_ONCE(pmd_bad(*pmd)))
776 return NULL;
777
778 ptep = pte_offset_kernel(pmd, addr);
779 pte = ptep_get(ptep);
780 if (pte_present(pte))
781 page = pte_page(pte);
782
783 return page;
784 }
785 EXPORT_SYMBOL(vmalloc_to_page);
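/*
 * For example, a driver that needs the pages backing a vmalloc()'ed buffer
 * (e.g. to build a scatterlist) can walk it page by page; "buf" and "i"
 * below are illustrative:
 *
 *	struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);
 */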
786
787 /*
788 * Map a vmalloc()-space virtual address to the physical page frame number.
789 */
790 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
791 {
792 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
793 }
794 EXPORT_SYMBOL(vmalloc_to_pfn);
795
796
797 /*** Global kva allocator ***/
798
799 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
800 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
801
802
803 static DEFINE_SPINLOCK(vmap_area_lock);
804 static DEFINE_SPINLOCK(free_vmap_area_lock);
805 /* Export for kexec only */
806 LIST_HEAD(vmap_area_list);
807 static struct rb_root vmap_area_root = RB_ROOT;
808 static bool vmap_initialized __read_mostly;
809
810 static struct rb_root purge_vmap_area_root = RB_ROOT;
811 static LIST_HEAD(purge_vmap_area_list);
812 static DEFINE_SPINLOCK(purge_vmap_area_lock);
813
814 /*
815 * This kmem_cache is used for vmap_area objects. Instead of
816 * allocating from slab we reuse an object from this cache to
817 * make things faster, especially in the "no edge" split of
818 * a free block.
819 */
820 static struct kmem_cache *vmap_area_cachep;
821
822 /*
823 * This linked list is used in tandem with free_vmap_area_root.
824 * It gives O(1) access to prev/next to perform fast coalescing.
825 */
826 static LIST_HEAD(free_vmap_area_list);
827
828 /*
829 * This augmented red-black tree represents the free vmap space.
830 * All vmap_area objects in this tree are sorted by va->va_start
831 * address. It is used for allocation and for merging when a vmap
832 * object is released.
833 *
834 * Each vmap_area node stores the maximum available free block
835 * size of its sub-tree, right or left. This makes it possible to
836 * find the lowest-address free area that matches a request.
837 */
838 static struct rb_root free_vmap_area_root = RB_ROOT;
839
840 /*
841 * Preload a CPU with one object for the "no edge" split case. The
842 * aim is to avoid allocations from atomic context, and thus to
843 * be able to use more permissive allocation masks.
844 */
845 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
846
847 static __always_inline unsigned long
848 va_size(struct vmap_area *va)
849 {
850 return (va->va_end - va->va_start);
851 }
852
853 static __always_inline unsigned long
854 get_subtree_max_size(struct rb_node *node)
855 {
856 struct vmap_area *va;
857
858 va = rb_entry_safe(node, struct vmap_area, rb_node);
859 return va ? va->subtree_max_size : 0;
860 }
861
862 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
863 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
864
865 static void reclaim_and_purge_vmap_areas(void);
866 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
867 static void drain_vmap_area_work(struct work_struct *work);
868 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
869
870 static atomic_long_t nr_vmalloc_pages;
871
872 unsigned long vmalloc_nr_pages(void)
873 {
874 return atomic_long_read(&nr_vmalloc_pages);
875 }
876
877 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
878 static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
879 {
880 struct vmap_area *va = NULL;
881 struct rb_node *n = vmap_area_root.rb_node;
882
883 addr = (unsigned long)kasan_reset_tag((void *)addr);
884
885 while (n) {
886 struct vmap_area *tmp;
887
888 tmp = rb_entry(n, struct vmap_area, rb_node);
889 if (tmp->va_end > addr) {
890 va = tmp;
891 if (tmp->va_start <= addr)
892 break;
893
894 n = n->rb_left;
895 } else
896 n = n->rb_right;
897 }
898
899 return va;
900 }
901
902 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
903 {
904 struct rb_node *n = root->rb_node;
905
906 addr = (unsigned long)kasan_reset_tag((void *)addr);
907
908 while (n) {
909 struct vmap_area *va;
910
911 va = rb_entry(n, struct vmap_area, rb_node);
912 if (addr < va->va_start)
913 n = n->rb_left;
914 else if (addr >= va->va_end)
915 n = n->rb_right;
916 else
917 return va;
918 }
919
920 return NULL;
921 }
922
923 /*
924 * This function returns the address of the parent node
925 * and its left or right link for further processing.
926 *
927 * Otherwise NULL is returned. In that case any further
928 * attempt to insert the conflicting, overlapping range
929 * has to be declined and is considered a bug.
930 */
931 static __always_inline struct rb_node **
932 find_va_links(struct vmap_area *va,
933 struct rb_root *root, struct rb_node *from,
934 struct rb_node **parent)
935 {
936 struct vmap_area *tmp_va;
937 struct rb_node **link;
938
939 if (root) {
940 link = &root->rb_node;
941 if (unlikely(!*link)) {
942 *parent = NULL;
943 return link;
944 }
945 } else {
946 link = &from;
947 }
948
949 /*
950 * Go to the bottom of the tree. When we hit the last point
951 * we end up with the parent rb_node and the correct direction,
952 * called "link" here, to which the new va->rb_node will be attached.
953 */
954 do {
955 tmp_va = rb_entry(*link, struct vmap_area, rb_node);
956
957 /*
958 * During the traversal we also do a sanity check:
959 * warn if the new range partially (left/right) or
960 * fully overlaps an existing one.
961 */
962 if (va->va_end <= tmp_va->va_start)
963 link = &(*link)->rb_left;
964 else if (va->va_start >= tmp_va->va_end)
965 link = &(*link)->rb_right;
966 else {
967 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
968 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
969
970 return NULL;
971 }
972 } while (*link);
973
974 *parent = &tmp_va->rb_node;
975 return link;
976 }
977
978 static __always_inline struct list_head *
979 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
980 {
981 struct list_head *list;
982
983 if (unlikely(!parent))
984 /*
985 * The red-black tree where we try to find VA neighbors
986 * before merging or inserting is empty, i.e. there is
987 * no free vmap space. Normally this does not happen,
988 * but we handle the case anyway.
989 */
990 return NULL;
991
992 list = &rb_entry(parent, struct vmap_area, rb_node)->list;
993 return (&parent->rb_right == link ? list->next : list);
994 }
995
996 static __always_inline void
997 __link_va(struct vmap_area *va, struct rb_root *root,
998 struct rb_node *parent, struct rb_node **link,
999 struct list_head *head, bool augment)
1000 {
1001 /*
1002 * VA is still not in the list, but we can
1003 * identify its future previous list_head node.
1004 */
1005 if (likely(parent)) {
1006 head = &rb_entry(parent, struct vmap_area, rb_node)->list;
1007 if (&parent->rb_right != link)
1008 head = head->prev;
1009 }
1010
1011 /* Insert to the rb-tree */
1012 rb_link_node(&va->rb_node, parent, link);
1013 if (augment) {
1014 /*
1015 * Some explanation here. Just perform a simple insertion
1016 * into the tree. We do not set va->subtree_max_size to
1017 * its current size before calling rb_insert_augmented().
1018 * This is because we populate the tree from the bottom
1019 * up towards parent levels once the node _is_ in the tree.
1020 *
1021 * Therefore we set subtree_max_size to zero after insertion,
1022 * and let augment_tree_propagate_from() put everything into
1023 * the correct order later on.
1024 */
1025 rb_insert_augmented(&va->rb_node,
1026 root, &free_vmap_area_rb_augment_cb);
1027 va->subtree_max_size = 0;
1028 } else {
1029 rb_insert_color(&va->rb_node, root);
1030 }
1031
1032 /* Address-sort this list */
1033 list_add(&va->list, head);
1034 }
1035
1036 static __always_inline void
1037 link_va(struct vmap_area *va, struct rb_root *root,
1038 struct rb_node *parent, struct rb_node **link,
1039 struct list_head *head)
1040 {
1041 __link_va(va, root, parent, link, head, false);
1042 }
1043
1044 static __always_inline void
1045 link_va_augment(struct vmap_area *va, struct rb_root *root,
1046 struct rb_node *parent, struct rb_node **link,
1047 struct list_head *head)
1048 {
1049 __link_va(va, root, parent, link, head, true);
1050 }
1051
1052 static __always_inline void
1053 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1054 {
1055 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1056 return;
1057
1058 if (augment)
1059 rb_erase_augmented(&va->rb_node,
1060 root, &free_vmap_area_rb_augment_cb);
1061 else
1062 rb_erase(&va->rb_node, root);
1063
1064 list_del_init(&va->list);
1065 RB_CLEAR_NODE(&va->rb_node);
1066 }
1067
1068 static __always_inline void
1069 unlink_va(struct vmap_area *va, struct rb_root *root)
1070 {
1071 __unlink_va(va, root, false);
1072 }
1073
1074 static __always_inline void
1075 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1076 {
1077 __unlink_va(va, root, true);
1078 }
1079
1080 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1081 /*
1082 * Gets called when removing a node and rotating.
1083 */
1084 static __always_inline unsigned long
1085 compute_subtree_max_size(struct vmap_area *va)
1086 {
1087 return max3(va_size(va),
1088 get_subtree_max_size(va->rb_node.rb_left),
1089 get_subtree_max_size(va->rb_node.rb_right));
1090 }
1091
1092 static void
1093 augment_tree_propagate_check(void)
1094 {
1095 struct vmap_area *va;
1096 unsigned long computed_size;
1097
1098 list_for_each_entry(va, &free_vmap_area_list, list) {
1099 computed_size = compute_subtree_max_size(va);
1100 if (computed_size != va->subtree_max_size)
1101 pr_emerg("tree is corrupted: %lu, %lu\n",
1102 va_size(va), va->subtree_max_size);
1103 }
1104 }
1105 #endif
1106
1107 /*
1108 * This function populates subtree_max_size from the bottom to the
1109 * upper levels, starting from the VA node. The propagation must be
1110 * done when the VA size is modified by changing its va_start/va_end,
1111 * or when a VA is newly inserted into the tree.
1112 *
1113 * It means that augment_tree_propagate_from() must be called:
1114 * - after a VA has been inserted into the tree (free path);
1115 * - after a VA has been shrunk (allocation path);
1116 * - after a VA has been increased (merging path).
1117 *
1118 * Please note that this does not mean that the upper parent nodes
1119 * and their subtree_max_size are recalculated all the way up
1120 * to the root node.
1121 *
1122 *       4--8
1123 *        /\
1124 *       /  \
1125 *      /    \
1126 *    2--2  8--8
1127 *
1128 * For example, if we modify the node 4, shrinking it to 2, then
1129 * no modification is required at all. If we shrink the node 2 to 1,
1130 * only its own subtree_max_size is updated and set to 1. If we shrink
1131 * the node 8 to 6, then its subtree_max_size is set to 6 and the
1132 * parent node becomes 4--6.
1133 */
1134 static __always_inline void
1135 augment_tree_propagate_from(struct vmap_area *va)
1136 {
1137 /*
1138 * Populate the tree from bottom towards the root until
1139 * the calculated maximum available size of checked node
1140 * is equal to its current one.
1141 */
1142 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1143
1144 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1145 augment_tree_propagate_check();
1146 #endif
1147 }
1148
1149 static void
1150 insert_vmap_area(struct vmap_area *va,
1151 struct rb_root *root, struct list_head *head)
1152 {
1153 struct rb_node **link;
1154 struct rb_node *parent;
1155
1156 link = find_va_links(va, root, NULL, &parent);
1157 if (link)
1158 link_va(va, root, parent, link, head);
1159 }
1160
1161 static void
1162 insert_vmap_area_augment(struct vmap_area *va,
1163 struct rb_node *from, struct rb_root *root,
1164 struct list_head *head)
1165 {
1166 struct rb_node **link;
1167 struct rb_node *parent;
1168
1169 if (from)
1170 link = find_va_links(va, NULL, from, &parent);
1171 else
1172 link = find_va_links(va, root, NULL, &parent);
1173
1174 if (link) {
1175 link_va_augment(va, root, parent, link, head);
1176 augment_tree_propagate_from(va);
1177 }
1178 }
1179
1180 /*
1181 * Merge a de-allocated chunk of VA memory with the previous
1182 * and next free blocks. If no coalescing is done, a new
1183 * free area is inserted. If the VA has been merged, it is
1184 * freed.
1185 *
1186 * Please note, it can return NULL in case of overlapping
1187 * ranges, following a WARN() report. Although that is
1188 * buggy behaviour, the system can stay alive and keep
1189 * going.
1190 */
1191 static __always_inline struct vmap_area *
1192 __merge_or_add_vmap_area(struct vmap_area *va,
1193 struct rb_root *root, struct list_head *head, bool augment)
1194 {
1195 struct vmap_area *sibling;
1196 struct list_head *next;
1197 struct rb_node **link;
1198 struct rb_node *parent;
1199 bool merged = false;
1200
1201 /*
1202 * Find a place in the tree where VA potentially will be
1203 * inserted, unless it is merged with its sibling/siblings.
1204 */
1205 link = find_va_links(va, root, NULL, &parent);
1206 if (!link)
1207 return NULL;
1208
1209 /*
1210 * Get next node of VA to check if merging can be done.
1211 */
1212 next = get_va_next_sibling(parent, link);
1213 if (unlikely(next == NULL))
1214 goto insert;
1215
1216 /*
1217 * start            end
1218 * |                |
1219 * |<------VA------>|<-----Next----->|
1220 *                  |                |
1221 *                  start            end
1222 */
1223 if (next != head) {
1224 sibling = list_entry(next, struct vmap_area, list);
1225 if (sibling->va_start == va->va_end) {
1226 sibling->va_start = va->va_start;
1227
1228 /* Free vmap_area object. */
1229 kmem_cache_free(vmap_area_cachep, va);
1230
1231 /* Point to the new merged area. */
1232 va = sibling;
1233 merged = true;
1234 }
1235 }
1236
1237 /*
1238 * start            end
1239 * |                |
1240 * |<-----Prev----->|<------VA------>|
1241 *                  |                |
1242 *                  start            end
1243 */
1244 if (next->prev != head) {
1245 sibling = list_entry(next->prev, struct vmap_area, list);
1246 if (sibling->va_end == va->va_start) {
1247 /*
1248 * If both neighbors are coalesced, it is important
1249 * to unlink the "next" node first, followed by merging
1250 * with "previous" one. Otherwise the tree might not be
1251 * fully populated if a sibling's augmented value is
1252 * "normalized" because of rotation operations.
1253 */
1254 if (merged)
1255 __unlink_va(va, root, augment);
1256
1257 sibling->va_end = va->va_end;
1258
1259 /* Free vmap_area object. */
1260 kmem_cache_free(vmap_area_cachep, va);
1261
1262 /* Point to the new merged area. */
1263 va = sibling;
1264 merged = true;
1265 }
1266 }
1267
1268 insert:
1269 if (!merged)
1270 __link_va(va, root, parent, link, head, augment);
1271
1272 return va;
1273 }
1274
1275 static __always_inline struct vmap_area *
1276 merge_or_add_vmap_area(struct vmap_area *va,
1277 struct rb_root *root, struct list_head *head)
1278 {
1279 return __merge_or_add_vmap_area(va, root, head, false);
1280 }
1281
1282 static __always_inline struct vmap_area *
1283 merge_or_add_vmap_area_augment(struct vmap_area *va,
1284 struct rb_root *root, struct list_head *head)
1285 {
1286 va = __merge_or_add_vmap_area(va, root, head, true);
1287 if (va)
1288 augment_tree_propagate_from(va);
1289
1290 return va;
1291 }
1292
1293 static __always_inline bool
1294 is_within_this_va(struct vmap_area *va, unsigned long size,
1295 unsigned long align, unsigned long vstart)
1296 {
1297 unsigned long nva_start_addr;
1298
1299 if (va->va_start > vstart)
1300 nva_start_addr = ALIGN(va->va_start, align);
1301 else
1302 nva_start_addr = ALIGN(vstart, align);
1303
1304 /* Can be overflowed due to big size or alignment. */
1305 if (nva_start_addr + size < nva_start_addr ||
1306 nva_start_addr < vstart)
1307 return false;
1308
1309 return (nva_start_addr + size <= va->va_end);
1310 }
1311
1312 /*
1313 * Find the first free block (lowest start address) in the tree
1314 * that satisfies the request described by the passed
1315 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
1316 * the search length is adjusted to account for the worst case
1317 * alignment overhead.
1318 */
1319 static __always_inline struct vmap_area *
1320 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1321 unsigned long align, unsigned long vstart, bool adjust_search_size)
1322 {
1323 struct vmap_area *va;
1324 struct rb_node *node;
1325 unsigned long length;
1326
1327 /* Start from the root. */
1328 node = root->rb_node;
1329
1330 /* Adjust the search size for alignment overhead. */
1331 length = adjust_search_size ? size + align - 1 : size;
1332
1333 while (node) {
1334 va = rb_entry(node, struct vmap_area, rb_node);
1335
1336 if (get_subtree_max_size(node->rb_left) >= length &&
1337 vstart < va->va_start) {
1338 node = node->rb_left;
1339 } else {
1340 if (is_within_this_va(va, size, align, vstart))
1341 return va;
1342
1343 /*
1344 * It does not make sense to go deeper into the right
1345 * sub-tree if it does not have a free block that is
1346 * equal to or bigger than the requested search length.
1347 */
1348 if (get_subtree_max_size(node->rb_right) >= length) {
1349 node = node->rb_right;
1350 continue;
1351 }
1352
1353 /*
1354 * OK. We roll back and find the first right sub-tree
1355 * that satisfies the search criteria. This can happen
1356 * due to the "vstart" restriction or an alignment overhead
1357 * that is bigger than PAGE_SIZE.
1358 */
1359 while ((node = rb_parent(node))) {
1360 va = rb_entry(node, struct vmap_area, rb_node);
1361 if (is_within_this_va(va, size, align, vstart))
1362 return va;
1363
1364 if (get_subtree_max_size(node->rb_right) >= length &&
1365 vstart <= va->va_start) {
1366 /*
1367 * Shift vstart forward. Please note, we update it with the
1368 * parent's start address plus "1" because we do not want
1369 * to enter the same sub-tree after it has already been checked
1370 * and no suitable free block was found there.
1371 */
1372 vstart = va->va_start + 1;
1373 node = node->rb_right;
1374 break;
1375 }
1376 }
1377 }
1378 }
1379
1380 return NULL;
1381 }
1382
1383 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1384 #include <linux/random.h>
1385
1386 static struct vmap_area *
1387 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1388 unsigned long align, unsigned long vstart)
1389 {
1390 struct vmap_area *va;
1391
1392 list_for_each_entry(va, head, list) {
1393 if (!is_within_this_va(va, size, align, vstart))
1394 continue;
1395
1396 return va;
1397 }
1398
1399 return NULL;
1400 }
1401
1402 static void
1403 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1404 unsigned long size, unsigned long align)
1405 {
1406 struct vmap_area *va_1, *va_2;
1407 unsigned long vstart;
1408 unsigned int rnd;
1409
1410 get_random_bytes(&rnd, sizeof(rnd));
1411 vstart = VMALLOC_START + rnd;
1412
1413 va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1414 va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1415
1416 if (va_1 != va_2)
1417 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1418 va_1, va_2, vstart);
1419 }
1420 #endif
1421
1422 enum fit_type {
1423 NOTHING_FIT = 0,
1424 FL_FIT_TYPE = 1, /* full fit */
1425 LE_FIT_TYPE = 2, /* left edge fit */
1426 RE_FIT_TYPE = 3, /* right edge fit */
1427 NE_FIT_TYPE = 4 /* no edge fit */
1428 };
1429
1430 static __always_inline enum fit_type
1431 classify_va_fit_type(struct vmap_area *va,
1432 unsigned long nva_start_addr, unsigned long size)
1433 {
1434 enum fit_type type;
1435
1436 /* Check if it is within VA. */
1437 if (nva_start_addr < va->va_start ||
1438 nva_start_addr + size > va->va_end)
1439 return NOTHING_FIT;
1440
1441 /* Now classify. */
1442 if (va->va_start == nva_start_addr) {
1443 if (va->va_end == nva_start_addr + size)
1444 type = FL_FIT_TYPE;
1445 else
1446 type = LE_FIT_TYPE;
1447 } else if (va->va_end == nva_start_addr + size) {
1448 type = RE_FIT_TYPE;
1449 } else {
1450 type = NE_FIT_TYPE;
1451 }
1452
1453 return type;
1454 }
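/*
 * For example, with a free VA spanning [0x1000, 0x9000), a request for
 * 0x2000 bytes at nva_start_addr == 0x3000 leaves free space on both sides
 * and is therefore NE_FIT_TYPE. The same request at 0x1000 is LE_FIT_TYPE,
 * at 0x7000 it is RE_FIT_TYPE, and a request for the whole 0x8000 bytes at
 * 0x1000 is FL_FIT_TYPE.
 */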
1455
1456 static __always_inline int
1457 adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
1458 struct vmap_area *va, unsigned long nva_start_addr,
1459 unsigned long size)
1460 {
1461 struct vmap_area *lva = NULL;
1462 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1463
1464 if (type == FL_FIT_TYPE) {
1465 /*
1466 * No need to split VA, it fully fits.
1467 *
1468 * |               |
1469 * V      NVA      V
1470 * |---------------|
1471 */
1472 unlink_va_augment(va, root);
1473 kmem_cache_free(vmap_area_cachep, va);
1474 } else if (type == LE_FIT_TYPE) {
1475 /*
1476 * Split left edge of fit VA.
1477 *
1478 * |       |
1479 * V  NVA  V   R
1480 * |-------|-------|
1481 */
1482 va->va_start += size;
1483 } else if (type == RE_FIT_TYPE) {
1484 /*
1485 * Split right edge of fit VA.
1486 *
1487 *         |       |
1488 *     L   V  NVA  V
1489 * |-------|-------|
1490 */
1491 va->va_end = nva_start_addr;
1492 } else if (type == NE_FIT_TYPE) {
1493 /*
1494 * Split no edge of fit VA.
1495 *
1496 *     |       |
1497 *   L V  NVA  V R
1498 * |---|-------|---|
1499 */
1500 lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1501 if (unlikely(!lva)) {
1502 /*
1503 * For the percpu allocator we do not do any pre-allocation
1504 * and leave it as it is. The reason is that it most likely
1505 * never ends up with NE_FIT_TYPE splitting: percpu
1506 * allocation offsets and sizes are aligned to a fixed
1507 * align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1508 * are its main fitting cases.
1509 *
1510 * There are a few exceptions though; for example the
1511 * first allocation (early boot) when we have "one"
1512 * big free space that has to be split.
1513 *
1514 * We can also hit this path for regular "vmap"
1515 * allocations, if "this" current CPU was not preloaded.
1516 * See the comment in alloc_vmap_area() for why. If so,
1517 * GFP_NOWAIT is used instead to get an extra object for
1518 * the split. That is rare and most of the time does not
1519 * occur.
1520 *
1521 * What happens if the allocation fails? Basically, the
1522 * "overflow" path is triggered to purge lazily freed
1523 * areas to free some memory, and then the "retry" path
1524 * repeats the attempt one more time. See more details
1525 * in the alloc_vmap_area() function.
1526 */
1527 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1528 if (!lva)
1529 return -1;
1530 }
1531
1532 /*
1533 * Build the remainder.
1534 */
1535 lva->va_start = va->va_start;
1536 lva->va_end = nva_start_addr;
1537
1538 /*
1539 * Shrink this VA to remaining size.
1540 */
1541 va->va_start = nva_start_addr + size;
1542 } else {
1543 return -1;
1544 }
1545
1546 if (type != FL_FIT_TYPE) {
1547 augment_tree_propagate_from(va);
1548
1549 if (lva) /* type == NE_FIT_TYPE */
1550 insert_vmap_area_augment(lva, &va->rb_node, root, head);
1551 }
1552
1553 return 0;
1554 }
1555
1556 /*
1557 * Returns the start address of the newly allocated area on success.
1558 * Otherwise "vend" is returned, which indicates failure.
1559 */
1560 static __always_inline unsigned long
1561 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1562 unsigned long size, unsigned long align,
1563 unsigned long vstart, unsigned long vend)
1564 {
1565 bool adjust_search_size = true;
1566 unsigned long nva_start_addr;
1567 struct vmap_area *va;
1568 int ret;
1569
1570 /*
1571 * Do not adjust when:
1572 * a) align <= PAGE_SIZE, because it does not make any sense.
1573 * All blocks (their start addresses) are at least PAGE_SIZE
1574 * aligned anyway;
1575 * b) a short range where the requested size corresponds exactly to
1576 * the specified [vstart:vend] interval and the alignment is > PAGE_SIZE.
1577 * With an adjusted search length the allocation would not succeed.
1578 */
1579 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1580 adjust_search_size = false;
1581
1582 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1583 if (unlikely(!va))
1584 return vend;
1585
1586 if (va->va_start > vstart)
1587 nva_start_addr = ALIGN(va->va_start, align);
1588 else
1589 nva_start_addr = ALIGN(vstart, align);
1590
1591 /* Check the "vend" restriction. */
1592 if (nva_start_addr + size > vend)
1593 return vend;
1594
1595 /* Update the free vmap_area. */
1596 ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
1597 if (WARN_ON_ONCE(ret))
1598 return vend;
1599
1600 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1601 find_vmap_lowest_match_check(root, head, size, align);
1602 #endif
1603
1604 return nva_start_addr;
1605 }
1606
1607 /*
1608 * Free a region of KVA allocated by alloc_vmap_area
1609 */
1610 static void free_vmap_area(struct vmap_area *va)
1611 {
1612 /*
1613 * Remove from the busy tree/list.
1614 */
1615 spin_lock(&vmap_area_lock);
1616 unlink_va(va, &vmap_area_root);
1617 spin_unlock(&vmap_area_lock);
1618
1619 /*
1620 * Insert/Merge it back to the free tree/list.
1621 */
1622 spin_lock(&free_vmap_area_lock);
1623 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1624 spin_unlock(&free_vmap_area_lock);
1625 }
1626
1627 static inline void
1628 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1629 {
1630 struct vmap_area *va = NULL;
1631
1632 /*
1633 * Preload this CPU with one extra vmap_area object. It is used
1634 * when the fit type of the free area is NE_FIT_TYPE. It guarantees
1635 * that the CPU doing the allocation is preloaded.
1636 *
1637 * We do it in non-atomic context, which allows us to use more
1638 * permissive allocation masks and thus be more stable under low
1639 * memory conditions and high memory pressure.
1640 */
1641 if (!this_cpu_read(ne_fit_preload_node))
1642 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1643
1644 spin_lock(lock);
1645
1646 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1647 kmem_cache_free(vmap_area_cachep, va);
1648 }
1649
1650 /*
1651 * Allocate a region of KVA of the specified size and alignment, within the
1652 * vstart and vend.
1653 */
1654 static struct vmap_area *alloc_vmap_area(unsigned long size,
1655 unsigned long align,
1656 unsigned long vstart, unsigned long vend,
1657 int node, gfp_t gfp_mask,
1658 unsigned long va_flags)
1659 {
1660 struct vmap_area *va;
1661 unsigned long freed;
1662 unsigned long addr;
1663 int purged = 0;
1664 int ret;
1665
1666 if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
1667 return ERR_PTR(-EINVAL);
1668
1669 if (unlikely(!vmap_initialized))
1670 return ERR_PTR(-EBUSY);
1671
1672 might_sleep();
1673 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1674
1675 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1676 if (unlikely(!va))
1677 return ERR_PTR(-ENOMEM);
1678
1679 /*
1680 * Only scan the relevant parts containing pointers to other objects
1681 * to avoid false negatives.
1682 */
1683 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1684
1685 retry:
1686 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1687 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1688 size, align, vstart, vend);
1689 spin_unlock(&free_vmap_area_lock);
1690
1691 trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1692
1693 /*
1694 * If an allocation fails, the "vend" address is
1695 * returned. Therefore trigger the overflow path.
1696 */
1697 if (unlikely(addr == vend))
1698 goto overflow;
1699
1700 va->va_start = addr;
1701 va->va_end = addr + size;
1702 va->vm = NULL;
1703 va->flags = va_flags;
1704
1705 spin_lock(&vmap_area_lock);
1706 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1707 spin_unlock(&vmap_area_lock);
1708
1709 BUG_ON(!IS_ALIGNED(va->va_start, align));
1710 BUG_ON(va->va_start < vstart);
1711 BUG_ON(va->va_end > vend);
1712
1713 ret = kasan_populate_vmalloc(addr, size);
1714 if (ret) {
1715 free_vmap_area(va);
1716 return ERR_PTR(ret);
1717 }
1718
1719 return va;
1720
1721 overflow:
1722 if (!purged) {
1723 reclaim_and_purge_vmap_areas();
1724 purged = 1;
1725 goto retry;
1726 }
1727
1728 freed = 0;
1729 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1730
1731 if (freed > 0) {
1732 purged = 0;
1733 goto retry;
1734 }
1735
1736 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1737 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1738 size);
1739
1740 kmem_cache_free(vmap_area_cachep, va);
1741 return ERR_PTR(-EBUSY);
1742 }
1743
1744 int register_vmap_purge_notifier(struct notifier_block *nb)
1745 {
1746 return blocking_notifier_chain_register(&vmap_notify_list, nb);
1747 }
1748 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1749
1750 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1751 {
1752 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1753 }
1754 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
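/*
 * A subsystem that caches vmap space (the i915 shrinker is one in-tree user)
 * can hook the purge path to release its own mappings when KVA runs short.
 * A minimal sketch of such a callback; the my_driver_* names are placeholders
 * and my_driver_drop_vmaps() is assumed to return the number of pages freed:
 *
 *	static int my_driver_vmap_purge(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		unsigned long *freed = ptr;
 *
 *		*freed += my_driver_drop_vmaps();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_driver_vmap_nb = {
 *		.notifier_call = my_driver_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&my_driver_vmap_nb);
 */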
1755
1756 /*
1757 * lazy_max_pages is the maximum amount of virtual address space we gather up
1758 * before attempting to purge with a TLB flush.
1759 *
1760 * There is a tradeoff here: a larger number will cover more kernel page tables
1761 * and take slightly longer to purge, but it will linearly reduce the number of
1762 * global TLB flushes that must be performed. It would seem natural to scale
1763 * this number up linearly with the number of CPUs (because vmapping activity
1764 * could also scale linearly with the number of CPUs), however it is likely
1765 * that in practice, workloads might be constrained in other ways that mean
1766 * vmap activity will not scale linearly with CPUs. Also, I want to be
1767 * conservative and not introduce a big latency on huge systems, so go with
1768 * a less aggressive log scale. It will still be an improvement over the old
1769 * code, and it will be simple to change the scale factor if we find that it
1770 * becomes a problem on bigger systems.
1771 */
1772 static unsigned long lazy_max_pages(void)
1773 {
1774 unsigned int log;
1775
1776 log = fls(num_online_cpus());
1777
1778 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1779 }
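/*
 * As a concrete example of the scaling above, with 4K pages a 16-CPU
 * machine has fls(16) == 5, so lazy_max_pages() lets roughly
 * 5 * 32MB = 160MB of lazily freed space accumulate, while a 1024-CPU
 * machine grows that only to 11 * 32MB = 352MB.
 */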
1780
1781 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1782
1783 /*
1784 * Serialize vmap purging. There is no actual critical section protected
1785 * by this lock, but we want to avoid concurrent calls for performance
1786 * reasons and to make pcpu_get_vm_areas() more deterministic.
1787 */
1788 static DEFINE_MUTEX(vmap_purge_lock);
1789
1790 /* for per-CPU blocks */
1791 static void purge_fragmented_blocks_allcpus(void);
1792
1793 /*
1794 * Purges all lazily-freed vmap areas.
1795 */
1796 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1797 {
1798 unsigned long resched_threshold;
1799 unsigned int num_purged_areas = 0;
1800 struct list_head local_purge_list;
1801 struct vmap_area *va, *n_va;
1802
1803 lockdep_assert_held(&vmap_purge_lock);
1804
1805 spin_lock(&purge_vmap_area_lock);
1806 purge_vmap_area_root = RB_ROOT;
1807 list_replace_init(&purge_vmap_area_list, &local_purge_list);
1808 spin_unlock(&purge_vmap_area_lock);
1809
1810 if (unlikely(list_empty(&local_purge_list)))
1811 goto out;
1812
1813 start = min(start,
1814 list_first_entry(&local_purge_list,
1815 struct vmap_area, list)->va_start);
1816
1817 end = max(end,
1818 list_last_entry(&local_purge_list,
1819 struct vmap_area, list)->va_end);
1820
1821 flush_tlb_kernel_range(start, end);
1822 resched_threshold = lazy_max_pages() << 1;
1823
1824 spin_lock(&free_vmap_area_lock);
1825 list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1826 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1827 unsigned long orig_start = va->va_start;
1828 unsigned long orig_end = va->va_end;
1829
1830 /*
1831 * Finally insert or merge lazily-freed area. It is
1832 * detached and there is no need to "unlink" it from
1833 * anything.
1834 */
1835 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1836 &free_vmap_area_list);
1837
1838 if (!va)
1839 continue;
1840
1841 if (is_vmalloc_or_module_addr((void *)orig_start))
1842 kasan_release_vmalloc(orig_start, orig_end,
1843 va->va_start, va->va_end);
1844
1845 atomic_long_sub(nr, &vmap_lazy_nr);
1846 num_purged_areas++;
1847
1848 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1849 cond_resched_lock(&free_vmap_area_lock);
1850 }
1851 spin_unlock(&free_vmap_area_lock);
1852
1853 out:
1854 trace_purge_vmap_area_lazy(start, end, num_purged_areas);
1855 return num_purged_areas > 0;
1856 }
1857
1858 /*
1859 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
1860 */
1861 static void reclaim_and_purge_vmap_areas(void)
1862
1863 {
1864 mutex_lock(&vmap_purge_lock);
1865 purge_fragmented_blocks_allcpus();
1866 __purge_vmap_area_lazy(ULONG_MAX, 0);
1867 mutex_unlock(&vmap_purge_lock);
1868 }
1869
1870 static void drain_vmap_area_work(struct work_struct *work)
1871 {
1872 unsigned long nr_lazy;
1873
1874 do {
1875 mutex_lock(&vmap_purge_lock);
1876 __purge_vmap_area_lazy(ULONG_MAX, 0);
1877 mutex_unlock(&vmap_purge_lock);
1878
1879 /* Recheck if further work is required. */
1880 nr_lazy = atomic_long_read(&vmap_lazy_nr);
1881 } while (nr_lazy > lazy_max_pages());
1882 }
1883
1884 /*
1885 * Free a vmap area, with the caller ensuring that the area has been
1886 * unmapped, unlinked, and that flush_cache_vunmap() has been called
1887 * for the correct range beforehand.
1888 */
1889 static void free_vmap_area_noflush(struct vmap_area *va)
1890 {
1891 unsigned long nr_lazy_max = lazy_max_pages();
1892 unsigned long va_start = va->va_start;
1893 unsigned long nr_lazy;
1894
1895 if (WARN_ON_ONCE(!list_empty(&va->list)))
1896 return;
1897
1898 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1899 PAGE_SHIFT, &vmap_lazy_nr);
1900
1901 /*
1902 * Merge or place it into the purge tree/list.
1903 */
1904 spin_lock(&purge_vmap_area_lock);
1905 merge_or_add_vmap_area(va,
1906 &purge_vmap_area_root, &purge_vmap_area_list);
1907 spin_unlock(&purge_vmap_area_lock);
1908
1909 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
1910
1911 /* After this point, we may free va at any time */
1912 if (unlikely(nr_lazy > nr_lazy_max))
1913 schedule_work(&drain_vmap_work);
1914 }
1915
1916 /*
1917 * Free and unmap a vmap area
1918 */
1919 static void free_unmap_vmap_area(struct vmap_area *va)
1920 {
1921 flush_cache_vunmap(va->va_start, va->va_end);
1922 vunmap_range_noflush(va->va_start, va->va_end);
1923 if (debug_pagealloc_enabled_static())
1924 flush_tlb_kernel_range(va->va_start, va->va_end);
1925
1926 free_vmap_area_noflush(va);
1927 }
1928
1929 struct vmap_area *find_vmap_area(unsigned long addr)
1930 {
1931 struct vmap_area *va;
1932
1933 spin_lock(&vmap_area_lock);
1934 va = __find_vmap_area(addr, &vmap_area_root);
1935 spin_unlock(&vmap_area_lock);
1936
1937 return va;
1938 }
1939
1940 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
1941 {
1942 struct vmap_area *va;
1943
1944 spin_lock(&vmap_area_lock);
1945 va = __find_vmap_area(addr, &vmap_area_root);
1946 if (va)
1947 unlink_va(va, &vmap_area_root);
1948 spin_unlock(&vmap_area_lock);
1949
1950 return va;
1951 }
1952
1953 /*** Per cpu kva allocator ***/
1954
1955 /*
1956 * vmap space is limited especially on 32 bit architectures. Ensure there is
1957 * room for at least 16 percpu vmap blocks per CPU.
1958 */
1959 /*
1960 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1961 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1962 * instead (we just need a rough idea)
1963 */
1964 #if BITS_PER_LONG == 32
1965 #define VMALLOC_SPACE (128UL*1024*1024)
1966 #else
1967 #define VMALLOC_SPACE (128UL*1024*1024*1024)
1968 #endif
1969
1970 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
1971 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
1972 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
1973 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
1974 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
1975 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
1976 #define VMAP_BBMAP_BITS \
1977 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
1978 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
1979 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1980
1981 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
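
/*
 * Worked example (illustrative only, assuming a 64-bit build with 4K pages
 * and NR_CPUS rounded up to 64): VMALLOC_SPACE is 128G, so VMALLOC_PAGES is
 * 32M pages and VMALLOC_PAGES / 64 / 16 = 32768 bits. That is clamped by
 * VMAP_BBMAP_BITS_MAX to 1024 bits, giving VMAP_BLOCK_SIZE = 1024 * 4K = 4MB.
 */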
1982
1983 /*
1984 * Purge threshold to prevent overeager purging of fragmented blocks for
1985 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
1986 */
1987 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
1988
1989 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
1990 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
1991 #define VMAP_FLAGS_MASK 0x3
1992
1993 struct vmap_block_queue {
1994 spinlock_t lock;
1995 struct list_head free;
1996
1997 /*
1998 * An xarray requires extra memory to be allocated
1999 * dynamically. If that ever becomes an issue, an rb-tree
2000 * could be used instead.
2001 */
2002 struct xarray vmap_blocks;
2003 };
2004
2005 struct vmap_block {
2006 spinlock_t lock;
2007 struct vmap_area *va;
2008 unsigned long free, dirty;
2009 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2010 unsigned long dirty_min, dirty_max; /* dirty range */
2011 struct list_head free_list;
2012 struct rcu_head rcu_head;
2013 struct list_head purge;
2014 };
2015
2016 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2017 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2018
2019 /*
2020 * To quickly access the "vmap_block" associated with a
2021 * specific address, we use a hash.
2022 *
2023 * A per-cpu vmap_block_queue is used in two ways: to serialize
2024 * access to the free block chains among CPUs (alloc path), and it
2025 * also acts as a vmap_block hash (alloc/free paths). That is, we
2026 * overload it, since we already have a per-cpu array that can be
2027 * used as a hash table. When used as a hash, the 'cpu' passed to
2028 * per_cpu() is not actually a CPU but rather a hash index.
2029 *
2030 * The hash function is addr_to_vb_xa(), which hashes an address
2031 * to the index (in the hash) it belongs to, and then uses the
2032 * per_cpu() macro to access the array at that index.
2033 *
2034 * An example:
2035 *
2036 * CPU_1 CPU_2 CPU_0
2037 * | | |
2038 * V V V
2039 * 0 10 20 30 40 50 60
2040 * |------|------|------|------|------|------|...<vmap address space>
2041 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
2042 *
2043 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2044 * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2045 *
2046 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2047 * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2048 *
2049 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2050 * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2051 *
2052 * This technique almost always avoids lock contention on insert/remove;
2053 * the xarray spinlocks protect against any contention that remains.
2054 */
2055 static struct xarray *
2056 addr_to_vb_xa(unsigned long addr)
2057 {
2058 int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
2059
2060 return &per_cpu(vmap_block_queue, index).vmap_blocks;
2061 }
2062
2063 /*
2064 * We should probably have a fallback mechanism to allocate virtual memory
2065 * out of partially filled vmap blocks. However vmap block sizing should be
2066 * fairly reasonable according to the vmalloc size, so it shouldn't be a
2067 * big problem.
2068 */
2069
2070 static unsigned long addr_to_vb_idx(unsigned long addr)
2071 {
2072 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2073 addr /= VMAP_BLOCK_SIZE;
2074 return addr;
2075 }
2076
2077 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2078 {
2079 unsigned long addr;
2080
2081 addr = va_start + (pages_off << PAGE_SHIFT);
2082 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2083 return (void *)addr;
2084 }
2085
2086 /**
2087 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
2088 * it. The number of pages cannot exceed VMAP_BBMAP_BITS.
2089 * @order: allocate 2^order pages in the newly allocated block
2090 * @gfp_mask: flags for the page level allocator
2091 *
2092 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2093 */
2094 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2095 {
2096 struct vmap_block_queue *vbq;
2097 struct vmap_block *vb;
2098 struct vmap_area *va;
2099 struct xarray *xa;
2100 unsigned long vb_idx;
2101 int node, err;
2102 void *vaddr;
2103
2104 node = numa_node_id();
2105
2106 vb = kmalloc_node(sizeof(struct vmap_block),
2107 gfp_mask & GFP_RECLAIM_MASK, node);
2108 if (unlikely(!vb))
2109 return ERR_PTR(-ENOMEM);
2110
2111 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2112 VMALLOC_START, VMALLOC_END,
2113 node, gfp_mask,
2114 VMAP_RAM|VMAP_BLOCK);
2115 if (IS_ERR(va)) {
2116 kfree(vb);
2117 return ERR_CAST(va);
2118 }
2119
2120 vaddr = vmap_block_vaddr(va->va_start, 0);
2121 spin_lock_init(&vb->lock);
2122 vb->va = va;
2123 /* At least something should be left free */
2124 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2125 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2126 vb->free = VMAP_BBMAP_BITS - (1UL << order);
2127 vb->dirty = 0;
2128 vb->dirty_min = VMAP_BBMAP_BITS;
2129 vb->dirty_max = 0;
2130 bitmap_set(vb->used_map, 0, (1UL << order));
2131 INIT_LIST_HEAD(&vb->free_list);
2132
2133 xa = addr_to_vb_xa(va->va_start);
2134 vb_idx = addr_to_vb_idx(va->va_start);
2135 err = xa_insert(xa, vb_idx, vb, gfp_mask);
2136 if (err) {
2137 kfree(vb);
2138 free_vmap_area(va);
2139 return ERR_PTR(err);
2140 }
2141
2142 vbq = raw_cpu_ptr(&vmap_block_queue);
2143 spin_lock(&vbq->lock);
2144 list_add_tail_rcu(&vb->free_list, &vbq->free);
2145 spin_unlock(&vbq->lock);
2146
2147 return vaddr;
2148 }
2149
2150 static void free_vmap_block(struct vmap_block *vb)
2151 {
2152 struct vmap_block *tmp;
2153 struct xarray *xa;
2154
2155 xa = addr_to_vb_xa(vb->va->va_start);
2156 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2157 BUG_ON(tmp != vb);
2158
2159 spin_lock(&vmap_area_lock);
2160 unlink_va(vb->va, &vmap_area_root);
2161 spin_unlock(&vmap_area_lock);
2162
2163 free_vmap_area_noflush(vb->va);
2164 kfree_rcu(vb, rcu_head);
2165 }
2166
2167 static bool purge_fragmented_block(struct vmap_block *vb,
2168 struct vmap_block_queue *vbq, struct list_head *purge_list,
2169 bool force_purge)
2170 {
2171 if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2172 vb->dirty == VMAP_BBMAP_BITS)
2173 return false;
2174
2175 /* Don't overeagerly purge usable blocks unless requested */
2176 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2177 return false;
2178
2179 /* prevent further allocs after releasing lock */
2180 WRITE_ONCE(vb->free, 0);
2181 /* prevent purging it again */
2182 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2183 vb->dirty_min = 0;
2184 vb->dirty_max = VMAP_BBMAP_BITS;
2185 spin_lock(&vbq->lock);
2186 list_del_rcu(&vb->free_list);
2187 spin_unlock(&vbq->lock);
2188 list_add_tail(&vb->purge, purge_list);
2189 return true;
2190 }
2191
2192 static void free_purged_blocks(struct list_head *purge_list)
2193 {
2194 struct vmap_block *vb, *n_vb;
2195
2196 list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2197 list_del(&vb->purge);
2198 free_vmap_block(vb);
2199 }
2200 }
2201
2202 static void purge_fragmented_blocks(int cpu)
2203 {
2204 LIST_HEAD(purge);
2205 struct vmap_block *vb;
2206 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2207
2208 rcu_read_lock();
2209 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2210 unsigned long free = READ_ONCE(vb->free);
2211 unsigned long dirty = READ_ONCE(vb->dirty);
2212
2213 if (free + dirty != VMAP_BBMAP_BITS ||
2214 dirty == VMAP_BBMAP_BITS)
2215 continue;
2216
2217 spin_lock(&vb->lock);
2218 purge_fragmented_block(vb, vbq, &purge, true);
2219 spin_unlock(&vb->lock);
2220 }
2221 rcu_read_unlock();
2222 free_purged_blocks(&purge);
2223 }
2224
2225 static void purge_fragmented_blocks_allcpus(void)
2226 {
2227 int cpu;
2228
2229 for_each_possible_cpu(cpu)
2230 purge_fragmented_blocks(cpu);
2231 }
2232
2233 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2234 {
2235 struct vmap_block_queue *vbq;
2236 struct vmap_block *vb;
2237 void *vaddr = NULL;
2238 unsigned int order;
2239
2240 BUG_ON(offset_in_page(size));
2241 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2242 if (WARN_ON(size == 0)) {
2243 /*
2244 * Allocating 0 bytes isn't what the caller wants, and
2245 * get_order(0) returns a surprising result. Just warn and bail
2246 * out early.
2247 */
2248 return NULL;
2249 }
2250 order = get_order(size);
2251
2252 rcu_read_lock();
2253 vbq = raw_cpu_ptr(&vmap_block_queue);
2254 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2255 unsigned long pages_off;
2256
2257 if (READ_ONCE(vb->free) < (1UL << order))
2258 continue;
2259
2260 spin_lock(&vb->lock);
2261 if (vb->free < (1UL << order)) {
2262 spin_unlock(&vb->lock);
2263 continue;
2264 }
2265
2266 pages_off = VMAP_BBMAP_BITS - vb->free;
2267 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2268 WRITE_ONCE(vb->free, vb->free - (1UL << order));
2269 bitmap_set(vb->used_map, pages_off, (1UL << order));
2270 if (vb->free == 0) {
2271 spin_lock(&vbq->lock);
2272 list_del_rcu(&vb->free_list);
2273 spin_unlock(&vbq->lock);
2274 }
2275
2276 spin_unlock(&vb->lock);
2277 break;
2278 }
2279
2280 rcu_read_unlock();
2281
2282 /* Allocate new block if nothing was found */
2283 if (!vaddr)
2284 vaddr = new_vmap_block(order, gfp_mask);
2285
2286 return vaddr;
2287 }
2288
2289 static void vb_free(unsigned long addr, unsigned long size)
2290 {
2291 unsigned long offset;
2292 unsigned int order;
2293 struct vmap_block *vb;
2294 struct xarray *xa;
2295
2296 BUG_ON(offset_in_page(size));
2297 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2298
2299 flush_cache_vunmap(addr, addr + size);
2300
2301 order = get_order(size);
2302 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2303
2304 xa = addr_to_vb_xa(addr);
2305 vb = xa_load(xa, addr_to_vb_idx(addr));
2306
2307 spin_lock(&vb->lock);
2308 bitmap_clear(vb->used_map, offset, (1UL << order));
2309 spin_unlock(&vb->lock);
2310
2311 vunmap_range_noflush(addr, addr + size);
2312
2313 if (debug_pagealloc_enabled_static())
2314 flush_tlb_kernel_range(addr, addr + size);
2315
2316 spin_lock(&vb->lock);
2317
2318 /* Expand the dirty range that has not yet been TLB-flushed */
2319 vb->dirty_min = min(vb->dirty_min, offset);
2320 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2321
2322 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2323 if (vb->dirty == VMAP_BBMAP_BITS) {
2324 BUG_ON(vb->free);
2325 spin_unlock(&vb->lock);
2326 free_vmap_block(vb);
2327 } else
2328 spin_unlock(&vb->lock);
2329 }
2330
2331 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2332 {
2333 LIST_HEAD(purge_list);
2334 int cpu;
2335
2336 if (unlikely(!vmap_initialized))
2337 return;
2338
2339 mutex_lock(&vmap_purge_lock);
2340
2341 for_each_possible_cpu(cpu) {
2342 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2343 struct vmap_block *vb;
2344 unsigned long idx;
2345
2346 rcu_read_lock();
2347 xa_for_each(&vbq->vmap_blocks, idx, vb) {
2348 spin_lock(&vb->lock);
2349
2350 /*
2351 * Try to purge a fragmented block first. If it's
2352 * not purgeable, check whether there is dirty
2353 * space to be flushed.
2354 */
2355 if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
2356 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2357 unsigned long va_start = vb->va->va_start;
2358 unsigned long s, e;
2359
2360 s = va_start + (vb->dirty_min << PAGE_SHIFT);
2361 e = va_start + (vb->dirty_max << PAGE_SHIFT);
2362
2363 start = min(s, start);
2364 end = max(e, end);
2365
2366 /* Prevent this from being flushed again */
2367 vb->dirty_min = VMAP_BBMAP_BITS;
2368 vb->dirty_max = 0;
2369
2370 flush = 1;
2371 }
2372 spin_unlock(&vb->lock);
2373 }
2374 rcu_read_unlock();
2375 }
2376 free_purged_blocks(&purge_list);
2377
2378 if (!__purge_vmap_area_lazy(start, end) && flush)
2379 flush_tlb_kernel_range(start, end);
2380 mutex_unlock(&vmap_purge_lock);
2381 }
2382
2383 /**
2384 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2385 *
2386 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2387 * to amortize TLB flushing overheads. What this means is that any page you
2388 * have now may, in a former life, have been mapped into a kernel virtual
2389 * address by the vmap layer, so there might be some CPUs with TLB entries
2390 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2391 *
2392 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2393 * be sure that none of the pages we have control over will have any aliases
2394 * from the vmap layer.
2395 */
2396 void vm_unmap_aliases(void)
2397 {
2398 unsigned long start = ULONG_MAX, end = 0;
2399 int flush = 0;
2400
2401 _vm_unmap_aliases(start, end, flush);
2402 }
2403 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2404
2405 /**
2406 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2407 * @mem: the pointer returned by vm_map_ram
2408 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2409 */
2410 void vm_unmap_ram(const void *mem, unsigned int count)
2411 {
2412 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2413 unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2414 struct vmap_area *va;
2415
2416 might_sleep();
2417 BUG_ON(!addr);
2418 BUG_ON(addr < VMALLOC_START);
2419 BUG_ON(addr > VMALLOC_END);
2420 BUG_ON(!PAGE_ALIGNED(addr));
2421
2422 kasan_poison_vmalloc(mem, size);
2423
2424 if (likely(count <= VMAP_MAX_ALLOC)) {
2425 debug_check_no_locks_freed(mem, size);
2426 vb_free(addr, size);
2427 return;
2428 }
2429
2430 va = find_unlink_vmap_area(addr);
2431 if (WARN_ON_ONCE(!va))
2432 return;
2433
2434 debug_check_no_locks_freed((void *)va->va_start,
2435 (va->va_end - va->va_start));
2436 free_unmap_vmap_area(va);
2437 }
2438 EXPORT_SYMBOL(vm_unmap_ram);
2439
2440 /**
2441 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2442 * @pages: an array of pointers to the pages to be mapped
2443 * @count: number of pages
2444 * @node: prefer to allocate data structures on this node
2445 *
2446 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2447 * faster than vmap(). But if you mix long-lived and short-lived objects
2448 * with vm_map_ram(), it can consume a lot of address space through
2449 * fragmentation (especially on a 32bit machine) and you may eventually see
2450 * allocation failures. Use this function only for short-lived objects.
2451 *
2452 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2453 */
2454 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2455 {
2456 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2457 unsigned long addr;
2458 void *mem;
2459
2460 if (likely(count <= VMAP_MAX_ALLOC)) {
2461 mem = vb_alloc(size, GFP_KERNEL);
2462 if (IS_ERR(mem))
2463 return NULL;
2464 addr = (unsigned long)mem;
2465 } else {
2466 struct vmap_area *va;
2467 va = alloc_vmap_area(size, PAGE_SIZE,
2468 VMALLOC_START, VMALLOC_END,
2469 node, GFP_KERNEL, VMAP_RAM);
2470 if (IS_ERR(va))
2471 return NULL;
2472
2473 addr = va->va_start;
2474 mem = (void *)addr;
2475 }
2476
2477 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2478 pages, PAGE_SHIFT) < 0) {
2479 vm_unmap_ram(mem, count);
2480 return NULL;
2481 }
2482
2483 /*
2484 * Mark the pages as accessible, now that they are mapped.
2485 * With hardware tag-based KASAN, marking is skipped for
2486 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2487 */
2488 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2489
2490 return mem;
2491 }
2492 EXPORT_SYMBOL(vm_map_ram);
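
/*
 * Illustrative sketch (not used anywhere in this file): how a caller that
 * already holds a page array might pair vm_map_ram() with vm_unmap_ram()
 * for a short-lived linear mapping. The function name and its parameters
 * are hypothetical.
 */
static void __maybe_unused example_vm_map_ram_usage(struct page **pages,
						    unsigned int nr_pages)
{
	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);

	if (!va)
		return;

	/* The pages are now virtually contiguous at @va. */
	memset(va, 0, (size_t)nr_pages << PAGE_SHIFT);

	vm_unmap_ram(va, nr_pages);
}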
2493
2494 static struct vm_struct *vmlist __initdata;
2495
2496 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2497 {
2498 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2499 return vm->page_order;
2500 #else
2501 return 0;
2502 #endif
2503 }
2504
2505 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2506 {
2507 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2508 vm->page_order = order;
2509 #else
2510 BUG_ON(order != 0);
2511 #endif
2512 }
2513
2514 /**
2515 * vm_area_add_early - add vmap area early during boot
2516 * @vm: vm_struct to add
2517 *
2518 * This function is used to add a fixed kernel vm area to vmlist before
2519 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2520 * should contain proper values and the other fields should be zero.
2521 *
2522 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2523 */
2524 void __init vm_area_add_early(struct vm_struct *vm)
2525 {
2526 struct vm_struct *tmp, **p;
2527
2528 BUG_ON(vmap_initialized);
2529 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2530 if (tmp->addr >= vm->addr) {
2531 BUG_ON(tmp->addr < vm->addr + vm->size);
2532 break;
2533 } else
2534 BUG_ON(tmp->addr + tmp->size > vm->addr);
2535 }
2536 vm->next = *p;
2537 *p = vm;
2538 }
2539
2540 /**
2541 * vm_area_register_early - register vmap area early during boot
2542 * @vm: vm_struct to register
2543 * @align: requested alignment
2544 *
2545 * This function is used to register a kernel vm area before
2546 * vmalloc_init() is called. @vm->size and @vm->flags should contain
2547 * proper values on entry and the other fields should be zero. On return,
2548 * vm->addr contains the allocated address.
2549 *
2550 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2551 */
2552 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2553 {
2554 unsigned long addr = ALIGN(VMALLOC_START, align);
2555 struct vm_struct *cur, **p;
2556
2557 BUG_ON(vmap_initialized);
2558
2559 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
2560 if ((unsigned long)cur->addr - addr >= vm->size)
2561 break;
2562 addr = ALIGN((unsigned long)cur->addr + cur->size, align);
2563 }
2564
2565 BUG_ON(addr > VMALLOC_END - vm->size);
2566 vm->addr = (void *)addr;
2567 vm->next = *p;
2568 *p = vm;
2569 kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2570 }
2571
2572 static void vmap_init_free_space(void)
2573 {
2574 unsigned long vmap_start = 1;
2575 const unsigned long vmap_end = ULONG_MAX;
2576 struct vmap_area *busy, *free;
2577
2578 /*
2579 * B F B B B F
2580 * -|-----|.....|-----|-----|-----|.....|-
2581 * | The KVA space |
2582 * |<--------------------------------->|
2583 */
2584 list_for_each_entry(busy, &vmap_area_list, list) {
2585 if (busy->va_start - vmap_start > 0) {
2586 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2587 if (!WARN_ON_ONCE(!free)) {
2588 free->va_start = vmap_start;
2589 free->va_end = busy->va_start;
2590
2591 insert_vmap_area_augment(free, NULL,
2592 &free_vmap_area_root,
2593 &free_vmap_area_list);
2594 }
2595 }
2596
2597 vmap_start = busy->va_end;
2598 }
2599
2600 if (vmap_end - vmap_start > 0) {
2601 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2602 if (!WARN_ON_ONCE(!free)) {
2603 free->va_start = vmap_start;
2604 free->va_end = vmap_end;
2605
2606 insert_vmap_area_augment(free, NULL,
2607 &free_vmap_area_root,
2608 &free_vmap_area_list);
2609 }
2610 }
2611 }
2612
2613 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2614 struct vmap_area *va, unsigned long flags, const void *caller)
2615 {
2616 vm->flags = flags;
2617 vm->addr = (void *)va->va_start;
2618 vm->size = va->va_end - va->va_start;
2619 vm->caller = caller;
2620 va->vm = vm;
2621 }
2622
2623 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2624 unsigned long flags, const void *caller)
2625 {
2626 spin_lock(&vmap_area_lock);
2627 setup_vmalloc_vm_locked(vm, va, flags, caller);
2628 spin_unlock(&vmap_area_lock);
2629 }
2630
2631 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2632 {
2633 /*
2634 * Before removing VM_UNINITIALIZED,
2635 * we should make sure that vm has proper values.
2636 * Pair with smp_rmb() in show_numa_info().
2637 */
2638 smp_wmb();
2639 vm->flags &= ~VM_UNINITIALIZED;
2640 }
2641
2642 static struct vm_struct *__get_vm_area_node(unsigned long size,
2643 unsigned long align, unsigned long shift, unsigned long flags,
2644 unsigned long start, unsigned long end, int node,
2645 gfp_t gfp_mask, const void *caller)
2646 {
2647 struct vmap_area *va;
2648 struct vm_struct *area;
2649 unsigned long requested_size = size;
2650
2651 BUG_ON(in_interrupt());
2652 size = ALIGN(size, 1ul << shift);
2653 if (unlikely(!size))
2654 return NULL;
2655
2656 if (flags & VM_IOREMAP)
2657 align = 1ul << clamp_t(int, get_count_order_long(size),
2658 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2659
2660 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2661 if (unlikely(!area))
2662 return NULL;
2663
2664 if (!(flags & VM_NO_GUARD))
2665 size += PAGE_SIZE;
2666
2667 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2668 if (IS_ERR(va)) {
2669 kfree(area);
2670 return NULL;
2671 }
2672
2673 setup_vmalloc_vm(area, va, flags, caller);
2674
2675 /*
2676 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2677 * best-effort approach, as they can be mapped outside of vmalloc code.
2678 * For VM_ALLOC mappings, the pages are marked as accessible after
2679 * getting mapped in __vmalloc_node_range().
2680 * With hardware tag-based KASAN, marking is skipped for
2681 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2682 */
2683 if (!(flags & VM_ALLOC))
2684 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2685 KASAN_VMALLOC_PROT_NORMAL);
2686
2687 return area;
2688 }
2689
2690 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2691 unsigned long start, unsigned long end,
2692 const void *caller)
2693 {
2694 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2695 NUMA_NO_NODE, GFP_KERNEL, caller);
2696 }
2697
2698 /**
2699 * get_vm_area - reserve a contiguous kernel virtual area
2700 * @size: size of the area
2701 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
2702 *
2703 * Search for an area of @size in the kernel virtual mapping area
2704 * and reserve it for our purposes. Returns the area descriptor
2705 * on success or %NULL on failure.
2706 *
2707 * Return: the area descriptor on success or %NULL on failure.
2708 */
2709 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2710 {
2711 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2712 VMALLOC_START, VMALLOC_END,
2713 NUMA_NO_NODE, GFP_KERNEL,
2714 __builtin_return_address(0));
2715 }
2716
2717 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2718 const void *caller)
2719 {
2720 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2721 VMALLOC_START, VMALLOC_END,
2722 NUMA_NO_NODE, GFP_KERNEL, caller);
2723 }
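
/*
 * Illustrative sketch (hypothetical helper, not part of this file): an
 * ioremap-style user can reserve kernel virtual address space with
 * get_vm_area() without backing it, and drop the reservation again with
 * free_vm_area() (declared in <linux/vmalloc.h>). The size is an arbitrary
 * example value.
 */
static int __maybe_unused example_get_vm_area_usage(void)
{
	struct vm_struct *area;

	area = get_vm_area(16 * PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return -ENOMEM;

	/* area->addr points at the reserved, still unmapped, range. */
	free_vm_area(area);
	return 0;
}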
2724
2725 /**
2726 * find_vm_area - find a contiguous kernel virtual area
2727 * @addr: base address
2728 *
2729 * Search for the kernel VM area starting at @addr, and return it.
2730 * It is up to the caller to do all required locking to keep the returned
2731 * pointer valid.
2732 *
2733 * Return: the area descriptor on success or %NULL on failure.
2734 */
2735 struct vm_struct *find_vm_area(const void *addr)
2736 {
2737 struct vmap_area *va;
2738
2739 va = find_vmap_area((unsigned long)addr);
2740 if (!va)
2741 return NULL;
2742
2743 return va->vm;
2744 }
2745
2746 /**
2747 * remove_vm_area - find and remove a contiguous kernel virtual area
2748 * @addr: base address
2749 *
2750 * Search for the kernel VM area starting at @addr, and remove it.
2751 * This function returns the found VM area, but using it is NOT safe
2752 * on SMP machines, except for its size or flags.
2753 *
2754 * Return: the area descriptor on success or %NULL on failure.
2755 */
2756 struct vm_struct *remove_vm_area(const void *addr)
2757 {
2758 struct vmap_area *va;
2759 struct vm_struct *vm;
2760
2761 might_sleep();
2762
2763 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2764 addr))
2765 return NULL;
2766
2767 va = find_unlink_vmap_area((unsigned long)addr);
2768 if (!va || !va->vm)
2769 return NULL;
2770 vm = va->vm;
2771
2772 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
2773 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
2774 kasan_free_module_shadow(vm);
2775 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
2776
2777 free_unmap_vmap_area(va);
2778 return vm;
2779 }
2780
2781 static inline void set_area_direct_map(const struct vm_struct *area,
2782 int (*set_direct_map)(struct page *page))
2783 {
2784 int i;
2785
2786 /* HUGE_VMALLOC passes small pages to set_direct_map */
2787 for (i = 0; i < area->nr_pages; i++)
2788 if (page_address(area->pages[i]))
2789 set_direct_map(area->pages[i]);
2790 }
2791
2792 /*
2793 * Flush the vm mapping and reset the direct map.
2794 */
2795 static void vm_reset_perms(struct vm_struct *area)
2796 {
2797 unsigned long start = ULONG_MAX, end = 0;
2798 unsigned int page_order = vm_area_page_order(area);
2799 int flush_dmap = 0;
2800 int i;
2801
2802 /*
2803 * Find the start and end range of the direct mappings to make sure that
2804 * the vm_unmap_aliases() flush includes the direct map.
2805 */
2806 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2807 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2808
2809 if (addr) {
2810 unsigned long page_size;
2811
2812 page_size = PAGE_SIZE << page_order;
2813 start = min(addr, start);
2814 end = max(addr + page_size, end);
2815 flush_dmap = 1;
2816 }
2817 }
2818
2819 /*
2820 * Set direct map to something invalid so that it won't be cached if
2821 * there are any accesses after the TLB flush, then flush the TLB and
2822 * reset the direct map permissions to the default.
2823 */
2824 set_area_direct_map(area, set_direct_map_invalid_noflush);
2825 _vm_unmap_aliases(start, end, flush_dmap);
2826 set_area_direct_map(area, set_direct_map_default_noflush);
2827 }
2828
2829 static void delayed_vfree_work(struct work_struct *w)
2830 {
2831 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
2832 struct llist_node *t, *llnode;
2833
2834 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
2835 vfree(llnode);
2836 }
2837
2838 /**
2839 * vfree_atomic - release memory allocated by vmalloc()
2840 * @addr: memory base address
2841 *
2842 * This one is just like vfree() but can be called in any atomic context
2843 * except NMIs.
2844 */
2845 void vfree_atomic(const void *addr)
2846 {
2847 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2848
2849 BUG_ON(in_nmi());
2850 kmemleak_free(addr);
2851
2852 /*
2853 * Use raw_cpu_ptr() because this can be called from preemptible
2854 * context. Preemption is absolutely fine here, because the llist_add()
2855 * implementation is lockless, so it works even if we are adding to
2856 * another cpu's list. schedule_work() should be fine with this too.
2857 */
2858 if (addr && llist_add((struct llist_node *)addr, &p->list))
2859 schedule_work(&p->wq);
2860 }
2861
2862 /**
2863 * vfree - Release memory allocated by vmalloc()
2864 * @addr: Memory base address
2865 *
2866 * Free the virtually continuous memory area starting at @addr, as obtained
2867 * from one of the vmalloc() family of APIs. This will usually also free the
2868 * physical memory underlying the virtual allocation, but that memory is
2869 * reference counted, so it will not be freed until the last user goes away.
2870 *
2871 * If @addr is NULL, no operation is performed.
2872 *
2873 * Context:
2874 * May sleep if called *not* from interrupt context.
2875 * Must not be called in NMI context (strictly speaking, it could be
2876 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2877 * conventions for vfree() arch-dependent would be a really bad idea).
2878 */
2879 void vfree(const void *addr)
2880 {
2881 struct vm_struct *vm;
2882 int i;
2883
2884 if (unlikely(in_interrupt())) {
2885 vfree_atomic(addr);
2886 return;
2887 }
2888
2889 BUG_ON(in_nmi());
2890 kmemleak_free(addr);
2891 might_sleep();
2892
2893 if (!addr)
2894 return;
2895
2896 vm = remove_vm_area(addr);
2897 if (unlikely(!vm)) {
2898 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2899 addr);
2900 return;
2901 }
2902
2903 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
2904 vm_reset_perms(vm);
2905 for (i = 0; i < vm->nr_pages; i++) {
2906 struct page *page = vm->pages[i];
2907
2908 BUG_ON(!page);
2909 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
2910 /*
2911 * High-order allocations for huge vmallocs are split, so
2912 * they can be freed as an array of order-0 allocations.
2913 */
2914 __free_page(page);
2915 cond_resched();
2916 }
2917 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
2918 kvfree(vm->pages);
2919 kfree(vm);
2920 }
2921 EXPORT_SYMBOL(vfree);
2922
2923 /**
2924 * vunmap - release virtual mapping obtained by vmap()
2925 * @addr: memory base address
2926 *
2927 * Free the virtually contiguous memory area starting at @addr,
2928 * which was created from the page array passed to vmap().
2929 *
2930 * Must not be called in interrupt context.
2931 */
2932 void vunmap(const void *addr)
2933 {
2934 struct vm_struct *vm;
2935
2936 BUG_ON(in_interrupt());
2937 might_sleep();
2938
2939 if (!addr)
2940 return;
2941 vm = remove_vm_area(addr);
2942 if (unlikely(!vm)) {
2943 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
2944 addr);
2945 return;
2946 }
2947 kfree(vm);
2948 }
2949 EXPORT_SYMBOL(vunmap);
2950
2951 /**
2952 * vmap - map an array of pages into virtually contiguous space
2953 * @pages: array of page pointers
2954 * @count: number of pages to map
2955 * @flags: vm_area->flags
2956 * @prot: page protection for the mapping
2957 *
2958 * Maps @count pages from @pages into contiguous kernel virtual space.
2959 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2960 * (which must be kmalloc or vmalloc memory) and one reference per page in it
2961 * are transferred from the caller to vmap(), and will be freed / dropped when
2962 * vfree() is called on the return value.
2963 *
2964 * Return: the address of the area or %NULL on failure
2965 */
2966 void *vmap(struct page **pages, unsigned int count,
2967 unsigned long flags, pgprot_t prot)
2968 {
2969 struct vm_struct *area;
2970 unsigned long addr;
2971 unsigned long size; /* In bytes */
2972
2973 might_sleep();
2974
2975 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
2976 return NULL;
2977
2978 /*
2979 * Your top guard is someone else's bottom guard. Not having a top
2980 * guard compromises someone else's mappings too.
2981 */
2982 if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2983 flags &= ~VM_NO_GUARD;
2984
2985 if (count > totalram_pages())
2986 return NULL;
2987
2988 size = (unsigned long)count << PAGE_SHIFT;
2989 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2990 if (!area)
2991 return NULL;
2992
2993 addr = (unsigned long)area->addr;
2994 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2995 pages, PAGE_SHIFT) < 0) {
2996 vunmap(area->addr);
2997 return NULL;
2998 }
2999
3000 if (flags & VM_MAP_PUT_PAGES) {
3001 area->pages = pages;
3002 area->nr_pages = count;
3003 }
3004 return area->addr;
3005 }
3006 EXPORT_SYMBOL(vmap);
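
/*
 * Illustrative sketch (hypothetical caller): stitching two separately
 * allocated pages into one virtually contiguous mapping with vmap() and
 * tearing it down with vunmap(). Without VM_MAP_PUT_PAGES the caller keeps
 * ownership of the pages and must free them itself.
 */
static void __maybe_unused example_vmap_usage(void)
{
	struct page *pages[2];
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (va) {
		/* Both pages are now addressable as one range at @va. */
		memset(va, 0, 2 * PAGE_SIZE);
		vunmap(va);
	}
out:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
}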
3007
3008 #ifdef CONFIG_VMAP_PFN
3009 struct vmap_pfn_data {
3010 unsigned long *pfns;
3011 pgprot_t prot;
3012 unsigned int idx;
3013 };
3014
3015 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3016 {
3017 struct vmap_pfn_data *data = private;
3018 unsigned long pfn = data->pfns[data->idx];
3019 pte_t ptent;
3020
3021 if (WARN_ON_ONCE(pfn_valid(pfn)))
3022 return -EINVAL;
3023
3024 ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3025 set_pte_at(&init_mm, addr, pte, ptent);
3026
3027 data->idx++;
3028 return 0;
3029 }
3030
3031 /**
3032 * vmap_pfn - map an array of PFNs into virtually contiguous space
3033 * @pfns: array of PFNs
3034 * @count: number of pages to map
3035 * @prot: page protection for the mapping
3036 *
3037 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3038 * the start address of the mapping.
3039 */
3040 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3041 {
3042 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3043 struct vm_struct *area;
3044
3045 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3046 __builtin_return_address(0));
3047 if (!area)
3048 return NULL;
3049 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3050 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3051 free_vm_area(area);
3052 return NULL;
3053 }
3054
3055 flush_cache_vmap((unsigned long)area->addr,
3056 (unsigned long)area->addr + count * PAGE_SIZE);
3057
3058 return area->addr;
3059 }
3060 EXPORT_SYMBOL_GPL(vmap_pfn);
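
/*
 * Illustrative sketch (hypothetical caller): mapping a small set of device
 * PFNs, i.e. PFNs without struct pages, which is the only input vmap_pfn()
 * accepts. The @pfns array, @count and the write-combining protection are
 * assumptions of the example; the mapping is torn down with vunmap().
 */
static void __maybe_unused example_vmap_pfn_usage(unsigned long *pfns,
						  unsigned int count)
{
	void *va = vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));

	if (!va)
		return;

	/* ... access the device memory through @va ... */

	vunmap(va);
}
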
3061 #endif /* CONFIG_VMAP_PFN */
3062
3063 static inline unsigned int
3064 vm_area_alloc_pages(gfp_t gfp, int nid,
3065 unsigned int order, unsigned int nr_pages, struct page **pages)
3066 {
3067 unsigned int nr_allocated = 0;
3068 gfp_t alloc_gfp = gfp;
3069 bool nofail = false;
3070 struct page *page;
3071 int i;
3072
3073 /*
3074 * For order-0 pages we use the bulk allocator. If the page
3075 * array ends up only partly populated, or not populated at all,
3076 * fall back to the single page allocator, which is more
3077 * permissive.
3078 */
3079 if (!order) {
3080 /* bulk allocator doesn't support nofail req. officially */
3081 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
3082
3083 while (nr_allocated < nr_pages) {
3084 unsigned int nr, nr_pages_request;
3085
3086 /*
3087 * The maximum allowed request is hard-coded to 100
3088 * pages per call, in order to prevent long
3089 * preemption-off sections in the bulk allocator,
3090 * so the request range is [1:100].
3091 */
3092 nr_pages_request = min(100U, nr_pages - nr_allocated);
3093
3094 /* Memory allocation should honour the mempolicy: we must not
3095 * simply use the nearest node when nid == NUMA_NO_NODE,
3096 * otherwise memory may be allocated on only one node while
3097 * the mempolicy asks for interleaving.
3098 */
3099 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3100 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3101 nr_pages_request,
3102 pages + nr_allocated);
3103
3104 else
3105 nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3106 nr_pages_request,
3107 pages + nr_allocated);
3108
3109 nr_allocated += nr;
3110 cond_resched();
3111
3112 /*
3113 * If zero pages were obtained, or the request was only partly
3114 * satisfied, fall back to the single page allocator.
3115 */
3116 if (nr != nr_pages_request)
3117 break;
3118 }
3119 } else if (gfp & __GFP_NOFAIL) {
3120 /*
3121 * Higher order nofail allocations are really expensive and
3122 * potentially dangerous (premature OOM, disruptive reclaim,
3123 * compaction, etc.).
3124 */
3125 alloc_gfp &= ~__GFP_NOFAIL;
3126 nofail = true;
3127 }
3128
3129 /* High-order pages or fallback path if "bulk" fails. */
3130 while (nr_allocated < nr_pages) {
3131 if (fatal_signal_pending(current))
3132 break;
3133
3134 if (nid == NUMA_NO_NODE)
3135 page = alloc_pages(alloc_gfp, order);
3136 else
3137 page = alloc_pages_node(nid, alloc_gfp, order);
3138 if (unlikely(!page)) {
3139 if (!nofail)
3140 break;
3141
3142 /* fall back to the zero order allocations */
3143 alloc_gfp |= __GFP_NOFAIL;
3144 order = 0;
3145 continue;
3146 }
3147
3148 /*
3149 * Higher order allocations must be able to be treated as
3150 * independent small pages by callers (as they can with
3151 * small-page vmallocs). Some drivers do their own refcounting
3152 * on vmalloc_to_page() pages, some use page->mapping,
3153 * page->lru, etc.
3154 */
3155 if (order)
3156 split_page(page, order);
3157
3158 /*
3159 * Careful, we allocate and map page-order pages, but
3160 * tracking is done per PAGE_SIZE page so as to keep the
3161 * vm_struct APIs independent of the physical/mapped size.
3162 */
3163 for (i = 0; i < (1U << order); i++)
3164 pages[nr_allocated + i] = page + i;
3165
3166 cond_resched();
3167 nr_allocated += 1U << order;
3168 }
3169
3170 return nr_allocated;
3171 }
3172
3173 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3174 pgprot_t prot, unsigned int page_shift,
3175 int node)
3176 {
3177 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3178 bool nofail = gfp_mask & __GFP_NOFAIL;
3179 unsigned long addr = (unsigned long)area->addr;
3180 unsigned long size = get_vm_area_size(area);
3181 unsigned long array_size;
3182 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3183 unsigned int page_order;
3184 unsigned int flags;
3185 int ret;
3186
3187 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3188
3189 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3190 gfp_mask |= __GFP_HIGHMEM;
3191
3192 /* Please note that the recursion is strictly bounded. */
3193 if (array_size > PAGE_SIZE) {
3194 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3195 area->caller);
3196 } else {
3197 area->pages = kmalloc_node(array_size, nested_gfp, node);
3198 }
3199
3200 if (!area->pages) {
3201 warn_alloc(gfp_mask, NULL,
3202 "vmalloc error: size %lu, failed to allocated page array size %lu",
3203 nr_small_pages * PAGE_SIZE, array_size);
3204 free_vm_area(area);
3205 return NULL;
3206 }
3207
3208 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3209 page_order = vm_area_page_order(area);
3210
3211 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3212 node, page_order, nr_small_pages, area->pages);
3213
3214 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3215 if (gfp_mask & __GFP_ACCOUNT) {
3216 int i;
3217
3218 for (i = 0; i < area->nr_pages; i++)
3219 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3220 }
3221
3222 /*
3223 * If not enough pages were obtained to satisfy the
3224 * allocation request, free whatever was allocated via vfree().
3225 */
3226 if (area->nr_pages != nr_small_pages) {
3227 /*
3228 * vm_area_alloc_pages() can fail due to insufficient memory but
3229 * also because of:
3230 *
3231 * - a pending fatal signal
3232 * - insufficient huge page-order pages
3233 *
3234 * Since we always retry allocations at order-0 in the huge page
3235 * case, a warning for either is spurious.
3236 */
3237 if (!fatal_signal_pending(current) && page_order == 0)
3238 warn_alloc(gfp_mask, NULL,
3239 "vmalloc error: size %lu, failed to allocate pages",
3240 area->nr_pages * PAGE_SIZE);
3241 goto fail;
3242 }
3243
3244 /*
3245 * Page table allocations ignore the external gfp mask, so enforce
3246 * it via the memalloc scope API.
3247 */
3248 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3249 flags = memalloc_nofs_save();
3250 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3251 flags = memalloc_noio_save();
3252
3253 do {
3254 ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3255 page_shift);
3256 if (nofail && (ret < 0))
3257 schedule_timeout_uninterruptible(1);
3258 } while (nofail && (ret < 0));
3259
3260 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3261 memalloc_nofs_restore(flags);
3262 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3263 memalloc_noio_restore(flags);
3264
3265 if (ret < 0) {
3266 warn_alloc(gfp_mask, NULL,
3267 "vmalloc error: size %lu, failed to map pages",
3268 area->nr_pages * PAGE_SIZE);
3269 goto fail;
3270 }
3271
3272 return area->addr;
3273
3274 fail:
3275 vfree(area->addr);
3276 return NULL;
3277 }
3278
3279 /**
3280 * __vmalloc_node_range - allocate virtually contiguous memory
3281 * @size: allocation size
3282 * @align: desired alignment
3283 * @start: vm area range start
3284 * @end: vm area range end
3285 * @gfp_mask: flags for the page level allocator
3286 * @prot: protection mask for the allocated pages
3287 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3288 * @node: node to use for allocation or NUMA_NO_NODE
3289 * @caller: caller's return address
3290 *
3291 * Allocate enough pages to cover @size from the page level
3292 * allocator with @gfp_mask flags. Please note that the full set of gfp
3293 * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3294 * supported.
3295 * Zone modifiers are not supported. From the reclaim modifiers
3296 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3297 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3298 * __GFP_RETRY_MAYFAIL are not supported).
3299 *
3300 * __GFP_NOWARN can be used to suppress failure messages.
3301 *
3302 * Map them into contiguous kernel virtual space, using a pagetable
3303 * protection of @prot.
3304 *
3305 * Return: the address of the area or %NULL on failure
3306 */
3307 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3308 unsigned long start, unsigned long end, gfp_t gfp_mask,
3309 pgprot_t prot, unsigned long vm_flags, int node,
3310 const void *caller)
3311 {
3312 struct vm_struct *area;
3313 void *ret;
3314 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3315 unsigned long real_size = size;
3316 unsigned long real_align = align;
3317 unsigned int shift = PAGE_SHIFT;
3318
3319 if (WARN_ON_ONCE(!size))
3320 return NULL;
3321
3322 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3323 warn_alloc(gfp_mask, NULL,
3324 "vmalloc error: size %lu, exceeds total pages",
3325 real_size);
3326 return NULL;
3327 }
3328
3329 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3330 unsigned long size_per_node;
3331
3332 /*
3333 * Try huge pages. Only try for PAGE_KERNEL allocations;
3334 * others, like modules, don't yet expect huge pages in
3335 * their allocations due to apply_to_page_range not
3336 * supporting them.
3337 */
3338
3339 size_per_node = size;
3340 if (node == NUMA_NO_NODE)
3341 size_per_node /= num_online_nodes();
3342 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3343 shift = PMD_SHIFT;
3344 else
3345 shift = arch_vmap_pte_supported_shift(size_per_node);
3346
3347 align = max(real_align, 1UL << shift);
3348 size = ALIGN(real_size, 1UL << shift);
3349 }
3350
3351 again:
3352 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3353 VM_UNINITIALIZED | vm_flags, start, end, node,
3354 gfp_mask, caller);
3355 if (!area) {
3356 bool nofail = gfp_mask & __GFP_NOFAIL;
3357 warn_alloc(gfp_mask, NULL,
3358 "vmalloc error: size %lu, vm_struct allocation failed%s",
3359 real_size, (nofail) ? ". Retrying." : "");
3360 if (nofail) {
3361 schedule_timeout_uninterruptible(1);
3362 goto again;
3363 }
3364 goto fail;
3365 }
3366
3367 /*
3368 * Prepare arguments for __vmalloc_area_node() and
3369 * kasan_unpoison_vmalloc().
3370 */
3371 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3372 if (kasan_hw_tags_enabled()) {
3373 /*
3374 * Modify protection bits to allow tagging.
3375 * This must be done before mapping.
3376 */
3377 prot = arch_vmap_pgprot_tagged(prot);
3378
3379 /*
3380 * Skip page_alloc poisoning and zeroing for physical
3381 * pages backing VM_ALLOC mapping. Memory is instead
3382 * poisoned and zeroed by kasan_unpoison_vmalloc().
3383 */
3384 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3385 }
3386
3387 /* Take note that the mapping is PAGE_KERNEL. */
3388 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3389 }
3390
3391 /* Allocate physical pages and map them into vmalloc space. */
3392 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3393 if (!ret)
3394 goto fail;
3395
3396 /*
3397 * Mark the pages as accessible, now that they are mapped.
3398 * The condition for setting KASAN_VMALLOC_INIT should complement the
3399 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3400 * to make sure that memory is initialized under the same conditions.
3401 * Tag-based KASAN modes only assign tags to normal non-executable
3402 * allocations, see __kasan_unpoison_vmalloc().
3403 */
3404 kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3405 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3406 (gfp_mask & __GFP_SKIP_ZERO))
3407 kasan_flags |= KASAN_VMALLOC_INIT;
3408 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3409 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3410
3411 /*
3412 * In this function, the newly allocated vm_struct has the
3413 * VM_UNINITIALIZED flag, meaning it is not fully initialized.
3414 * It is fully initialized now, so remove the flag here.
3415 */
3416 clear_vm_uninitialized_flag(area);
3417
3418 size = PAGE_ALIGN(size);
3419 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3420 kmemleak_vmalloc(area, size, gfp_mask);
3421
3422 return area->addr;
3423
3424 fail:
3425 if (shift > PAGE_SHIFT) {
3426 shift = PAGE_SHIFT;
3427 align = real_align;
3428 size = real_size;
3429 goto again;
3430 }
3431
3432 return NULL;
3433 }
3434
3435 /**
3436 * __vmalloc_node - allocate virtually contiguous memory
3437 * @size: allocation size
3438 * @align: desired alignment
3439 * @gfp_mask: flags for the page level allocator
3440 * @node: node to use for allocation or NUMA_NO_NODE
3441 * @caller: caller's return address
3442 *
3443 * Allocate enough pages to cover @size from the page level allocator with
3444 * @gfp_mask flags. Map them into contiguous kernel virtual space.
3445 *
3446 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3447 * and __GFP_NOFAIL are not supported.
3448 *
3449 * Any use of gfp flags outside of GFP_KERNEL should be discussed
3450 * with the mm people.
3451 *
3452 * Return: pointer to the allocated memory or %NULL on error
3453 */
3454 void *__vmalloc_node(unsigned long size, unsigned long align,
3455 gfp_t gfp_mask, int node, const void *caller)
3456 {
3457 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3458 gfp_mask, PAGE_KERNEL, 0, node, caller);
3459 }
3460 /*
3461 * This export exists only for performance analysis and stress testing of
3462 * vmalloc. It is required by the vmalloc test module; do not use it for
3463 * anything else.
3464 */
3465 #ifdef CONFIG_TEST_VMALLOC_MODULE
3466 EXPORT_SYMBOL_GPL(__vmalloc_node);
3467 #endif
3468
3469 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3470 {
3471 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3472 __builtin_return_address(0));
3473 }
3474 EXPORT_SYMBOL(__vmalloc);
3475
3476 /**
3477 * vmalloc - allocate virtually contiguous memory
3478 * @size: allocation size
3479 *
3480 * Allocate enough pages to cover @size from the page level
3481 * allocator and map them into contiguous kernel virtual space.
3482 *
3483 * For tight control over page level allocator and protection flags
3484 * use __vmalloc() instead.
3485 *
3486 * Return: pointer to the allocated memory or %NULL on error
3487 */
3488 void *vmalloc(unsigned long size)
3489 {
3490 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3491 __builtin_return_address(0));
3492 }
3493 EXPORT_SYMBOL(vmalloc);
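
/*
 * Illustrative sketch (hypothetical caller): the common vmalloc()/vfree()
 * pairing for a buffer that must be virtually, but not physically,
 * contiguous. The size is an arbitrary example value.
 */
static int __maybe_unused example_vmalloc_usage(void)
{
	size_t size = 64 * 1024;
	void *buf = vmalloc(size);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0, size);
	/* ... use the buffer ... */
	vfree(buf);

	return 0;
}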
3494
3495 /**
3496 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3497 * @size: allocation size
3498 * @gfp_mask: flags for the page level allocator
3499 *
3500 * Allocate enough pages to cover @size from the page level
3501 * allocator and map them into contiguous kernel virtual space.
3502 * If @size is greater than or equal to PMD_SIZE, allow using
3503 * huge pages for the memory.
3504 *
3505 * Return: pointer to the allocated memory or %NULL on error
3506 */
3507 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
3508 {
3509 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3510 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3511 NUMA_NO_NODE, __builtin_return_address(0));
3512 }
3513 EXPORT_SYMBOL_GPL(vmalloc_huge);
3514
3515 /**
3516 * vzalloc - allocate virtually contiguous memory with zero fill
3517 * @size: allocation size
3518 *
3519 * Allocate enough pages to cover @size from the page level
3520 * allocator and map them into contiguous kernel virtual space.
3521 * The memory allocated is set to zero.
3522 *
3523 * For tight control over page level allocator and protection flags
3524 * use __vmalloc() instead.
3525 *
3526 * Return: pointer to the allocated memory or %NULL on error
3527 */
3528 void *vzalloc(unsigned long size)
3529 {
3530 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3531 __builtin_return_address(0));
3532 }
3533 EXPORT_SYMBOL(vzalloc);
3534
3535 /**
3536 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3537 * @size: allocation size
3538 *
3539 * The resulting memory area is zeroed so it can be mapped to userspace
3540 * without leaking data.
3541 *
3542 * Return: pointer to the allocated memory or %NULL on error
3543 */
3544 void *vmalloc_user(unsigned long size)
3545 {
3546 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3547 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3548 VM_USERMAP, NUMA_NO_NODE,
3549 __builtin_return_address(0));
3550 }
3551 EXPORT_SYMBOL(vmalloc_user);
3552
3553 /**
3554 * vmalloc_node - allocate memory on a specific node
3555 * @size: allocation size
3556 * @node: numa node
3557 *
3558 * Allocate enough pages to cover @size from the page level
3559 * allocator and map them into contiguous kernel virtual space.
3560 *
3561 * For tight control over page level allocator and protection flags
3562 * use __vmalloc() instead.
3563 *
3564 * Return: pointer to the allocated memory or %NULL on error
3565 */
3566 void *vmalloc_node(unsigned long size, int node)
3567 {
3568 return __vmalloc_node(size, 1, GFP_KERNEL, node,
3569 __builtin_return_address(0));
3570 }
3571 EXPORT_SYMBOL(vmalloc_node);
3572
3573 /**
3574 * vzalloc_node - allocate memory on a specific node with zero fill
3575 * @size: allocation size
3576 * @node: numa node
3577 *
3578 * Allocate enough pages to cover @size from the page level
3579 * allocator and map them into contiguous kernel virtual space.
3580 * The memory allocated is set to zero.
3581 *
3582 * Return: pointer to the allocated memory or %NULL on error
3583 */
3584 void *vzalloc_node(unsigned long size, int node)
3585 {
3586 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3587 __builtin_return_address(0));
3588 }
3589 EXPORT_SYMBOL(vzalloc_node);
3590
3591 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3592 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3593 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3594 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3595 #else
3596 /*
3597 * 64b systems should always have either DMA or DMA32 zones. For others
3598 * GFP_DMA32 should do the right thing and use the normal zone.
3599 */
3600 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3601 #endif
3602
3603 /**
3604 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3605 * @size: allocation size
3606 *
3607 * Allocate enough 32bit PA addressable pages to cover @size from the
3608 * page level allocator and map them into contiguous kernel virtual space.
3609 *
3610 * Return: pointer to the allocated memory or %NULL on error
3611 */
3612 void *vmalloc_32(unsigned long size)
3613 {
3614 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3615 __builtin_return_address(0));
3616 }
3617 EXPORT_SYMBOL(vmalloc_32);
3618
3619 /**
3620 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3621 * @size: allocation size
3622 *
3623 * The resulting memory area is 32bit addressable and zeroed so it can be
3624 * mapped to userspace without leaking data.
3625 *
3626 * Return: pointer to the allocated memory or %NULL on error
3627 */
3628 void *vmalloc_32_user(unsigned long size)
3629 {
3630 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3631 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3632 VM_USERMAP, NUMA_NO_NODE,
3633 __builtin_return_address(0));
3634 }
3635 EXPORT_SYMBOL(vmalloc_32_user);
3636
3637 /*
3638 * Atomically zero bytes in the iterator.
3639 *
3640 * Returns the number of zeroed bytes.
3641 */
3642 static size_t zero_iter(struct iov_iter *iter, size_t count)
3643 {
3644 size_t remains = count;
3645
3646 while (remains > 0) {
3647 size_t num, copied;
3648
3649 num = min_t(size_t, remains, PAGE_SIZE);
3650 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
3651 remains -= copied;
3652
3653 if (copied < num)
3654 break;
3655 }
3656
3657 return count - remains;
3658 }
3659
3660 /*
3661 * Small helper routine: copy contents from addr to the iterator.
3662 * If a page is not present, fill with zeroes.
3663 *
3664 * Returns the number of copied bytes.
3665 */
3666 static size_t aligned_vread_iter(struct iov_iter *iter,
3667 const char *addr, size_t count)
3668 {
3669 size_t remains = count;
3670 struct page *page;
3671
3672 while (remains > 0) {
3673 unsigned long offset, length;
3674 size_t copied = 0;
3675
3676 offset = offset_in_page(addr);
3677 length = PAGE_SIZE - offset;
3678 if (length > remains)
3679 length = remains;
3680 page = vmalloc_to_page(addr);
3681 /*
3682 * To access this _mapped_ area safely we would need a lock. But
3683 * taking a lock here means adding overhead to vmalloc()/vfree()
3684 * calls for the benefit of this rarely used _debug_ interface.
3685 * Instead of that, we use a local mapping via
3686 * copy_page_to_iter_nofault() and accept a small overhead in
3687 * this access function.
3688 */
3689 if (page)
3690 copied = copy_page_to_iter_nofault(page, offset,
3691 length, iter);
3692 else
3693 copied = zero_iter(iter, length);
3694
3695 addr += copied;
3696 remains -= copied;
3697
3698 if (copied != length)
3699 break;
3700 }
3701
3702 return count - remains;
3703 }
3704
3705 /*
3706 * Read from a vm_map_ram region of memory.
3707 *
3708 * Returns the number of copied bytes.
3709 */
3710 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
3711 size_t count, unsigned long flags)
3712 {
3713 char *start;
3714 struct vmap_block *vb;
3715 struct xarray *xa;
3716 unsigned long offset;
3717 unsigned int rs, re;
3718 size_t remains, n;
3719
3720 /*
3721 * If this area was created directly by the vm_map_ram() interface,
3722 * without being further subdivided and delegated to a vmap_block,
3723 * handle it here.
3724 */
3725 if (!(flags & VMAP_BLOCK))
3726 return aligned_vread_iter(iter, addr, count);
3727
3728 remains = count;
3729
3730 /*
3731 * The area is split into regions and tracked with vmap_block: read out
3732 * each region and zero-fill the holes between regions.
3733 */
3734 xa = addr_to_vb_xa((unsigned long) addr);
3735 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
3736 if (!vb)
3737 goto finished_zero;
3738
3739 spin_lock(&vb->lock);
3740 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
3741 spin_unlock(&vb->lock);
3742 goto finished_zero;
3743 }
3744
3745 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
3746 size_t copied;
3747
3748 if (remains == 0)
3749 goto finished;
3750
3751 start = vmap_block_vaddr(vb->va->va_start, rs);
3752
3753 if (addr < start) {
3754 size_t to_zero = min_t(size_t, start - addr, remains);
3755 size_t zeroed = zero_iter(iter, to_zero);
3756
3757 addr += zeroed;
3758 remains -= zeroed;
3759
3760 if (remains == 0 || zeroed != to_zero)
3761 goto finished;
3762 }
3763
3764 /* it could start reading from the middle of a used region */
3765 offset = offset_in_page(addr);
3766 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
3767 if (n > remains)
3768 n = remains;
3769
3770 copied = aligned_vread_iter(iter, start + offset, n);
3771
3772 addr += copied;
3773 remains -= copied;
3774
3775 if (copied != n)
3776 goto finished;
3777 }
3778
3779 spin_unlock(&vb->lock);
3780
3781 finished_zero:
3782 /* zero-fill the remaining dirty or free regions */
3783 return count - remains + zero_iter(iter, remains);
3784 finished:
3785 /* We couldn't copy/zero everything */
3786 spin_unlock(&vb->lock);
3787 return count - remains;
3788 }
3789
3790 /**
3791 * vread_iter() - read vmalloc area in a safe way to an iterator.
3792 * @iter: the iterator to which data should be written.
3793 * @addr: vm address.
3794 * @count: number of bytes to be read.
3795 *
3796 * This function checks that addr is a valid vmalloc'ed area and
3797 * copies data from that area to the given iterator. If the memory range
3798 * [addr...addr+count) includes some valid address, data is copied to
3799 * @iter. If there are memory holes, they are zero-filled.
3800 * An IOREMAP area is treated as a memory hole and no copy is done.
3801 *
3802 * If [addr...addr+count) doesn't intersect any live vm_struct
3803 * area, 0 is returned.
3804 *
3805 * Note: In the usual case, vread_iter() is never necessary because the
3806 * caller should know the vmalloc() area is valid and can use memcpy().
3807 * This is for routines which have to access the vmalloc area without
3808 * any information, such as /proc/kcore.
3809 *
3810 * Return: number of bytes for which addr should be increased
3811 * (same number as @count) or %0 if [addr...addr+count) doesn't
3812 * include any intersection with a valid vmalloc area
3813 */
3814 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
3815 {
3816 struct vmap_area *va;
3817 struct vm_struct *vm;
3818 char *vaddr;
3819 size_t n, size, flags, remains;
3820
3821 addr = kasan_reset_tag(addr);
3822
3823 /* Don't allow overflow */
3824 if ((unsigned long) addr + count < count)
3825 count = -(unsigned long) addr;
3826
3827 remains = count;
3828
3829 spin_lock(&vmap_area_lock);
3830 va = find_vmap_area_exceed_addr((unsigned long)addr);
3831 if (!va)
3832 goto finished_zero;
3833
3834 /* no intersection with a live vmap_area */
3835 if ((unsigned long)addr + remains <= va->va_start)
3836 goto finished_zero;
3837
3838 list_for_each_entry_from(va, &vmap_area_list, list) {
3839 size_t copied;
3840
3841 if (remains == 0)
3842 goto finished;
3843
3844 vm = va->vm;
3845 flags = va->flags & VMAP_FLAGS_MASK;
3846 /*
3847 * VMAP_BLOCK indicates a sub-type of vm_map_ram area and must
3848 * be set together with VMAP_RAM.
3849 */
3850 WARN_ON(flags == VMAP_BLOCK);
3851
3852 if (!vm && !flags)
3853 continue;
3854
3855 if (vm && (vm->flags & VM_UNINITIALIZED))
3856 continue;
3857
3858 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3859 smp_rmb();
3860
3861 vaddr = (char *) va->va_start;
3862 size = vm ? get_vm_area_size(vm) : va_size(va);
3863
3864 if (addr >= vaddr + size)
3865 continue;
3866
3867 if (addr < vaddr) {
3868 size_t to_zero = min_t(size_t, vaddr - addr, remains);
3869 size_t zeroed = zero_iter(iter, to_zero);
3870
3871 addr += zeroed;
3872 remains -= zeroed;
3873
3874 if (remains == 0 || zeroed != to_zero)
3875 goto finished;
3876 }
3877
3878 n = vaddr + size - addr;
3879 if (n > remains)
3880 n = remains;
3881
3882 if (flags & VMAP_RAM)
3883 copied = vmap_ram_vread_iter(iter, addr, n, flags);
3884 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
3885 copied = aligned_vread_iter(iter, addr, n);
3886 else /* IOREMAP | SPARSE area is treated as memory hole */
3887 copied = zero_iter(iter, n);
3888
3889 addr += copied;
3890 remains -= copied;
3891
3892 if (copied != n)
3893 goto finished;
3894 }
3895
3896 finished_zero:
3897 spin_unlock(&vmap_area_lock);
3898 /* zero-fill memory holes */
3899 return count - remains + zero_iter(iter, remains);
3900 finished:
3901 /* Nothing remains, or we couldn't copy/zero everything. */
3902 spin_unlock(&vmap_area_lock);
3903
3904 return count - remains;
3905 }
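/*
 * Illustrative sketch (not part of the original source): how a consumer
 * such as /proc/kcore can use vread_iter() to copy a vmalloc-space range
 * into an iov_iter. The helper name and its arguments below are
 * hypothetical; only vread_iter() itself comes from this file.
 *
 *	static long dump_vmalloc_range(struct iov_iter *iter,
 *				       unsigned long start, size_t len)
 *	{
 *		long copied;
 *
 *		copied = vread_iter(iter, (char *)start, len);
 *		if (!copied)
 *			pr_debug("no live vmalloc area in [%#lx, %#lx)\n",
 *				 start, start + len);
 *		return copied;
 *	}
 *
 * Holes inside the range, as well as IOREMAP and SPARSE areas, are
 * zero-filled rather than faulted on, so the caller does not need to
 * validate the range beforehand.
 */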
3906
3907 /**
3908 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3909 * @vma: vma to cover
3910 * @uaddr: target user address to start at
3911 * @kaddr: virtual address of vmalloc kernel memory
3912 * @pgoff: offset from @kaddr to start at
3913 * @size: size of map area
3914 *
3915 * Returns: 0 for success, -Exxx on failure
3916 *
3917 * This function checks that @kaddr is a valid vmalloc'ed area,
3918 * and that it is big enough to cover the range starting at
3919 * @uaddr in @vma. It returns failure if those criteria aren't
3920 * met.
3921 *
3922 * Similar to remap_pfn_range() (see mm/memory.c)
3923 */
3924 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3925 void *kaddr, unsigned long pgoff,
3926 unsigned long size)
3927 {
3928 struct vm_struct *area;
3929 unsigned long off;
3930 unsigned long end_index;
3931
3932 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3933 return -EINVAL;
3934
3935 size = PAGE_ALIGN(size);
3936
3937 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3938 return -EINVAL;
3939
3940 area = find_vm_area(kaddr);
3941 if (!area)
3942 return -EINVAL;
3943
3944 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3945 return -EINVAL;
3946
3947 if (check_add_overflow(size, off, &end_index) ||
3948 end_index > get_vm_area_size(area))
3949 return -EINVAL;
3950 kaddr += off;
3951
3952 do {
3953 struct page *page = vmalloc_to_page(kaddr);
3954 int ret;
3955
3956 ret = vm_insert_page(vma, uaddr, page);
3957 if (ret)
3958 return ret;
3959
3960 uaddr += PAGE_SIZE;
3961 kaddr += PAGE_SIZE;
3962 size -= PAGE_SIZE;
3963 } while (size > 0);
3964
3965 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
3966
3967 return 0;
3968 }
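/*
 * Illustrative sketch (not part of the original source): mapping a
 * sub-range of a vmalloc'ed buffer at a known user address, in the way
 * callers such as fs/proc/vmcore.c do. The buffer must have VM_USERMAP
 * set (vmalloc_user() arranges that); the surrounding variables are
 * hypothetical.
 *
 *	void *buf = vmalloc_user(8 * PAGE_SIZE);
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// Map pages 2..5 of buf starting at the user address uaddr.
 *	err = remap_vmalloc_range_partial(vma, uaddr, buf, 2,
 *					  4 * PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 * @pgoff is given in pages from @kaddr; @uaddr and @kaddr must be page
 * aligned, and @size is PAGE_ALIGN()ed internally.
 */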
3969
3970 /**
3971 * remap_vmalloc_range - map vmalloc pages to userspace
3972 * @vma: vma to cover (map full range of vma)
3973 * @addr: vmalloc memory
3974 * @pgoff: number of pages into addr before first page to map
3975 *
3976 * Returns: 0 for success, -Exxx on failure
3977 *
3978 * This function checks that @addr is a valid vmalloc'ed area, and
3979 * that it is big enough to cover the vma. It returns failure if
3980 * those criteria aren't met.
3981 *
3982 * Similar to remap_pfn_range() (see mm/memory.c)
3983 */
3984 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3985 unsigned long pgoff)
3986 {
3987 return remap_vmalloc_range_partial(vma, vma->vm_start,
3988 addr, pgoff,
3989 vma->vm_end - vma->vm_start);
3990 }
3991 EXPORT_SYMBOL(remap_vmalloc_range);
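/*
 * Illustrative sketch (not part of the original source): a typical
 * driver ->mmap() handler exposing a vmalloc_user() buffer to
 * userspace. The foo_dev structure and its buf member are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		// vma->vm_pgoff selects the first buffer page to map.
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 *
 * The buffer must have been allocated with vmalloc_user() (or otherwise
 * carry VM_USERMAP) and be large enough to cover the whole vma;
 * otherwise the call fails with -EINVAL.
 */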
3992
3993 void free_vm_area(struct vm_struct *area)
3994 {
3995 struct vm_struct *ret;
3996 ret = remove_vm_area(area->addr);
3997 BUG_ON(ret != area);
3998 kfree(area);
3999 }
4000 EXPORT_SYMBOL_GPL(free_vm_area);
4001
4002 #ifdef CONFIG_SMP
4003 static struct vmap_area *node_to_va(struct rb_node *n)
4004 {
4005 return rb_entry_safe(n, struct vmap_area, rb_node);
4006 }
4007
4008 /**
4009 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4010 * @addr: target address
4011 *
4012 * Returns: the vmap_area if it is found. If there is no such area,
4013 * the highest vmap_area below @addr is returned instead, i.e. one
4014 * with va->va_start < addr && va->va_end < addr, or NULL if there
4015 * are no areas below @addr at all.
4016 */
4017 static struct vmap_area *
4018 pvm_find_va_enclose_addr(unsigned long addr)
4019 {
4020 struct vmap_area *va, *tmp;
4021 struct rb_node *n;
4022
4023 n = free_vmap_area_root.rb_node;
4024 va = NULL;
4025
4026 while (n) {
4027 tmp = rb_entry(n, struct vmap_area, rb_node);
4028 if (tmp->va_start <= addr) {
4029 va = tmp;
4030 if (tmp->va_end >= addr)
4031 break;
4032
4033 n = n->rb_right;
4034 } else {
4035 n = n->rb_left;
4036 }
4037 }
4038
4039 return va;
4040 }
4041
4042 /**
4043 * pvm_determine_end_from_reverse - find the highest aligned address
4044 * of a free block below VMALLOC_END
4045 * @va:
4046 * in - the VA to start the search from (in reverse order);
4047 * out - the VA with the highest aligned end address.
4048 * @align: alignment for required highest address
4049 *
4050 * Returns: determined end address within vmap_area
4051 */
4052 static unsigned long
4053 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4054 {
4055 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4056 unsigned long addr;
4057
4058 if (likely(*va)) {
4059 list_for_each_entry_from_reverse((*va),
4060 &free_vmap_area_list, list) {
4061 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4062 if ((*va)->va_start < addr)
4063 return addr;
4064 }
4065 }
4066
4067 return 0;
4068 }
4069
4070 /**
4071 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4072 * @offsets: array containing offset of each area
4073 * @sizes: array containing size of each area
4074 * @nr_vms: the number of areas to allocate
4075 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4076 *
4077 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4078 * vm_structs on success, %NULL on failure
4079 *
4080 * The percpu allocator wants to use congruent vm areas so that it can
4081 * maintain the offsets among percpu areas. This function allocates
4082 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4083 * be scattered pretty far apart, with the distance between two areas
4084 * easily reaching gigabytes. To avoid interacting with regular
4085 * vmallocs, these areas are allocated from the top.
4086 *
4087 * Despite its complicated look, this allocator is rather simple. It
4088 * does everything top-down and scans free blocks from the end looking
4089 * for a matching base. While scanning, if any of the areas do not fit,
4090 * the base address is pulled down to fit that area. Scanning is
4091 * repeated until all the areas fit, then all necessary data structures
4092 * are inserted and the result is returned.
4093 */
4094 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4095 const size_t *sizes, int nr_vms,
4096 size_t align)
4097 {
4098 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4099 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4100 struct vmap_area **vas, *va;
4101 struct vm_struct **vms;
4102 int area, area2, last_area, term_area;
4103 unsigned long base, start, size, end, last_end, orig_start, orig_end;
4104 bool purged = false;
4105
4106 /* verify parameters and allocate data structures */
4107 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4108 for (last_area = 0, area = 0; area < nr_vms; area++) {
4109 start = offsets[area];
4110 end = start + sizes[area];
4111
4112 /* is everything aligned properly? */
4113 BUG_ON(!IS_ALIGNED(offsets[area], align));
4114 BUG_ON(!IS_ALIGNED(sizes[area], align));
4115
4116 /* detect the area with the highest address */
4117 if (start > offsets[last_area])
4118 last_area = area;
4119
4120 for (area2 = area + 1; area2 < nr_vms; area2++) {
4121 unsigned long start2 = offsets[area2];
4122 unsigned long end2 = start2 + sizes[area2];
4123
4124 BUG_ON(start2 < end && start < end2);
4125 }
4126 }
4127 last_end = offsets[last_area] + sizes[last_area];
4128
4129 if (vmalloc_end - vmalloc_start < last_end) {
4130 WARN_ON(true);
4131 return NULL;
4132 }
4133
4134 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4135 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4136 if (!vas || !vms)
4137 goto err_free2;
4138
4139 for (area = 0; area < nr_vms; area++) {
4140 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4141 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4142 if (!vas[area] || !vms[area])
4143 goto err_free;
4144 }
4145 retry:
4146 spin_lock(&free_vmap_area_lock);
4147
4148 /* start scanning - we scan from the top, begin with the last area */
4149 area = term_area = last_area;
4150 start = offsets[area];
4151 end = start + sizes[area];
4152
4153 va = pvm_find_va_enclose_addr(vmalloc_end);
4154 base = pvm_determine_end_from_reverse(&va, align) - end;
4155
4156 while (true) {
4157 /*
4158 * base might have underflowed, add last_end before
4159 * comparing.
4160 */
4161 if (base + last_end < vmalloc_start + last_end)
4162 goto overflow;
4163
4164 /*
4165 * Fitting base has not been found.
4166 */
4167 if (va == NULL)
4168 goto overflow;
4169
4170 /*
4171 * If required width exceeds current VA block, move
4172 * base downwards and then recheck.
4173 */
4174 if (base + end > va->va_end) {
4175 base = pvm_determine_end_from_reverse(&va, align) - end;
4176 term_area = area;
4177 continue;
4178 }
4179
4180 /*
4181 * If this VA does not fit, move base downwards and recheck.
4182 */
4183 if (base + start < va->va_start) {
4184 va = node_to_va(rb_prev(&va->rb_node));
4185 base = pvm_determine_end_from_reverse(&va, align) - end;
4186 term_area = area;
4187 continue;
4188 }
4189
4190 /*
4191 * This area fits, move on to the previous one. If
4192 * the previous one is the terminal one, we're done.
4193 */
4194 area = (area + nr_vms - 1) % nr_vms;
4195 if (area == term_area)
4196 break;
4197
4198 start = offsets[area];
4199 end = start + sizes[area];
4200 va = pvm_find_va_enclose_addr(base + end);
4201 }
4202
4203 /* we've found a fitting base, insert all va's */
4204 for (area = 0; area < nr_vms; area++) {
4205 int ret;
4206
4207 start = base + offsets[area];
4208 size = sizes[area];
4209
4210 va = pvm_find_va_enclose_addr(start);
4211 if (WARN_ON_ONCE(va == NULL))
4212 /* It is a BUG(), but trigger recovery instead. */
4213 goto recovery;
4214
4215 ret = adjust_va_to_fit_type(&free_vmap_area_root,
4216 &free_vmap_area_list,
4217 va, start, size);
4218 if (WARN_ON_ONCE(unlikely(ret)))
4219 /* It is a BUG(), but trigger recovery instead. */
4220 goto recovery;
4221
4222 /* Allocated area. */
4223 va = vas[area];
4224 va->va_start = start;
4225 va->va_end = start + size;
4226 }
4227
4228 spin_unlock(&free_vmap_area_lock);
4229
4230 /* populate the kasan shadow space */
4231 for (area = 0; area < nr_vms; area++) {
4232 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4233 goto err_free_shadow;
4234 }
4235
4236 /* insert all vm's */
4237 spin_lock(&vmap_area_lock);
4238 for (area = 0; area < nr_vms; area++) {
4239 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
4240
4241 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
4242 pcpu_get_vm_areas);
4243 }
4244 spin_unlock(&vmap_area_lock);
4245
4246 /*
4247 * Mark allocated areas as accessible. Do it now as a best-effort
4248 * approach, as they can be mapped outside of vmalloc code.
4249 * With hardware tag-based KASAN, marking is skipped for
4250 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4251 */
4252 for (area = 0; area < nr_vms; area++)
4253 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4254 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4255
4256 kfree(vas);
4257 return vms;
4258
4259 recovery:
4260 /*
4261 * Remove previously allocated areas. There is no
4262 * need to remove these areas from the busy tree,
4263 * because they are inserted only in the final step,
4264 * and only when pcpu_get_vm_areas() succeeds.
4265 */
4266 while (area--) {
4267 orig_start = vas[area]->va_start;
4268 orig_end = vas[area]->va_end;
4269 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4270 &free_vmap_area_list);
4271 if (va)
4272 kasan_release_vmalloc(orig_start, orig_end,
4273 va->va_start, va->va_end);
4274 vas[area] = NULL;
4275 }
4276
4277 overflow:
4278 spin_unlock(&free_vmap_area_lock);
4279 if (!purged) {
4280 reclaim_and_purge_vmap_areas();
4281 purged = true;
4282
4283 /* Before "retry", check if we recover. */
4284 for (area = 0; area < nr_vms; area++) {
4285 if (vas[area])
4286 continue;
4287
4288 vas[area] = kmem_cache_zalloc(
4289 vmap_area_cachep, GFP_KERNEL);
4290 if (!vas[area])
4291 goto err_free;
4292 }
4293
4294 goto retry;
4295 }
4296
4297 err_free:
4298 for (area = 0; area < nr_vms; area++) {
4299 if (vas[area])
4300 kmem_cache_free(vmap_area_cachep, vas[area]);
4301
4302 kfree(vms[area]);
4303 }
4304 err_free2:
4305 kfree(vas);
4306 kfree(vms);
4307 return NULL;
4308
4309 err_free_shadow:
4310 spin_lock(&free_vmap_area_lock);
4311 /*
4312 * We release all the vmalloc shadows, even the ones for regions that
4313 * haven't been successfully added. This relies on kasan_release_vmalloc
4314 * being able to tolerate this case.
4315 */
4316 for (area = 0; area < nr_vms; area++) {
4317 orig_start = vas[area]->va_start;
4318 orig_end = vas[area]->va_end;
4319 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4320 &free_vmap_area_list);
4321 if (va)
4322 kasan_release_vmalloc(orig_start, orig_end,
4323 va->va_start, va->va_end);
4324 vas[area] = NULL;
4325 kfree(vms[area]);
4326 }
4327 spin_unlock(&free_vmap_area_lock);
4328 kfree(vas);
4329 kfree(vms);
4330 return NULL;
4331 }
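/*
 * Illustrative sketch (not part of the original source): the shape of a
 * pcpu_get_vm_areas()/pcpu_free_vm_areas() pair as used by the percpu
 * first-chunk code. Two areas requested at offsets 0 and 1 GiB end up
 * at addresses exactly 1 GiB apart, so a unit's address can be derived
 * from a percpu pointer by adding a constant offset. The concrete
 * numbers and variable names are hypothetical.
 *
 *	unsigned long offsets[] = { 0, SZ_1G };
 *	size_t sizes[] = { SZ_64K, SZ_64K };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
 *	if (!vms)
 *		return -ENOMEM;
 *	// Here vms[1]->addr - vms[0]->addr == SZ_1G is guaranteed.
 *	...
 *	pcpu_free_vm_areas(vms, 2);
 */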
4332
4333 /**
4334 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4335 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4336 * @nr_vms: the number of allocated areas
4337 *
4338 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4339 */
4340 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4341 {
4342 int i;
4343
4344 for (i = 0; i < nr_vms; i++)
4345 free_vm_area(vms[i]);
4346 kfree(vms);
4347 }
4348 #endif /* CONFIG_SMP */
4349
4350 #ifdef CONFIG_PRINTK
4351 bool vmalloc_dump_obj(void *object)
4352 {
4353 void *objp = (void *)PAGE_ALIGN((unsigned long)object);
4354 const void *caller;
4355 struct vm_struct *vm;
4356 struct vmap_area *va;
4357 unsigned long addr;
4358 unsigned int nr_pages;
4359
4360 if (!spin_trylock(&vmap_area_lock))
4361 return false;
4362 va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
4363 if (!va) {
4364 spin_unlock(&vmap_area_lock);
4365 return false;
4366 }
4367
4368 vm = va->vm;
4369 if (!vm) {
4370 spin_unlock(&vmap_area_lock);
4371 return false;
4372 }
4373 addr = (unsigned long)vm->addr;
4374 caller = vm->caller;
4375 nr_pages = vm->nr_pages;
4376 spin_unlock(&vmap_area_lock);
4377 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4378 nr_pages, addr, caller);
4379 return true;
4380 }
4381 #endif
4382
4383 #ifdef CONFIG_PROC_FS
4384 static void *s_start(struct seq_file *m, loff_t *pos)
4385 __acquires(&vmap_purge_lock)
4386 __acquires(&vmap_area_lock)
4387 {
4388 mutex_lock(&vmap_purge_lock);
4389 spin_lock(&vmap_area_lock);
4390
4391 return seq_list_start(&vmap_area_list, *pos);
4392 }
4393
4394 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4395 {
4396 return seq_list_next(p, &vmap_area_list, pos);
4397 }
4398
4399 static void s_stop(struct seq_file *m, void *p)
4400 __releases(&vmap_area_lock)
4401 __releases(&vmap_purge_lock)
4402 {
4403 spin_unlock(&vmap_area_lock);
4404 mutex_unlock(&vmap_purge_lock);
4405 }
4406
4407 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4408 {
4409 if (IS_ENABLED(CONFIG_NUMA)) {
4410 unsigned int nr, *counters = m->private;
4411 unsigned int step = 1U << vm_area_page_order(v);
4412
4413 if (!counters)
4414 return;
4415
4416 if (v->flags & VM_UNINITIALIZED)
4417 return;
4418 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4419 smp_rmb();
4420
4421 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4422
4423 for (nr = 0; nr < v->nr_pages; nr += step)
4424 counters[page_to_nid(v->pages[nr])] += step;
4425 for_each_node_state(nr, N_HIGH_MEMORY)
4426 if (counters[nr])
4427 seq_printf(m, " N%u=%u", nr, counters[nr]);
4428 }
4429 }
4430
4431 static void show_purge_info(struct seq_file *m)
4432 {
4433 struct vmap_area *va;
4434
4435 spin_lock(&purge_vmap_area_lock);
4436 list_for_each_entry(va, &purge_vmap_area_list, list) {
4437 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4438 (void *)va->va_start, (void *)va->va_end,
4439 va->va_end - va->va_start);
4440 }
4441 spin_unlock(&purge_vmap_area_lock);
4442 }
4443
4444 static int s_show(struct seq_file *m, void *p)
4445 {
4446 struct vmap_area *va;
4447 struct vm_struct *v;
4448
4449 va = list_entry(p, struct vmap_area, list);
4450
4451 if (!va->vm) {
4452 if (va->flags & VMAP_RAM)
4453 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4454 (void *)va->va_start, (void *)va->va_end,
4455 va->va_end - va->va_start);
4456
4457 goto final;
4458 }
4459
4460 v = va->vm;
4461
4462 seq_printf(m, "0x%pK-0x%pK %7ld",
4463 v->addr, v->addr + v->size, v->size);
4464
4465 if (v->caller)
4466 seq_printf(m, " %pS", v->caller);
4467
4468 if (v->nr_pages)
4469 seq_printf(m, " pages=%d", v->nr_pages);
4470
4471 if (v->phys_addr)
4472 seq_printf(m, " phys=%pa", &v->phys_addr);
4473
4474 if (v->flags & VM_IOREMAP)
4475 seq_puts(m, " ioremap");
4476
4477 if (v->flags & VM_SPARSE)
4478 seq_puts(m, " sparse");
4479
4480 if (v->flags & VM_ALLOC)
4481 seq_puts(m, " vmalloc");
4482
4483 if (v->flags & VM_MAP)
4484 seq_puts(m, " vmap");
4485
4486 if (v->flags & VM_USERMAP)
4487 seq_puts(m, " user");
4488
4489 if (v->flags & VM_DMA_COHERENT)
4490 seq_puts(m, " dma-coherent");
4491
4492 if (is_vmalloc_addr(v->pages))
4493 seq_puts(m, " vpages");
4494
4495 show_numa_info(m, v);
4496 seq_putc(m, '\n');
4497
4498 /*
4499 * As a final step, dump "unpurged" areas.
4500 */
4501 final:
4502 if (list_is_last(&va->list, &vmap_area_list))
4503 show_purge_info(m);
4504
4505 return 0;
4506 }
4507
4508 static const struct seq_operations vmalloc_op = {
4509 .start = s_start,
4510 .next = s_next,
4511 .stop = s_stop,
4512 .show = s_show,
4513 };
4514
4515 static int __init proc_vmalloc_init(void)
4516 {
4517 if (IS_ENABLED(CONFIG_NUMA))
4518 proc_create_seq_private("vmallocinfo", 0400, NULL,
4519 &vmalloc_op,
4520 nr_node_ids * sizeof(unsigned int), NULL);
4521 else
4522 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
4523 return 0;
4524 }
4525 module_init(proc_vmalloc_init);
4526
4527 #endif
4528
4529 void __init vmalloc_init(void)
4530 {
4531 struct vmap_area *va;
4532 struct vm_struct *tmp;
4533 int i;
4534
4535 /*
4536 * Create the cache for vmap_area objects.
4537 */
4538 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
4539
4540 for_each_possible_cpu(i) {
4541 struct vmap_block_queue *vbq;
4542 struct vfree_deferred *p;
4543
4544 vbq = &per_cpu(vmap_block_queue, i);
4545 spin_lock_init(&vbq->lock);
4546 INIT_LIST_HEAD(&vbq->free);
4547 p = &per_cpu(vfree_deferred, i);
4548 init_llist_head(&p->list);
4549 INIT_WORK(&p->wq, delayed_vfree_work);
4550 xa_init(&vbq->vmap_blocks);
4551 }
4552
4553 /* Import existing vmlist entries. */
4554 for (tmp = vmlist; tmp; tmp = tmp->next) {
4555 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4556 if (WARN_ON_ONCE(!va))
4557 continue;
4558
4559 va->va_start = (unsigned long)tmp->addr;
4560 va->va_end = va->va_start + tmp->size;
4561 va->vm = tmp;
4562 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
4563 }
4564
4565 /*
4566 * Now we can initialize a free vmap space.
4567 */
4568 vmap_init_free_space();
4569 vmap_initialized = true;
4570 }