// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993 Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
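
/*
 * Illustrative sketch (added for exposition, not part of the upstream file):
 * callers that may hold either a kmalloc()'ed or a vmalloc()'ed pointer use
 * this check to pick the matching release path, e.g. the kvfree() pattern:
 *
 *	if (is_vmalloc_addr(ptr))
 *		vfree(ptr);
 *	else
 *		kfree(ptr);
 */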

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					 ioremap_max_page_shift);
	return err;
}
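
/*
 * Illustrative sketch (added for exposition, not part of the upstream file):
 * a generic ioremap() implementation typically reserves a vmap area first and
 * then maps the physical range into it, roughly:
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 */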

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
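
/*
 * Illustrative sketch (added for exposition, not part of the upstream file):
 * a caller that mapped a kernel virtual range, for example with
 * ioremap_page_range() above, tears the mapping down with a matching call:
 *
 *	vunmap_range(vaddr, vaddr + size);
 *
 * after which any access to the range before it is re-mapped is a bug.
 */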

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
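
/*
 * Illustrative sketch (added for exposition, not part of the upstream file):
 * drivers commonly use vmalloc_to_page() to collect the backing pages of a
 * vmalloc()'ed buffer, for example to build a scatterlist:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
 */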


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with parent rb_node and correct direction, i name
	 * it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() puts everything to
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when remove the node and rotate.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no any modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * ongoing.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * a search length is adjusted to account for worst case alignment
 * overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * due to "vstart" restriction or an alignment overhead
			 * that is bigger then PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * parent's start address adding "1" because we do not want
					 * to enter same sub-tree after it has already been checked
					 * and no suitable free block found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
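
/*
 * Worked example (illustrative, added for exposition; not derived from this
 * file's callers): for a request of size = 2 * PAGE_SIZE with
 * align = 16 * PAGE_SIZE and adjust_search_size == true, the search length
 * becomes 2 * PAGE_SIZE + 16 * PAGE_SIZE - 1. Only subtrees whose
 * subtree_max_size can absorb that worst-case alignment padding are descended
 * into; the exact aligned start is then validated by is_within_this_va().
 */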

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
		      struct vmap_area *va, unsigned long nva_start_addr,
		      unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * What happens if an allocation gets failed. Basically,
			 * an "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise a vend is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;
	int ret;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks(their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where a requested size corresponds to exactly
	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With adjusted search length an allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags)
{
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
		size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;
	va->flags = va_flags;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
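
/*
 * Worked example (illustrative, added for exposition): with 4 KiB pages,
 * 32 MiB corresponds to 8192 pages. On a 16-CPU system fls(16) == 5, so
 * lazy_max_pages() returns 5 * 8192 = 40960 pages, i.e. roughly 160 MiB of
 * lazily freed vmap space may accumulate before a purge and its global TLB
 * flush are triggered.
 */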
1705
4d36e6f8 1706static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
db64fe02 1707
0574ecd1 1708/*
f0953a1b 1709 * Serialize vmap purging. There is no actual critical section protected
153090f2 1710 * by this lock, but we want to avoid concurrent calls for performance
0574ecd1
CH
1711 * reasons and to make the pcpu_get_vm_areas more deterministic.
1712 */
f9e09977 1713static DEFINE_MUTEX(vmap_purge_lock);
0574ecd1 1714
02b709df
NP
1715/* for per-CPU blocks */
1716static void purge_fragmented_blocks_allcpus(void);
1717
db64fe02
NP
1718/*
1719 * Purges all lazily-freed vmap areas.
db64fe02 1720 */
0574ecd1 1721static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
db64fe02 1722{
4d36e6f8 1723 unsigned long resched_threshold;
6030fd5f 1724 unsigned int num_purged_areas = 0;
baa468a6 1725 struct list_head local_purge_list;
96e2db45 1726 struct vmap_area *va, *n_va;
db64fe02 1727
0574ecd1 1728 lockdep_assert_held(&vmap_purge_lock);
02b709df 1729
96e2db45
URS
1730 spin_lock(&purge_vmap_area_lock);
1731 purge_vmap_area_root = RB_ROOT;
baa468a6 1732 list_replace_init(&purge_vmap_area_list, &local_purge_list);
96e2db45
URS
1733 spin_unlock(&purge_vmap_area_lock);
1734
baa468a6 1735 if (unlikely(list_empty(&local_purge_list)))
6030fd5f 1736 goto out;
68571be9 1737
96e2db45 1738 start = min(start,
baa468a6 1739 list_first_entry(&local_purge_list,
96e2db45
URS
1740 struct vmap_area, list)->va_start);
1741
1742 end = max(end,
baa468a6 1743 list_last_entry(&local_purge_list,
96e2db45 1744 struct vmap_area, list)->va_end);
db64fe02 1745
0574ecd1 1746 flush_tlb_kernel_range(start, end);
4d36e6f8 1747 resched_threshold = lazy_max_pages() << 1;
db64fe02 1748
e36176be 1749 spin_lock(&free_vmap_area_lock);
baa468a6 1750 list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
4d36e6f8 1751 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
3c5c3cfb
DA
1752 unsigned long orig_start = va->va_start;
1753 unsigned long orig_end = va->va_end;
763b218d 1754
dd3b8353
URS
1755 /*
1756 * Finally insert or merge lazily-freed area. It is
1757 * detached and there is no need to "unlink" it from
1758 * anything.
1759 */
96e2db45
URS
1760 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1761 &free_vmap_area_list);
3c5c3cfb 1762
9c801f61
URS
1763 if (!va)
1764 continue;
1765
3c5c3cfb
DA
1766 if (is_vmalloc_or_module_addr((void *)orig_start))
1767 kasan_release_vmalloc(orig_start, orig_end,
1768 va->va_start, va->va_end);
dd3b8353 1769
4d36e6f8 1770 atomic_long_sub(nr, &vmap_lazy_nr);
6030fd5f 1771 num_purged_areas++;
68571be9 1772
4d36e6f8 1773 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
e36176be 1774 cond_resched_lock(&free_vmap_area_lock);
763b218d 1775 }
e36176be 1776 spin_unlock(&free_vmap_area_lock);
6030fd5f
URS
1777
1778out:
1779 trace_purge_vmap_area_lazy(start, end, num_purged_areas);
1780 return num_purged_areas > 0;
db64fe02
NP
1781}
1782
1783/*
1784 * Kick off a purge of the outstanding lazy areas.
1785 */
1786static void purge_vmap_area_lazy(void)
1787{
f9e09977 1788 mutex_lock(&vmap_purge_lock);
0574ecd1
CH
1789 purge_fragmented_blocks_allcpus();
1790 __purge_vmap_area_lazy(ULONG_MAX, 0);
f9e09977 1791 mutex_unlock(&vmap_purge_lock);
db64fe02
NP
1792}
1793
690467c8
URS
1794static void drain_vmap_area_work(struct work_struct *work)
1795{
1796 unsigned long nr_lazy;
1797
1798 do {
1799 mutex_lock(&vmap_purge_lock);
1800 __purge_vmap_area_lazy(ULONG_MAX, 0);
1801 mutex_unlock(&vmap_purge_lock);
1802
1803 /* Recheck if further work is required. */
1804 nr_lazy = atomic_long_read(&vmap_lazy_nr);
1805 } while (nr_lazy > lazy_max_pages());
1806}
1807
db64fe02 1808/*
edd89818
URS
1809 * Free a vmap area, caller ensuring that the area has been unmapped,
1810 * unlinked, and that flush_cache_vunmap() has been called for the correct
1811 * range previously.
db64fe02 1812 */
64141da5 1813static void free_vmap_area_noflush(struct vmap_area *va)
db64fe02 1814{
8c4196fe
URS
1815 unsigned long nr_lazy_max = lazy_max_pages();
1816 unsigned long va_start = va->va_start;
4d36e6f8 1817 unsigned long nr_lazy;
80c4bd7a 1818
edd89818
URS
1819 if (WARN_ON_ONCE(!list_empty(&va->list)))
1820 return;
dd3b8353 1821
4d36e6f8
URS
1822 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1823 PAGE_SHIFT, &vmap_lazy_nr);
80c4bd7a 1824
96e2db45
URS
1825 /*
1826 * Merge or place it to the purge tree/list.
1827 */
1828 spin_lock(&purge_vmap_area_lock);
1829 merge_or_add_vmap_area(va,
1830 &purge_vmap_area_root, &purge_vmap_area_list);
1831 spin_unlock(&purge_vmap_area_lock);
80c4bd7a 1832
8c4196fe
URS
1833 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
1834
96e2db45 1835 /* After this point, we may free va at any time */
8c4196fe 1836 if (unlikely(nr_lazy > nr_lazy_max))
690467c8 1837 schedule_work(&drain_vmap_work);
db64fe02
NP
1838}
1839
b29acbdc
NP
1840/*
1841 * Free and unmap a vmap area
1842 */
1843static void free_unmap_vmap_area(struct vmap_area *va)
1844{
1845 flush_cache_vunmap(va->va_start, va->va_end);
4ad0ae8c 1846 vunmap_range_noflush(va->va_start, va->va_end);
8e57f8ac 1847 if (debug_pagealloc_enabled_static())
82a2e924
CP
1848 flush_tlb_kernel_range(va->va_start, va->va_end);
1849
c8eef01e 1850 free_vmap_area_noflush(va);
b29acbdc
NP
1851}
1852
993d0b28 1853struct vmap_area *find_vmap_area(unsigned long addr)
db64fe02
NP
1854{
1855 struct vmap_area *va;
1856
1857 spin_lock(&vmap_area_lock);
899c6efe 1858 va = __find_vmap_area(addr, &vmap_area_root);
db64fe02
NP
1859 spin_unlock(&vmap_area_lock);
1860
1861 return va;
1862}
1863
edd89818
URS
1864static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
1865{
1866 struct vmap_area *va;
1867
1868 spin_lock(&vmap_area_lock);
1869 va = __find_vmap_area(addr, &vmap_area_root);
1870 if (va)
1871 unlink_va(va, &vmap_area_root);
1872 spin_unlock(&vmap_area_lock);
1873
1874 return va;
1875}
1876
db64fe02
NP
1877/*** Per cpu kva allocator ***/
1878
1879/*
1880 * vmap space is limited especially on 32 bit architectures. Ensure there is
1881 * room for at least 16 percpu vmap blocks per CPU.
1882 */
1883/*
1884 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1885 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1886 * instead (we just need a rough idea)
1887 */
1888#if BITS_PER_LONG == 32
1889#define VMALLOC_SPACE (128UL*1024*1024)
1890#else
1891#define VMALLOC_SPACE (128UL*1024*1024*1024)
1892#endif
1893
1894#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
1895#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
1896#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
1897#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
1898#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
1899#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
f982f915
CL
1900#define VMAP_BBMAP_BITS \
1901 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
1902 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
1903 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
db64fe02
NP
1904
1905#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
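/*
 * Worked example of the sizing above (illustrative only): on a 64-bit
 * build with 4KB pages and NR_CPUS == 64, VMALLOC_PAGES is
 * 128GB / 4KB = 32M pages, so the estimate is 32M / 64 / 16 = 32768 bits.
 * That exceeds VMAP_BBMAP_BITS_MAX, so VMAP_BBMAP_BITS clamps to 1024 and
 * VMAP_BLOCK_SIZE becomes 1024 * 4KB = 4MB per block.
 */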
1906
869176a0
BH
1907#define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
1908#define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
1909#define VMAP_FLAGS_MASK 0x3
1910
db64fe02
NP
1911struct vmap_block_queue {
1912 spinlock_t lock;
1913 struct list_head free;
db64fe02
NP
1914};
1915
1916struct vmap_block {
1917 spinlock_t lock;
1918 struct vmap_area *va;
db64fe02 1919 unsigned long free, dirty;
d76f9954 1920 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
7d61bfe8 1921 unsigned long dirty_min, dirty_max; /*< dirty range */
de560423
NP
1922 struct list_head free_list;
1923 struct rcu_head rcu_head;
02b709df 1924 struct list_head purge;
db64fe02
NP
1925};
1926
1927/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1928static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1929
1930/*
0f14599c 1931 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
db64fe02
NP
1932 * in the free path. Could get rid of this if we change the API to return a
1933 * "cookie" from alloc, to be passed to free. But no big deal yet.
1934 */
0f14599c 1935static DEFINE_XARRAY(vmap_blocks);
db64fe02
NP
1936
1937/*
1938 * We should probably have a fallback mechanism to allocate virtual memory
1939 * out of partially filled vmap blocks. However vmap block sizing should be
1940 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1941 * big problem.
1942 */
1943
1944static unsigned long addr_to_vb_idx(unsigned long addr)
1945{
1946 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1947 addr /= VMAP_BLOCK_SIZE;
1948 return addr;
1949}
1950
cf725ce2
RP
1951static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1952{
1953 unsigned long addr;
1954
1955 addr = va_start + (pages_off << PAGE_SHIFT);
1956 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1957 return (void *)addr;
1958}
1959
1960/**
1961 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1962 * block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
1963 * @order: how many 2^order pages should be occupied in newly allocated block
1964 * @gfp_mask: flags for the page level allocator
1965 *
a862f68a 1966 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
cf725ce2
RP
1967 */
1968static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
db64fe02
NP
1969{
1970 struct vmap_block_queue *vbq;
1971 struct vmap_block *vb;
1972 struct vmap_area *va;
1973 unsigned long vb_idx;
1974 int node, err;
cf725ce2 1975 void *vaddr;
db64fe02
NP
1976
1977 node = numa_node_id();
1978
1979 vb = kmalloc_node(sizeof(struct vmap_block),
1980 gfp_mask & GFP_RECLAIM_MASK, node);
1981 if (unlikely(!vb))
1982 return ERR_PTR(-ENOMEM);
1983
1984 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1985 VMALLOC_START, VMALLOC_END,
869176a0
BH
1986 node, gfp_mask,
1987 VMAP_RAM|VMAP_BLOCK);
ddf9c6d4 1988 if (IS_ERR(va)) {
db64fe02 1989 kfree(vb);
e7d86340 1990 return ERR_CAST(va);
db64fe02
NP
1991 }
1992
cf725ce2 1993 vaddr = vmap_block_vaddr(va->va_start, 0);
db64fe02
NP
1994 spin_lock_init(&vb->lock);
1995 vb->va = va;
cf725ce2
RP
1996 /* At least something should be left free */
1997 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
d76f9954 1998 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
cf725ce2 1999 vb->free = VMAP_BBMAP_BITS - (1UL << order);
db64fe02 2000 vb->dirty = 0;
7d61bfe8
RP
2001 vb->dirty_min = VMAP_BBMAP_BITS;
2002 vb->dirty_max = 0;
d76f9954 2003 bitmap_set(vb->used_map, 0, (1UL << order));
db64fe02 2004 INIT_LIST_HEAD(&vb->free_list);
db64fe02
NP
2005
2006 vb_idx = addr_to_vb_idx(va->va_start);
0f14599c
MWO
2007 err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
2008 if (err) {
2009 kfree(vb);
2010 free_vmap_area(va);
2011 return ERR_PTR(err);
2012 }
db64fe02 2013
3f804920 2014 vbq = raw_cpu_ptr(&vmap_block_queue);
db64fe02 2015 spin_lock(&vbq->lock);
68ac546f 2016 list_add_tail_rcu(&vb->free_list, &vbq->free);
db64fe02 2017 spin_unlock(&vbq->lock);
db64fe02 2018
cf725ce2 2019 return vaddr;
db64fe02
NP
2020}
2021
db64fe02
NP
2022static void free_vmap_block(struct vmap_block *vb)
2023{
2024 struct vmap_block *tmp;
db64fe02 2025
0f14599c 2026 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
db64fe02
NP
2027 BUG_ON(tmp != vb);
2028
edd89818
URS
2029 spin_lock(&vmap_area_lock);
2030 unlink_va(vb->va, &vmap_area_root);
2031 spin_unlock(&vmap_area_lock);
2032
64141da5 2033 free_vmap_area_noflush(vb->va);
22a3c7d1 2034 kfree_rcu(vb, rcu_head);
db64fe02
NP
2035}
2036
02b709df
NP
2037static void purge_fragmented_blocks(int cpu)
2038{
2039 LIST_HEAD(purge);
2040 struct vmap_block *vb;
2041 struct vmap_block *n_vb;
2042 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2043
2044 rcu_read_lock();
2045 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2046
2047 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
2048 continue;
2049
2050 spin_lock(&vb->lock);
2051 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
2052 vb->free = 0; /* prevent further allocs after releasing lock */
2053 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
7d61bfe8
RP
2054 vb->dirty_min = 0;
2055 vb->dirty_max = VMAP_BBMAP_BITS;
02b709df
NP
2056 spin_lock(&vbq->lock);
2057 list_del_rcu(&vb->free_list);
2058 spin_unlock(&vbq->lock);
2059 spin_unlock(&vb->lock);
2060 list_add_tail(&vb->purge, &purge);
2061 } else
2062 spin_unlock(&vb->lock);
2063 }
2064 rcu_read_unlock();
2065
2066 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
2067 list_del(&vb->purge);
2068 free_vmap_block(vb);
2069 }
2070}
2071
02b709df
NP
2072static void purge_fragmented_blocks_allcpus(void)
2073{
2074 int cpu;
2075
2076 for_each_possible_cpu(cpu)
2077 purge_fragmented_blocks(cpu);
2078}
2079
db64fe02
NP
2080static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2081{
2082 struct vmap_block_queue *vbq;
2083 struct vmap_block *vb;
cf725ce2 2084 void *vaddr = NULL;
db64fe02
NP
2085 unsigned int order;
2086
891c49ab 2087 BUG_ON(offset_in_page(size));
db64fe02 2088 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
aa91c4d8
JK
2089 if (WARN_ON(size == 0)) {
2090 /*
2091 * Allocating 0 bytes isn't what the caller wants, since
2092 * get_order(0) returns a funny result. Just warn and terminate
2093 * early.
2094 */
2095 return NULL;
2096 }
db64fe02
NP
2097 order = get_order(size);
2098
db64fe02 2099 rcu_read_lock();
3f804920 2100 vbq = raw_cpu_ptr(&vmap_block_queue);
db64fe02 2101 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
cf725ce2 2102 unsigned long pages_off;
db64fe02
NP
2103
2104 spin_lock(&vb->lock);
cf725ce2
RP
2105 if (vb->free < (1UL << order)) {
2106 spin_unlock(&vb->lock);
2107 continue;
2108 }
02b709df 2109
cf725ce2
RP
2110 pages_off = VMAP_BBMAP_BITS - vb->free;
2111 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
02b709df 2112 vb->free -= 1UL << order;
d76f9954 2113 bitmap_set(vb->used_map, pages_off, (1UL << order));
02b709df
NP
2114 if (vb->free == 0) {
2115 spin_lock(&vbq->lock);
2116 list_del_rcu(&vb->free_list);
2117 spin_unlock(&vbq->lock);
2118 }
cf725ce2 2119
02b709df
NP
2120 spin_unlock(&vb->lock);
2121 break;
db64fe02 2122 }
02b709df 2123
db64fe02
NP
2124 rcu_read_unlock();
2125
cf725ce2
RP
2126 /* Allocate new block if nothing was found */
2127 if (!vaddr)
2128 vaddr = new_vmap_block(order, gfp_mask);
db64fe02 2129
cf725ce2 2130 return vaddr;
db64fe02
NP
2131}
2132
78a0e8c4 2133static void vb_free(unsigned long addr, unsigned long size)
db64fe02
NP
2134{
2135 unsigned long offset;
db64fe02
NP
2136 unsigned int order;
2137 struct vmap_block *vb;
2138
891c49ab 2139 BUG_ON(offset_in_page(size));
db64fe02 2140 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
b29acbdc 2141
78a0e8c4 2142 flush_cache_vunmap(addr, addr + size);
b29acbdc 2143
db64fe02 2144 order = get_order(size);
78a0e8c4 2145 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
0f14599c 2146 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
d76f9954
BH
2147 spin_lock(&vb->lock);
2148 bitmap_clear(vb->used_map, offset, (1UL << order));
2149 spin_unlock(&vb->lock);
db64fe02 2150
4ad0ae8c 2151 vunmap_range_noflush(addr, addr + size);
64141da5 2152
8e57f8ac 2153 if (debug_pagealloc_enabled_static())
78a0e8c4 2154 flush_tlb_kernel_range(addr, addr + size);
82a2e924 2155
db64fe02 2156 spin_lock(&vb->lock);
7d61bfe8
RP
2157
2158 /* Expand dirty range */
2159 vb->dirty_min = min(vb->dirty_min, offset);
2160 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
d086817d 2161
db64fe02
NP
2162 vb->dirty += 1UL << order;
2163 if (vb->dirty == VMAP_BBMAP_BITS) {
de560423 2164 BUG_ON(vb->free);
db64fe02
NP
2165 spin_unlock(&vb->lock);
2166 free_vmap_block(vb);
2167 } else
2168 spin_unlock(&vb->lock);
2169}
2170
868b104d 2171static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
db64fe02 2172{
db64fe02 2173 int cpu;
db64fe02 2174
9b463334
JF
2175 if (unlikely(!vmap_initialized))
2176 return;
2177
5803ed29
CH
2178 might_sleep();
2179
db64fe02
NP
2180 for_each_possible_cpu(cpu) {
2181 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2182 struct vmap_block *vb;
2183
2184 rcu_read_lock();
2185 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
db64fe02 2186 spin_lock(&vb->lock);
ad216c03 2187 if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
7d61bfe8 2188 unsigned long va_start = vb->va->va_start;
db64fe02 2189 unsigned long s, e;
b136be5e 2190
7d61bfe8
RP
2191 s = va_start + (vb->dirty_min << PAGE_SHIFT);
2192 e = va_start + (vb->dirty_max << PAGE_SHIFT);
db64fe02 2193
7d61bfe8
RP
2194 start = min(s, start);
2195 end = max(e, end);
db64fe02 2196
7d61bfe8 2197 flush = 1;
db64fe02
NP
2198 }
2199 spin_unlock(&vb->lock);
2200 }
2201 rcu_read_unlock();
2202 }
2203
f9e09977 2204 mutex_lock(&vmap_purge_lock);
0574ecd1
CH
2205 purge_fragmented_blocks_allcpus();
2206 if (!__purge_vmap_area_lazy(start, end) && flush)
2207 flush_tlb_kernel_range(start, end);
f9e09977 2208 mutex_unlock(&vmap_purge_lock);
db64fe02 2209}
868b104d
RE
2210
2211/**
2212 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2213 *
2214 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2215 * to amortize TLB flushing overheads. What this means is that any page you
2216 * have now may, in a former life, have been mapped into a kernel virtual
2217 * address by the vmap layer, and so there might be some CPUs with TLB entries
2218 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2219 *
2220 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2221 * be sure that none of the pages we have control over will have any aliases
2222 * from the vmap layer.
2223 */
2224void vm_unmap_aliases(void)
2225{
2226 unsigned long start = ULONG_MAX, end = 0;
2227 int flush = 0;
2228
2229 _vm_unmap_aliases(start, end, flush);
2230}
db64fe02
NP
2231EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2232
2233/**
2234 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2235 * @mem: the pointer returned by vm_map_ram
2236 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2237 */
2238void vm_unmap_ram(const void *mem, unsigned int count)
2239{
65ee03c4 2240 unsigned long size = (unsigned long)count << PAGE_SHIFT;
4aff1dc4 2241 unsigned long addr = (unsigned long)kasan_reset_tag(mem);
9c3acf60 2242 struct vmap_area *va;
db64fe02 2243
5803ed29 2244 might_sleep();
db64fe02
NP
2245 BUG_ON(!addr);
2246 BUG_ON(addr < VMALLOC_START);
2247 BUG_ON(addr > VMALLOC_END);
a1c0b1a0 2248 BUG_ON(!PAGE_ALIGNED(addr));
db64fe02 2249
d98c9e83
AR
2250 kasan_poison_vmalloc(mem, size);
2251
9c3acf60 2252 if (likely(count <= VMAP_MAX_ALLOC)) {
05e3ff95 2253 debug_check_no_locks_freed(mem, size);
78a0e8c4 2254 vb_free(addr, size);
9c3acf60
CH
2255 return;
2256 }
2257
edd89818 2258 va = find_unlink_vmap_area(addr);
14687619
URS
2259 if (WARN_ON_ONCE(!va))
2260 return;
2261
05e3ff95
CP
2262 debug_check_no_locks_freed((void *)va->va_start,
2263 (va->va_end - va->va_start));
9c3acf60 2264 free_unmap_vmap_area(va);
db64fe02
NP
2265}
2266EXPORT_SYMBOL(vm_unmap_ram);
2267
2268/**
2269 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2270 * @pages: an array of pointers to the pages to be mapped
2271 * @count: number of pages
2272 * @node: prefer to allocate data structures on this node
e99c97ad 2273 *
36437638
GK
2274 * If you use this function for less than VMAP_MAX_ALLOC pages, it can be
2275 * faster than vmap(). However, if you mix long-lived and short-lived
2276 * objects with vm_map_ram(), it can consume lots of address space through
2277 * fragmentation (especially on a 32bit machine), and you could eventually
2278 * see failures. Please use this function only for short-lived objects.
2279 *
e99c97ad 2280 * Returns: a pointer to the address that has been mapped, or %NULL on failure
db64fe02 2281 */
d4efd79a 2282void *vm_map_ram(struct page **pages, unsigned int count, int node)
db64fe02 2283{
65ee03c4 2284 unsigned long size = (unsigned long)count << PAGE_SHIFT;
db64fe02
NP
2285 unsigned long addr;
2286 void *mem;
2287
2288 if (likely(count <= VMAP_MAX_ALLOC)) {
2289 mem = vb_alloc(size, GFP_KERNEL);
2290 if (IS_ERR(mem))
2291 return NULL;
2292 addr = (unsigned long)mem;
2293 } else {
2294 struct vmap_area *va;
2295 va = alloc_vmap_area(size, PAGE_SIZE,
869176a0
BH
2296 VMALLOC_START, VMALLOC_END,
2297 node, GFP_KERNEL, VMAP_RAM);
db64fe02
NP
2298 if (IS_ERR(va))
2299 return NULL;
2300
2301 addr = va->va_start;
2302 mem = (void *)addr;
2303 }
d98c9e83 2304
b67177ec
NP
2305 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2306 pages, PAGE_SHIFT) < 0) {
db64fe02
NP
2307 vm_unmap_ram(mem, count);
2308 return NULL;
2309 }
b67177ec 2310
23689e91
AK
2311 /*
2312 * Mark the pages as accessible, now that they are mapped.
2313 * With hardware tag-based KASAN, marking is skipped for
2314 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2315 */
f6e39794 2316 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
19f1c3ac 2317
db64fe02
NP
2318 return mem;
2319}
2320EXPORT_SYMBOL(vm_map_ram);
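/*
 * Illustrative usage sketch for the vm_map_ram()/vm_unmap_ram() pair;
 * 'pages', 'nr' and 'src' are hypothetical caller-owned values:
 *
 *	void *buf = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(buf, nr);
 *
 * Note that vm_unmap_ram() must be passed the same page count that was
 * given to vm_map_ram(); partial unmaps are not supported.
 */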
2321
4341fa45 2322static struct vm_struct *vmlist __initdata;
92eac168 2323
121e6f32
NP
2324static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2325{
2326#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2327 return vm->page_order;
2328#else
2329 return 0;
2330#endif
2331}
2332
2333static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2334{
2335#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2336 vm->page_order = order;
2337#else
2338 BUG_ON(order != 0);
2339#endif
2340}
2341
be9b7335
NP
2342/**
2343 * vm_area_add_early - add vmap area early during boot
2344 * @vm: vm_struct to add
2345 *
2346 * This function is used to add fixed kernel vm area to vmlist before
2347 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2348 * should contain proper values and the other fields should be zero.
2349 *
2350 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2351 */
2352void __init vm_area_add_early(struct vm_struct *vm)
2353{
2354 struct vm_struct *tmp, **p;
2355
2356 BUG_ON(vmap_initialized);
2357 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2358 if (tmp->addr >= vm->addr) {
2359 BUG_ON(tmp->addr < vm->addr + vm->size);
2360 break;
2361 } else
2362 BUG_ON(tmp->addr + tmp->size > vm->addr);
2363 }
2364 vm->next = *p;
2365 *p = vm;
2366}
2367
f0aa6617
TH
2368/**
2369 * vm_area_register_early - register vmap area early during boot
2370 * @vm: vm_struct to register
c0c0a293 2371 * @align: requested alignment
f0aa6617
TH
2372 *
2373 * This function is used to register kernel vm area before
2374 * vmalloc_init() is called. @vm->size and @vm->flags should contain
2375 * proper values on entry and other fields should be zero. On return,
2376 * vm->addr contains the allocated address.
2377 *
2378 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2379 */
c0c0a293 2380void __init vm_area_register_early(struct vm_struct *vm, size_t align)
f0aa6617 2381{
0eb68437
KW
2382 unsigned long addr = ALIGN(VMALLOC_START, align);
2383 struct vm_struct *cur, **p;
c0c0a293 2384
0eb68437 2385 BUG_ON(vmap_initialized);
f0aa6617 2386
0eb68437
KW
2387 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
2388 if ((unsigned long)cur->addr - addr >= vm->size)
2389 break;
2390 addr = ALIGN((unsigned long)cur->addr + cur->size, align);
2391 }
f0aa6617 2392
0eb68437
KW
2393 BUG_ON(addr > VMALLOC_END - vm->size);
2394 vm->addr = (void *)addr;
2395 vm->next = *p;
2396 *p = vm;
3252b1d8 2397 kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
f0aa6617
TH
2398}
2399
68ad4a33
URS
2400static void vmap_init_free_space(void)
2401{
2402 unsigned long vmap_start = 1;
2403 const unsigned long vmap_end = ULONG_MAX;
2404 struct vmap_area *busy, *free;
2405
2406 /*
2407 * B F B B B F
2408 * -|-----|.....|-----|-----|-----|.....|-
2409 * | The KVA space |
2410 * |<--------------------------------->|
2411 */
2412 list_for_each_entry(busy, &vmap_area_list, list) {
2413 if (busy->va_start - vmap_start > 0) {
2414 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2415 if (!WARN_ON_ONCE(!free)) {
2416 free->va_start = vmap_start;
2417 free->va_end = busy->va_start;
2418
2419 insert_vmap_area_augment(free, NULL,
2420 &free_vmap_area_root,
2421 &free_vmap_area_list);
2422 }
2423 }
2424
2425 vmap_start = busy->va_end;
2426 }
2427
2428 if (vmap_end - vmap_start > 0) {
2429 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2430 if (!WARN_ON_ONCE(!free)) {
2431 free->va_start = vmap_start;
2432 free->va_end = vmap_end;
2433
2434 insert_vmap_area_augment(free, NULL,
2435 &free_vmap_area_root,
2436 &free_vmap_area_list);
2437 }
2438 }
2439}
2440
e36176be
URS
2441static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2442 struct vmap_area *va, unsigned long flags, const void *caller)
cf88c790 2443{
cf88c790
TH
2444 vm->flags = flags;
2445 vm->addr = (void *)va->va_start;
2446 vm->size = va->va_end - va->va_start;
2447 vm->caller = caller;
db1aecaf 2448 va->vm = vm;
e36176be
URS
2449}
2450
2451static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2452 unsigned long flags, const void *caller)
2453{
2454 spin_lock(&vmap_area_lock);
2455 setup_vmalloc_vm_locked(vm, va, flags, caller);
c69480ad 2456 spin_unlock(&vmap_area_lock);
f5252e00 2457}
cf88c790 2458
20fc02b4 2459static void clear_vm_uninitialized_flag(struct vm_struct *vm)
f5252e00 2460{
d4033afd 2461 /*
20fc02b4 2462 * Before removing VM_UNINITIALIZED,
d4033afd
JK
2463 * we should make sure that vm has proper values.
2464 * Pair with smp_rmb() in show_numa_info().
2465 */
2466 smp_wmb();
20fc02b4 2467 vm->flags &= ~VM_UNINITIALIZED;
cf88c790
TH
2468}
2469
db64fe02 2470static struct vm_struct *__get_vm_area_node(unsigned long size,
7ca3027b
DA
2471 unsigned long align, unsigned long shift, unsigned long flags,
2472 unsigned long start, unsigned long end, int node,
2473 gfp_t gfp_mask, const void *caller)
db64fe02 2474{
0006526d 2475 struct vmap_area *va;
db64fe02 2476 struct vm_struct *area;
d98c9e83 2477 unsigned long requested_size = size;
1da177e4 2478
52fd24ca 2479 BUG_ON(in_interrupt());
7ca3027b 2480 size = ALIGN(size, 1ul << shift);
31be8309
OH
2481 if (unlikely(!size))
2482 return NULL;
1da177e4 2483
252e5c6e 2484 if (flags & VM_IOREMAP)
2485 align = 1ul << clamp_t(int, get_count_order_long(size),
2486 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2487
cf88c790 2488 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1da177e4
LT
2489 if (unlikely(!area))
2490 return NULL;
2491
71394fe5
AR
2492 if (!(flags & VM_NO_GUARD))
2493 size += PAGE_SIZE;
1da177e4 2494
869176a0 2495 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
db64fe02
NP
2496 if (IS_ERR(va)) {
2497 kfree(area);
2498 return NULL;
1da177e4 2499 }
1da177e4 2500
d98c9e83 2501 setup_vmalloc_vm(area, va, flags, caller);
3c5c3cfb 2502
19f1c3ac
AK
2503 /*
2504 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2505 * best-effort approach, as they can be mapped outside of vmalloc code.
2506 * For VM_ALLOC mappings, the pages are marked as accessible after
2507 * getting mapped in __vmalloc_node_range().
23689e91
AK
2508 * With hardware tag-based KASAN, marking is skipped for
2509 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
19f1c3ac
AK
2510 */
2511 if (!(flags & VM_ALLOC))
23689e91 2512 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
f6e39794 2513 KASAN_VMALLOC_PROT_NORMAL);
1d96320f 2514
1da177e4 2515 return area;
1da177e4
LT
2516}
2517
c2968612
BH
2518struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2519 unsigned long start, unsigned long end,
5e6cafc8 2520 const void *caller)
c2968612 2521{
7ca3027b
DA
2522 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2523 NUMA_NO_NODE, GFP_KERNEL, caller);
c2968612
BH
2524}
2525
1da177e4 2526/**
92eac168
MR
2527 * get_vm_area - reserve a contiguous kernel virtual area
2528 * @size: size of the area
2529 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
1da177e4 2530 *
92eac168
MR
2531 * Search for an area of @size in the kernel virtual mapping area,
2532 * and reserve it for our purposes. Returns the area descriptor
2533 * on success or %NULL on failure.
a862f68a
MR
2534 *
2535 * Return: the area descriptor on success or %NULL on failure.
1da177e4
LT
2536 */
2537struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2538{
7ca3027b
DA
2539 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2540 VMALLOC_START, VMALLOC_END,
00ef2d2f
DR
2541 NUMA_NO_NODE, GFP_KERNEL,
2542 __builtin_return_address(0));
23016969
CL
2543}
2544
2545struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
5e6cafc8 2546 const void *caller)
23016969 2547{
7ca3027b
DA
2548 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2549 VMALLOC_START, VMALLOC_END,
00ef2d2f 2550 NUMA_NO_NODE, GFP_KERNEL, caller);
1da177e4
LT
2551}
2552
e9da6e99 2553/**
92eac168
MR
2554 * find_vm_area - find a continuous kernel virtual area
2555 * @addr: base address
e9da6e99 2556 *
92eac168
MR
2557 * Search for the kernel VM area starting at @addr, and return it.
2558 * It is up to the caller to do all required locking to keep the returned
2559 * pointer valid.
a862f68a 2560 *
74640617 2561 * Return: the area descriptor on success or %NULL on failure.
e9da6e99
MS
2562 */
2563struct vm_struct *find_vm_area(const void *addr)
83342314 2564{
db64fe02 2565 struct vmap_area *va;
83342314 2566
db64fe02 2567 va = find_vmap_area((unsigned long)addr);
688fcbfc
PL
2568 if (!va)
2569 return NULL;
1da177e4 2570
688fcbfc 2571 return va->vm;
1da177e4
LT
2572}
2573
7856dfeb 2574/**
92eac168
MR
2575 * remove_vm_area - find and remove a continuous kernel virtual area
2576 * @addr: base address
7856dfeb 2577 *
92eac168
MR
2578 * Search for the kernel VM area starting at @addr, and remove it.
2579 * This function returns the found VM area, but using it is NOT safe
2580 * on SMP machines, except for its size or flags.
a862f68a 2581 *
74640617 2582 * Return: the area descriptor on success or %NULL on failure.
7856dfeb 2583 */
b3bdda02 2584struct vm_struct *remove_vm_area(const void *addr)
7856dfeb 2585{
db64fe02 2586 struct vmap_area *va;
75c59ce7 2587 struct vm_struct *vm;
db64fe02 2588
5803ed29
CH
2589 might_sleep();
2590
17d3ef43
CH
2591 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2592 addr))
2593 return NULL;
c69480ad 2594
75c59ce7
CH
2595 va = find_unlink_vmap_area((unsigned long)addr);
2596 if (!va || !va->vm)
2597 return NULL;
2598 vm = va->vm;
dd32c279 2599
17d3ef43
CH
2600 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
2601 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
75c59ce7 2602 kasan_free_module_shadow(vm);
17d3ef43 2603 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
dd3b8353 2604
75c59ce7
CH
2605 free_unmap_vmap_area(va);
2606 return vm;
7856dfeb
AK
2607}
2608
868b104d
RE
2609static inline void set_area_direct_map(const struct vm_struct *area,
2610 int (*set_direct_map)(struct page *page))
2611{
2612 int i;
2613
121e6f32 2614 /* HUGE_VMALLOC passes small pages to set_direct_map */
868b104d
RE
2615 for (i = 0; i < area->nr_pages; i++)
2616 if (page_address(area->pages[i]))
2617 set_direct_map(area->pages[i]);
2618}
2619
9e5fa0ae
CH
2620/*
2621 * Flush the vm mapping and reset the direct map.
2622 */
2623static void vm_reset_perms(struct vm_struct *area)
868b104d 2624{
868b104d 2625 unsigned long start = ULONG_MAX, end = 0;
121e6f32 2626 unsigned int page_order = vm_area_page_order(area);
31e67340 2627 int flush_dmap = 0;
868b104d
RE
2628 int i;
2629
868b104d 2630 /*
9e5fa0ae 2631 * Find the start and end range of the direct mappings to make sure that
868b104d
RE
2632 * the vm_unmap_aliases() flush includes the direct map.
2633 */
121e6f32 2634 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
8e41f872 2635 unsigned long addr = (unsigned long)page_address(area->pages[i]);
9e5fa0ae 2636
8e41f872 2637 if (addr) {
121e6f32
NP
2638 unsigned long page_size;
2639
2640 page_size = PAGE_SIZE << page_order;
868b104d 2641 start = min(addr, start);
121e6f32 2642 end = max(addr + page_size, end);
31e67340 2643 flush_dmap = 1;
868b104d
RE
2644 }
2645 }
2646
2647 /*
2648 * Set direct map to something invalid so that it won't be cached if
2649 * there are any accesses after the TLB flush, then flush the TLB and
2650 * reset the direct map permissions to the default.
2651 */
2652 set_area_direct_map(area, set_direct_map_invalid_noflush);
31e67340 2653 _vm_unmap_aliases(start, end, flush_dmap);
868b104d
RE
2654 set_area_direct_map(area, set_direct_map_default_noflush);
2655}
2656
208162f4 2657static void delayed_vfree_work(struct work_struct *w)
1da177e4 2658{
208162f4
CH
2659 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
2660 struct llist_node *t, *llnode;
bf22e37a 2661
208162f4 2662 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
5d3d31d6 2663 vfree(llnode);
bf22e37a
AR
2664}
2665
2666/**
92eac168
MR
2667 * vfree_atomic - release memory allocated by vmalloc()
2668 * @addr: memory base address
bf22e37a 2669 *
92eac168
MR
2670 * This one is just like vfree() but can be called in any atomic context
2671 * except NMIs.
bf22e37a
AR
2672 */
2673void vfree_atomic(const void *addr)
2674{
01e2e839 2675 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
bf22e37a 2676
01e2e839 2677 BUG_ON(in_nmi());
bf22e37a
AR
2678 kmemleak_free(addr);
2679
01e2e839
CH
2680 /*
2681 * Use raw_cpu_ptr() because this can be called from preemptible
2682 * context. Preemption is absolutely fine here, because the llist_add()
2683 * implementation is lockless, so it works even if we are adding to
2684 * another cpu's list. schedule_work() should be fine with this too.
2685 */
2686 if (addr && llist_add((struct llist_node *)addr, &p->list))
2687 schedule_work(&p->wq);
c67dc624
RP
2688}
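/*
 * Illustrative sketch: vfree_atomic() is the right call when freeing from
 * a context that cannot sleep, e.g. a timer callback; 'my_buf' is a
 * hypothetical pointer obtained earlier from vmalloc():
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		vfree_atomic(my_buf);
 *	}
 */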
2689
1da177e4 2690/**
fa307474
MWO
2691 * vfree - Release memory allocated by vmalloc()
2692 * @addr: Memory base address
1da177e4 2693 *
fa307474
MWO
2694 * Free the virtually continuous memory area starting at @addr, as obtained
2695 * from one of the vmalloc() family of APIs. This will usually also free the
2696 * physical memory underlying the virtual allocation, but that memory is
2697 * reference counted, so it will not be freed until the last user goes away.
1da177e4 2698 *
fa307474 2699 * If @addr is NULL, no operation is performed.
c9fcee51 2700 *
fa307474 2701 * Context:
92eac168 2702 * May sleep if called *not* from interrupt context.
fa307474
MWO
2703 * Must not be called in NMI context (strictly speaking, it could be
2704 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
f0953a1b 2705 * conventions for vfree() arch-dependent would be a really bad idea).
1da177e4 2706 */
b3bdda02 2707void vfree(const void *addr)
1da177e4 2708{
79311c1f
CH
2709 struct vm_struct *vm;
2710 int i;
89219d37 2711
01e2e839
CH
2712 if (unlikely(in_interrupt())) {
2713 vfree_atomic(addr);
2714 return;
2715 }
89219d37 2716
01e2e839 2717 BUG_ON(in_nmi());
89219d37 2718 kmemleak_free(addr);
01e2e839 2719 might_sleep();
a8dda165 2720
32fcfd40
AV
2721 if (!addr)
2722 return;
c67dc624 2723
79311c1f
CH
2724 vm = remove_vm_area(addr);
2725 if (unlikely(!vm)) {
2726 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2727 addr);
2728 return;
2729 }
2730
9e5fa0ae
CH
2731 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
2732 vm_reset_perms(vm);
79311c1f
CH
2733 for (i = 0; i < vm->nr_pages; i++) {
2734 struct page *page = vm->pages[i];
2735
2736 BUG_ON(!page);
2737 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
2738 /*
2739 * High-order allocs for huge vmallocs are split, so
2740 * can be freed as an array of order-0 allocations
2741 */
2742 __free_pages(page, 0);
2743 cond_resched();
2744 }
2745 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
2746 kvfree(vm->pages);
2747 kfree(vm);
1da177e4 2748}
1da177e4
LT
2749EXPORT_SYMBOL(vfree);
2750
2751/**
92eac168
MR
2752 * vunmap - release virtual mapping obtained by vmap()
2753 * @addr: memory base address
1da177e4 2754 *
92eac168
MR
2755 * Free the virtually contiguous memory area starting at @addr,
2756 * which was created from the page array passed to vmap().
1da177e4 2757 *
92eac168 2758 * Must not be called in interrupt context.
1da177e4 2759 */
b3bdda02 2760void vunmap(const void *addr)
1da177e4 2761{
79311c1f
CH
2762 struct vm_struct *vm;
2763
1da177e4 2764 BUG_ON(in_interrupt());
34754b69 2765 might_sleep();
79311c1f
CH
2766
2767 if (!addr)
2768 return;
2769 vm = remove_vm_area(addr);
2770 if (unlikely(!vm)) {
2771 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
2772 addr);
2773 return;
2774 }
2775 kfree(vm);
1da177e4 2776}
1da177e4
LT
2777EXPORT_SYMBOL(vunmap);
2778
2779/**
92eac168
MR
2780 * vmap - map an array of pages into virtually contiguous space
2781 * @pages: array of page pointers
2782 * @count: number of pages to map
2783 * @flags: vm_area->flags
2784 * @prot: page protection for the mapping
2785 *
b944afc9
CH
2786 * Maps @count pages from @pages into contiguous kernel virtual space.
2787 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2788 * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2789 * are transferred from the caller to vmap(), and will be freed / dropped when
2790 * vfree() is called on the return value.
a862f68a
MR
2791 *
2792 * Return: the address of the area or %NULL on failure
1da177e4
LT
2793 */
2794void *vmap(struct page **pages, unsigned int count,
92eac168 2795 unsigned long flags, pgprot_t prot)
1da177e4
LT
2796{
2797 struct vm_struct *area;
b67177ec 2798 unsigned long addr;
65ee03c4 2799 unsigned long size; /* In bytes */
1da177e4 2800
34754b69
PZ
2801 might_sleep();
2802
37f3605e
CH
2803 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
2804 return NULL;
2805
bd1a8fb2
PZ
2806 /*
2807 * Your top guard is someone else's bottom guard. Not having a top
2808 * guard compromises someone else's mappings too.
2809 */
2810 if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2811 flags &= ~VM_NO_GUARD;
2812
ca79b0c2 2813 if (count > totalram_pages())
1da177e4
LT
2814 return NULL;
2815
65ee03c4
GJM
2816 size = (unsigned long)count << PAGE_SHIFT;
2817 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1da177e4
LT
2818 if (!area)
2819 return NULL;
23016969 2820
b67177ec
NP
2821 addr = (unsigned long)area->addr;
2822 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2823 pages, PAGE_SHIFT) < 0) {
1da177e4
LT
2824 vunmap(area->addr);
2825 return NULL;
2826 }
2827
c22ee528 2828 if (flags & VM_MAP_PUT_PAGES) {
b944afc9 2829 area->pages = pages;
c22ee528
ML
2830 area->nr_pages = count;
2831 }
1da177e4
LT
2832 return area->addr;
2833}
1da177e4
LT
2834EXPORT_SYMBOL(vmap);
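/*
 * Illustrative usage sketch for vmap()/vunmap(); 'pages' and 'nr' are
 * hypothetical caller-owned values:
 *
 *	void *addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vunmap(addr);
 *
 * Unless VM_MAP_PUT_PAGES was passed, the pages themselves remain owned
 * by the caller and must be freed separately after vunmap().
 */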
2835
3e9a9e25
CH
2836#ifdef CONFIG_VMAP_PFN
2837struct vmap_pfn_data {
2838 unsigned long *pfns;
2839 pgprot_t prot;
2840 unsigned int idx;
2841};
2842
2843static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2844{
2845 struct vmap_pfn_data *data = private;
2846
2847 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2848 return -EINVAL;
2849 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2850 return 0;
2851}
2852
2853/**
2854 * vmap_pfn - map an array of PFNs into virtually contiguous space
2855 * @pfns: array of PFNs
2856 * @count: number of pages to map
2857 * @prot: page protection for the mapping
2858 *
2859 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2860 * the start address of the mapping.
2861 */
2862void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2863{
2864 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2865 struct vm_struct *area;
2866
2867 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2868 __builtin_return_address(0));
2869 if (!area)
2870 return NULL;
2871 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2872 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2873 free_vm_area(area);
2874 return NULL;
2875 }
2876 return area->addr;
2877}
2878EXPORT_SYMBOL_GPL(vmap_pfn);
2879#endif /* CONFIG_VMAP_PFN */
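/*
 * Illustrative sketch for vmap_pfn(): intended for PFNs that have no
 * struct page (pfn_valid() must be false), e.g. device apertures;
 * 'pfns' and 'nr' are hypothetical, and the write-combining protection
 * is just one plausible choice:
 *
 *	void *va = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */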
2880
12b9f873
UR
2881static inline unsigned int
2882vm_area_alloc_pages(gfp_t gfp, int nid,
343ab817 2883 unsigned int order, unsigned int nr_pages, struct page **pages)
12b9f873
UR
2884{
2885 unsigned int nr_allocated = 0;
e9c3cda4
MH
2886 gfp_t alloc_gfp = gfp;
2887 bool nofail = false;
ffb29b1c
CW
2888 struct page *page;
2889 int i;
12b9f873
UR
2890
2891 /*
2892 * For order-0 pages we make use of the bulk allocator; if
2893 * the page array is partly or not at all populated due
2894 * to failures, fall back to a single page allocator that is
2895 * more permissive.
2896 */
c00b6b96 2897 if (!order) {
e9c3cda4 2898 /* bulk allocator doesn't support nofail req. officially */
9376130c
MH
2899 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
2900
343ab817
URS
2901 while (nr_allocated < nr_pages) {
2902 unsigned int nr, nr_pages_request;
2903
2904 /*
2905 * The maximum allowed request is hard-coded to 100
2906 * pages per call. That is done in order to prevent a
2907 * long preemption-off scenario in the bulk-allocator,
2908 * so the range is [1:100].
2909 */
2910 nr_pages_request = min(100U, nr_pages - nr_allocated);
2911
c00b6b96
CW
2912 /* memory allocation should consider mempolicy, we can't
2913 * wrongly use nearest node when nid == NUMA_NO_NODE,
2914 * otherwise memory may be allocated in only one node,
98af39d5 2915 * but mempolicy wants to alloc memory by interleaving.
c00b6b96
CW
2916 */
2917 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
9376130c 2918 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
c00b6b96
CW
2919 nr_pages_request,
2920 pages + nr_allocated);
2921
2922 else
9376130c 2923 nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
c00b6b96
CW
2924 nr_pages_request,
2925 pages + nr_allocated);
343ab817
URS
2926
2927 nr_allocated += nr;
2928 cond_resched();
2929
2930 /*
2931 * If zero pages or only some of the pages were obtained,
2932 * fall back to a single page allocator.
2933 */
2934 if (nr != nr_pages_request)
2935 break;
2936 }
e9c3cda4
MH
2937 } else if (gfp & __GFP_NOFAIL) {
2938 /*
2939 * Higher order nofail allocations are really expensive and
2940 * potentially dangerous (premature OOM, disruptive reclaim,
2941 * compaction etc.).
2942 */
2943 alloc_gfp &= ~__GFP_NOFAIL;
2944 nofail = true;
3b8000ae 2945 }
12b9f873
UR
2946
2947 /* High-order pages or fallback path if "bulk" fails. */
ffb29b1c 2948 while (nr_allocated < nr_pages) {
dd544141
VA
2949 if (fatal_signal_pending(current))
2950 break;
2951
ffb29b1c 2952 if (nid == NUMA_NO_NODE)
e9c3cda4 2953 page = alloc_pages(alloc_gfp, order);
ffb29b1c 2954 else
e9c3cda4
MH
2955 page = alloc_pages_node(nid, alloc_gfp, order);
2956 if (unlikely(!page)) {
2957 if (!nofail)
2958 break;
2959
2960 /* fall back to the zero order allocations */
2961 alloc_gfp |= __GFP_NOFAIL;
2962 order = 0;
2963 continue;
2964 }
2965
3b8000ae
NP
2966 /*
2967 * Higher order allocations must be able to be treated as
2968 * independent small pages by callers (as they can with
2969 * small-page vmallocs). Some drivers do their own refcounting
2970 * on vmalloc_to_page() pages, some use page->mapping,
2971 * page->lru, etc.
2972 */
2973 if (order)
2974 split_page(page, order);
12b9f873
UR
2975
2976 /*
2977 * Careful, we allocate and map page-order pages, but
2978 * tracking is done per PAGE_SIZE page so as to keep the
2979 * vm_struct APIs independent of the physical/mapped size.
2980 */
2981 for (i = 0; i < (1U << order); i++)
2982 pages[nr_allocated + i] = page + i;
2983
12e376a6 2984 cond_resched();
12b9f873
UR
2985 nr_allocated += 1U << order;
2986 }
2987
2988 return nr_allocated;
2989}
2990
e31d9eb5 2991static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
121e6f32
NP
2992 pgprot_t prot, unsigned int page_shift,
2993 int node)
1da177e4 2994{
930f036b 2995 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
9376130c 2996 bool nofail = gfp_mask & __GFP_NOFAIL;
121e6f32
NP
2997 unsigned long addr = (unsigned long)area->addr;
2998 unsigned long size = get_vm_area_size(area);
34fe6537 2999 unsigned long array_size;
121e6f32
NP
3000 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3001 unsigned int page_order;
451769eb
MH
3002 unsigned int flags;
3003 int ret;
1da177e4 3004
121e6f32 3005 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
80b1d8fd 3006
f255935b
CH
3007 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3008 gfp_mask |= __GFP_HIGHMEM;
1da177e4 3009
1da177e4 3010 /* Please note that the recursion is strictly bounded. */
8757d5fa 3011 if (array_size > PAGE_SIZE) {
5c1f4e69 3012 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
f255935b 3013 area->caller);
286e1ea3 3014 } else {
5c1f4e69 3015 area->pages = kmalloc_node(array_size, nested_gfp, node);
286e1ea3 3016 }
7ea36242 3017
5c1f4e69 3018 if (!area->pages) {
c3d77172 3019 warn_alloc(gfp_mask, NULL,
f4bdfeaf
URS
3020 "vmalloc error: size %lu, failed to allocate page array size %lu",
3021 nr_small_pages * PAGE_SIZE, array_size);
cd61413b 3022 free_vm_area(area);
1da177e4
LT
3023 return NULL;
3024 }
1da177e4 3025
121e6f32 3026 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
121e6f32 3027 page_order = vm_area_page_order(area);
bf53d6f8 3028
c3d77172
URS
3029 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3030 node, page_order, nr_small_pages, area->pages);
5c1f4e69 3031
97105f0a 3032 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
4e5aa1f4 3033 if (gfp_mask & __GFP_ACCOUNT) {
3b8000ae 3034 int i;
4e5aa1f4 3035
3b8000ae
NP
3036 for (i = 0; i < area->nr_pages; i++)
3037 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
4e5aa1f4 3038 }
1da177e4 3039
5c1f4e69
URS
3040 /*
3041 * If not enough pages were obtained to accomplish an
f41f036b 3042 * allocation request, free them via vfree() if any.
5c1f4e69
URS
3043 */
3044 if (area->nr_pages != nr_small_pages) {
c3d77172 3045 warn_alloc(gfp_mask, NULL,
f4bdfeaf 3046 "vmalloc error: size %lu, page order %u, failed to allocate pages",
5c1f4e69
URS
3047 area->nr_pages * PAGE_SIZE, page_order);
3048 goto fail;
3049 }
3050
451769eb
MH
3051 /*
3052 * page table allocations ignore the external gfp mask; enforce it
3053 * via the scope API
3054 */
3055 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3056 flags = memalloc_nofs_save();
3057 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3058 flags = memalloc_noio_save();
3059
9376130c
MH
3060 do {
3061 ret = vmap_pages_range(addr, addr + size, prot, area->pages,
451769eb 3062 page_shift);
9376130c
MH
3063 if (nofail && (ret < 0))
3064 schedule_timeout_uninterruptible(1);
3065 } while (nofail && (ret < 0));
451769eb
MH
3066
3067 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3068 memalloc_nofs_restore(flags);
3069 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3070 memalloc_noio_restore(flags);
3071
3072 if (ret < 0) {
c3d77172 3073 warn_alloc(gfp_mask, NULL,
f4bdfeaf
URS
3074 "vmalloc error: size %lu, failed to map pages",
3075 area->nr_pages * PAGE_SIZE);
1da177e4 3076 goto fail;
d70bec8c 3077 }
ed1f324c 3078
1da177e4
LT
3079 return area->addr;
3080
3081fail:
f41f036b 3082 vfree(area->addr);
1da177e4
LT
3083 return NULL;
3084}
3085
3086/**
92eac168
MR
3087 * __vmalloc_node_range - allocate virtually contiguous memory
3088 * @size: allocation size
3089 * @align: desired alignment
3090 * @start: vm area range start
3091 * @end: vm area range end
3092 * @gfp_mask: flags for the page level allocator
3093 * @prot: protection mask for the allocated pages
3094 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3095 * @node: node to use for allocation or NUMA_NO_NODE
3096 * @caller: caller's return address
3097 *
3098 * Allocate enough pages to cover @size from the page level
b7d90e7a 3099 * allocator with @gfp_mask flags. Please note that the full set of gfp
30d3f011
MH
3100 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3101 * supported.
3102 * Zone modifiers are not supported. From the reclaim modifiers
3103 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3104 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3105 * __GFP_RETRY_MAYFAIL are not supported).
3106 *
3107 * __GFP_NOWARN can be used to suppress failure messages.
b7d90e7a
MH
3108 *
3109 * Map them into contiguous kernel virtual space, using a pagetable
3110 * protection of @prot.
a862f68a
MR
3111 *
3112 * Return: the address of the area or %NULL on failure
1da177e4 3113 */
d0a21265
DR
3114void *__vmalloc_node_range(unsigned long size, unsigned long align,
3115 unsigned long start, unsigned long end, gfp_t gfp_mask,
cb9e3c29
AR
3116 pgprot_t prot, unsigned long vm_flags, int node,
3117 const void *caller)
1da177e4
LT
3118{
3119 struct vm_struct *area;
19f1c3ac 3120 void *ret;
f6e39794 3121 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
89219d37 3122 unsigned long real_size = size;
121e6f32
NP
3123 unsigned long real_align = align;
3124 unsigned int shift = PAGE_SHIFT;
1da177e4 3125
d70bec8c
NP
3126 if (WARN_ON_ONCE(!size))
3127 return NULL;
3128
3129 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3130 warn_alloc(gfp_mask, NULL,
f4bdfeaf
URS
3131 "vmalloc error: size %lu, exceeds total pages",
3132 real_size);
d70bec8c 3133 return NULL;
121e6f32
NP
3134 }
3135
559089e0 3136 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
121e6f32 3137 unsigned long size_per_node;
1da177e4 3138
121e6f32
NP
3139 /*
3140 * Try huge pages. Only try for PAGE_KERNEL allocations,
3141 * others like modules don't yet expect huge pages in
3142 * their allocations due to apply_to_page_range not
3143 * supporting them.
3144 */
3145
3146 size_per_node = size;
3147 if (node == NUMA_NO_NODE)
3148 size_per_node /= num_online_nodes();
3382bbee 3149 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
121e6f32 3150 shift = PMD_SHIFT;
3382bbee
CL
3151 else
3152 shift = arch_vmap_pte_supported_shift(size_per_node);
3153
3154 align = max(real_align, 1UL << shift);
3155 size = ALIGN(real_size, 1UL << shift);
121e6f32
NP
3156 }
3157
3158again:
7ca3027b
DA
3159 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3160 VM_UNINITIALIZED | vm_flags, start, end, node,
3161 gfp_mask, caller);
d70bec8c 3162 if (!area) {
9376130c 3163 bool nofail = gfp_mask & __GFP_NOFAIL;
d70bec8c 3164 warn_alloc(gfp_mask, NULL,
9376130c
MH
3165 "vmalloc error: size %lu, vm_struct allocation failed%s",
3166 real_size, (nofail) ? ". Retrying." : "");
3167 if (nofail) {
3168 schedule_timeout_uninterruptible(1);
3169 goto again;
3170 }
de7d2b56 3171 goto fail;
d70bec8c 3172 }
1da177e4 3173
f6e39794
AK
3174 /*
3175 * Prepare arguments for __vmalloc_area_node() and
3176 * kasan_unpoison_vmalloc().
3177 */
3178 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3179 if (kasan_hw_tags_enabled()) {
3180 /*
3181 * Modify protection bits to allow tagging.
3182 * This must be done before mapping.
3183 */
3184 prot = arch_vmap_pgprot_tagged(prot);
01d92c7f 3185
f6e39794
AK
3186 /*
3187 * Skip page_alloc poisoning and zeroing for physical
3188 * pages backing VM_ALLOC mapping. Memory is instead
3189 * poisoned and zeroed by kasan_unpoison_vmalloc().
3190 */
0a54864f 3191 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
f6e39794
AK
3192 }
3193
3194 /* Take note that the mapping is PAGE_KERNEL. */
3195 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
23689e91
AK
3196 }
3197
01d92c7f 3198 /* Allocate physical pages and map them into vmalloc space. */
19f1c3ac
AK
3199 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3200 if (!ret)
121e6f32 3201 goto fail;
89219d37 3202
23689e91
AK
3203 /*
3204 * Mark the pages as accessible, now that they are mapped.
6c2f761d
AK
3205 * The condition for setting KASAN_VMALLOC_INIT should complement the
3206 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3207 * to make sure that memory is initialized under the same conditions.
f6e39794
AK
3208 * Tag-based KASAN modes only assign tags to normal non-executable
3209 * allocations, see __kasan_unpoison_vmalloc().
23689e91 3210 */
f6e39794 3211 kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
6c2f761d
AK
3212 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3213 (gfp_mask & __GFP_SKIP_ZERO))
23689e91 3214 kasan_flags |= KASAN_VMALLOC_INIT;
f6e39794 3215 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
23689e91 3216 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
19f1c3ac 3217
f5252e00 3218 /*
20fc02b4
ZY
3219 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3220 * flag. It means that vm_struct is not fully initialized.
4341fa45 3221 * Now, it is fully initialized, so remove this flag here.
f5252e00 3222 */
20fc02b4 3223 clear_vm_uninitialized_flag(area);
f5252e00 3224
7ca3027b 3225 size = PAGE_ALIGN(size);
60115fa5
KW
3226 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3227 kmemleak_vmalloc(area, size, gfp_mask);
89219d37 3228
19f1c3ac 3229 return area->addr;
de7d2b56
JP
3230
3231fail:
121e6f32
NP
3232 if (shift > PAGE_SHIFT) {
3233 shift = PAGE_SHIFT;
3234 align = real_align;
3235 size = real_size;
3236 goto again;
3237 }
3238
de7d2b56 3239 return NULL;
1da177e4
LT
3240}
3241
d0a21265 3242/**
92eac168
MR
3243 * __vmalloc_node - allocate virtually contiguous memory
3244 * @size: allocation size
3245 * @align: desired alignment
3246 * @gfp_mask: flags for the page level allocator
92eac168
MR
3247 * @node: node to use for allocation or NUMA_NO_NODE
3248 * @caller: caller's return address
a7c3e901 3249 *
f38fcb9c
CH
3250 * Allocate enough pages to cover @size from the page level allocator with
3251 * @gfp_mask flags. Map them into contiguous kernel virtual space.
a7c3e901 3252 *
92eac168
MR
3253 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3254 * and __GFP_NOFAIL are not supported
a7c3e901 3255 *
92eac168
MR
3256 * Any use of gfp flags outside of GFP_KERNEL should be discussed
3257 * with mm people.
a862f68a
MR
3258 *
3259 * Return: pointer to the allocated memory or %NULL on error
d0a21265 3260 */
2b905948 3261void *__vmalloc_node(unsigned long size, unsigned long align,
f38fcb9c 3262 gfp_t gfp_mask, int node, const void *caller)
d0a21265
DR
3263{
3264 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
f38fcb9c 3265 gfp_mask, PAGE_KERNEL, 0, node, caller);
d0a21265 3266}
c3f896dc
CH
3267/*
3268 * This is only for performance analysis of vmalloc and stress purposes.
3269 * It is required by the vmalloc test module, therefore do not use it
3270 * for anything else.
3271 */
3272#ifdef CONFIG_TEST_VMALLOC_MODULE
3273EXPORT_SYMBOL_GPL(__vmalloc_node);
3274#endif
d0a21265 3275
88dca4ca 3276void *__vmalloc(unsigned long size, gfp_t gfp_mask)
930fc45a 3277{
f38fcb9c 3278 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
23016969 3279 __builtin_return_address(0));
930fc45a 3280}
1da177e4
LT
3281EXPORT_SYMBOL(__vmalloc);
3282
3283/**
92eac168
MR
3284 * vmalloc - allocate virtually contiguous memory
3285 * @size: allocation size
3286 *
3287 * Allocate enough pages to cover @size from the page level
3288 * allocator and map them into contiguous kernel virtual space.
1da177e4 3289 *
92eac168
MR
3290 * For tight control over page level allocator and protection flags
3291 * use __vmalloc() instead.
a862f68a
MR
3292 *
3293 * Return: pointer to the allocated memory or %NULL on error
1da177e4
LT
3294 */
3295void *vmalloc(unsigned long size)
3296{
4d39d728
CH
3297 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3298 __builtin_return_address(0));
1da177e4 3299}
1da177e4
LT
3300EXPORT_SYMBOL(vmalloc);
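/*
 * Illustrative usage sketch: a large, virtually contiguous allocation
 * that does not need to be physically contiguous; 'struct foo' and
 * 'nr_entries' are hypothetical:
 *
 *	struct foo *table = vmalloc(array_size(nr_entries, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */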
3301
15a64f5a 3302/**
559089e0
SL
3303 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3304 * @size: allocation size
3305 * @gfp_mask: flags for the page level allocator
15a64f5a 3306 *
559089e0 3307 * Allocate enough pages to cover @size from the page level
15a64f5a 3308 * allocator and map them into contiguous kernel virtual space.
559089e0
SL
3309 * If @size is greater than or equal to PMD_SIZE, allow using
3310 * huge pages for the memory
15a64f5a
CI
3311 *
3312 * Return: pointer to the allocated memory or %NULL on error
3313 */
559089e0 3314void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
15a64f5a
CI
3315{
3316 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
559089e0 3317 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
15a64f5a
CI
3318 NUMA_NO_NODE, __builtin_return_address(0));
3319}
559089e0 3320EXPORT_SYMBOL_GPL(vmalloc_huge);
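/*
 * Illustrative sketch: callers that allocate very large tables and can
 * tolerate huge-page mappings may opt in explicitly; 'size' is a
 * hypothetical, PMD_SIZE-or-larger request:
 *
 *	void *table = vmalloc_huge(size, GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */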
15a64f5a 3321
e1ca7788 3322/**
92eac168
MR
3323 * vzalloc - allocate virtually contiguous memory with zero fill
3324 * @size: allocation size
3325 *
3326 * Allocate enough pages to cover @size from the page level
3327 * allocator and map them into contiguous kernel virtual space.
3328 * The memory allocated is set to zero.
3329 *
3330 * For tight control over page level allocator and protection flags
3331 * use __vmalloc() instead.
a862f68a
MR
3332 *
3333 * Return: pointer to the allocated memory or %NULL on error
e1ca7788
DY
3334 */
3335void *vzalloc(unsigned long size)
3336{
4d39d728
CH
3337 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3338 __builtin_return_address(0));
e1ca7788
DY
3339}
3340EXPORT_SYMBOL(vzalloc);
3341
83342314 3342/**
ead04089
REB
3343 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3344 * @size: allocation size
83342314 3345 *
ead04089
REB
3346 * The resulting memory area is zeroed so it can be mapped to userspace
3347 * without leaking data.
a862f68a
MR
3348 *
3349 * Return: pointer to the allocated memory or %NULL on error
83342314
NP
3350 */
3351void *vmalloc_user(unsigned long size)
3352{
bc84c535
RP
3353 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3354 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3355 VM_USERMAP, NUMA_NO_NODE,
3356 __builtin_return_address(0));
83342314
NP
3357}
3358EXPORT_SYMBOL(vmalloc_user);
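/*
 * Illustrative sketch: buffers destined for userspace are typically
 * allocated here and later inserted into a VMA from a driver's mmap
 * handler, e.g. via remap_vmalloc_range(); 'buf' and 'vma' are
 * hypothetical:
 *
 *	buf = vmalloc_user(vma->vm_end - vma->vm_start);
 *	if (!buf)
 *		return -ENOMEM;
 *	return remap_vmalloc_range(vma, buf, 0);
 */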
3359
930fc45a 3360/**
92eac168
MR
3361 * vmalloc_node - allocate memory on a specific node
3362 * @size: allocation size
3363 * @node: numa node
930fc45a 3364 *
92eac168
MR
3365 * Allocate enough pages to cover @size from the page level
3366 * allocator and map them into contiguous kernel virtual space.
930fc45a 3367 *
92eac168
MR
3368 * For tight control over page level allocator and protection flags
3369 * use __vmalloc() instead.
a862f68a
MR
3370 *
3371 * Return: pointer to the allocated memory or %NULL on error
930fc45a
CL
3372 */
3373void *vmalloc_node(unsigned long size, int node)
3374{
f38fcb9c
CH
3375 return __vmalloc_node(size, 1, GFP_KERNEL, node,
3376 __builtin_return_address(0));
930fc45a
CL
3377}
3378EXPORT_SYMBOL(vmalloc_node);
3379
e1ca7788
DY
3380/**
3381 * vzalloc_node - allocate memory on a specific node with zero fill
3382 * @size: allocation size
3383 * @node: numa node
3384 *
3385 * Allocate enough pages to cover @size from the page level
3386 * allocator and map them into contiguous kernel virtual space.
3387 * The memory allocated is set to zero.
3388 *
a862f68a 3389 * Return: pointer to the allocated memory or %NULL on error
e1ca7788
DY
3390 */
3391void *vzalloc_node(unsigned long size, int node)
3392{
4d39d728
CH
3393 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3394 __builtin_return_address(0));
e1ca7788
DY
3395}
3396EXPORT_SYMBOL(vzalloc_node);
3397
0d08e0d3 3398#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
698d0831 3399#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
0d08e0d3 3400#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
698d0831 3401#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
0d08e0d3 3402#else
698d0831
MH
3403/*
 3404 * 64-bit systems should always have either DMA or DMA32 zones. For others,
 3405 * GFP_DMA32 should do the right thing and use the normal zone.
3406 */
68d68ff6 3407#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
0d08e0d3
AK
3408#endif
3409
1da177e4 3410/**
92eac168
MR
3411 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3412 * @size: allocation size
1da177e4 3413 *
92eac168
MR
3414 * Allocate enough 32bit PA addressable pages to cover @size from the
3415 * page level allocator and map them into contiguous kernel virtual space.
a862f68a
MR
3416 *
3417 * Return: pointer to the allocated memory or %NULL on error
1da177e4
LT
3418 */
3419void *vmalloc_32(unsigned long size)
3420{
f38fcb9c
CH
3421 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3422 __builtin_return_address(0));
1da177e4 3423}
1da177e4
LT
3424EXPORT_SYMBOL(vmalloc_32);
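/*
 * Illustrative sketch (hypothetical names, not part of this file): a buffer
 * for a legacy device that can only address 32-bit physical memory. The
 * backing pages are not physically contiguous, so per-page mapping or a
 * scatter-gather list is still needed for actual DMA.
 */
static void *demo_bounce;

static int demo_bounce_alloc(unsigned long size)
{
	demo_bounce = vmalloc_32(size);
	return demo_bounce ? 0 : -ENOMEM;
}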
3425
83342314 3426/**
ead04089 3427 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
92eac168 3428 * @size: allocation size
ead04089
REB
3429 *
3430 * The resulting memory area is 32bit addressable and zeroed so it can be
3431 * mapped to userspace without leaking data.
a862f68a
MR
3432 *
3433 * Return: pointer to the allocated memory or %NULL on error
83342314
NP
3434 */
3435void *vmalloc_32_user(unsigned long size)
3436{
bc84c535
RP
3437 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3438 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3439 VM_USERMAP, NUMA_NO_NODE,
3440 __builtin_return_address(0));
83342314
NP
3441}
3442EXPORT_SYMBOL(vmalloc_32_user);
3443
d0107eb0
KH
3444/*
 3445 * Small helper routine: copy contents from addr to buf.
 3446 * If a page is not present, fill the gap with zeroes.
3447 */
3448
3449static int aligned_vread(char *buf, char *addr, unsigned long count)
3450{
3451 struct page *p;
3452 int copied = 0;
3453
3454 while (count) {
3455 unsigned long offset, length;
3456
891c49ab 3457 offset = offset_in_page(addr);
d0107eb0
KH
3458 length = PAGE_SIZE - offset;
3459 if (length > count)
3460 length = count;
3461 p = vmalloc_to_page(addr);
3462 /*
 3463 * To do safe access to this _mapped_ area, we need a
 3464 * lock. But adding a lock here means that we need to add the
f0953a1b 3465 * overhead of vmalloc()/vfree() calls for this _debug_
d0107eb0
KH
 3466 * interface, rarely used. Instead of that, we'll use
 3467 * kmap_atomic() and accept a small overhead in this access function.
3468 */
3469 if (p) {
f7c8ce44 3470 /* We can expect USER0 is not used -- see vread() */
9b04c5fe 3471 void *map = kmap_atomic(p);
d0107eb0 3472 memcpy(buf, map + offset, length);
9b04c5fe 3473 kunmap_atomic(map);
d0107eb0
KH
3474 } else
3475 memset(buf, 0, length);
3476
3477 addr += length;
3478 buf += length;
3479 copied += length;
3480 count -= length;
3481 }
3482 return copied;
3483}
3484
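/*
 * Illustrative sketch (not part of this file): the page-chunking arithmetic
 * aligned_vread() relies on, shown in isolation. With 4K pages, a 6000-byte
 * copy starting 564 bytes into a page is split into a 3532-byte chunk (to the
 * end of the first page) followed by a 2468-byte chunk.
 */
static unsigned long demo_chunked_copy(char *dst, char *src, unsigned long count)
{
	unsigned long copied = 0;

	while (count) {
		unsigned long offset = offset_in_page(src);
		unsigned long length = min(PAGE_SIZE - offset, count);

		/* each chunk stays within a single page of 'src' */
		memcpy(dst, src, length);

		dst += length;
		src += length;
		copied += length;
		count -= length;
	}
	return copied;
}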
06c89946
BH
3485static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags)
3486{
3487 char *start;
3488 struct vmap_block *vb;
3489 unsigned long offset;
3490 unsigned int rs, re, n;
3491
3492 /*
 3493 * If it's an area created directly by the vm_map_ram() interface,
 3494 * without further subdividing and delegating management to vmap_block,
3495 * handle it here.
3496 */
3497 if (!(flags & VMAP_BLOCK)) {
3498 aligned_vread(buf, addr, count);
3499 return;
3500 }
3501
3502 /*
 3503 * The area is split into regions and tracked with vmap_block; read out
 3504 * each region and zero-fill the holes between regions.
3505 */
3506 vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
3507 if (!vb)
3508 goto finished;
3509
3510 spin_lock(&vb->lock);
3511 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
3512 spin_unlock(&vb->lock);
3513 goto finished;
3514 }
3515 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
3516 if (!count)
3517 break;
3518 start = vmap_block_vaddr(vb->va->va_start, rs);
3519 while (addr < start) {
3520 if (count == 0)
3521 goto unlock;
3522 *buf = '\0';
3523 buf++;
3524 addr++;
3525 count--;
3526 }
 3527 /* it could start reading from the middle of a used region */
3528 offset = offset_in_page(addr);
3529 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
3530 if (n > count)
3531 n = count;
3532 aligned_vread(buf, start+offset, n);
3533
3534 buf += n;
3535 addr += n;
3536 count -= n;
3537 }
3538unlock:
3539 spin_unlock(&vb->lock);
3540
3541finished:
 3542 /* zero-fill the remaining dirty or free regions */
3543 if (count)
3544 memset(buf, 0, count);
3545}
3546
d0107eb0 3547/**
92eac168
MR
3548 * vread() - read vmalloc area in a safe way.
3549 * @buf: buffer for reading data
3550 * @addr: vm address.
3551 * @count: number of bytes to be read.
3552 *
92eac168
MR
 3553 * This function checks that addr is a valid vmalloc'ed area, and
 3554 * copies data from that area to a given buffer. If the given memory range
 3555 * of [addr...addr+count) includes some valid address, data is copied to
 3556 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
 3557 * An IOREMAP area is treated as a memory hole and no copy is done.
3558 *
 3559 * If [addr...addr+count) doesn't include any intersection with a live
 3560 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
3561 *
 3562 * Note: In usual ops, vread() is never necessary because the caller
 3563 * should know the vmalloc() area is valid and can use memcpy().
 3564 * This is for routines which have to access the vmalloc area without
bbcd53c9 3565 * any prior information, such as /proc/kcore.
a862f68a
MR
3566 *
3567 * Return: number of bytes for which addr and buf should be increased
3568 * (same number as @count) or %0 if [addr...addr+count) doesn't
3569 * include any intersection with valid vmalloc area
d0107eb0 3570 */
1da177e4
LT
3571long vread(char *buf, char *addr, unsigned long count)
3572{
e81ce85f
JK
3573 struct vmap_area *va;
3574 struct vm_struct *vm;
1da177e4 3575 char *vaddr, *buf_start = buf;
d0107eb0 3576 unsigned long buflen = count;
06c89946 3577 unsigned long n, size, flags;
1da177e4 3578
4aff1dc4
AK
3579 addr = kasan_reset_tag(addr);
3580
1da177e4
LT
3581 /* Don't allow overflow */
3582 if ((unsigned long) addr + count < count)
3583 count = -(unsigned long) addr;
3584
e81ce85f 3585 spin_lock(&vmap_area_lock);
f181234a 3586 va = find_vmap_area_exceed_addr((unsigned long)addr);
f608788c
SD
3587 if (!va)
3588 goto finished;
f181234a
CW
3589
3590 /* no intersects with alive vmap_area */
3591 if ((unsigned long)addr + count <= va->va_start)
3592 goto finished;
3593
f608788c 3594 list_for_each_entry_from(va, &vmap_area_list, list) {
e81ce85f
JK
3595 if (!count)
3596 break;
3597
06c89946
BH
3598 vm = va->vm;
3599 flags = va->flags & VMAP_FLAGS_MASK;
3600 /*
3601 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need
3602 * be set together with VMAP_RAM.
3603 */
3604 WARN_ON(flags == VMAP_BLOCK);
3605
3606 if (!vm && !flags)
e81ce85f
JK
3607 continue;
3608
30a7a9b1
BH
3609 if (vm && (vm->flags & VM_UNINITIALIZED))
3610 continue;
3611 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3612 smp_rmb();
3613
06c89946
BH
3614 vaddr = (char *) va->va_start;
3615 size = vm ? get_vm_area_size(vm) : va_size(va);
3616
3617 if (addr >= vaddr + size)
1da177e4
LT
3618 continue;
3619 while (addr < vaddr) {
3620 if (count == 0)
3621 goto finished;
3622 *buf = '\0';
3623 buf++;
3624 addr++;
3625 count--;
3626 }
06c89946 3627 n = vaddr + size - addr;
d0107eb0
KH
3628 if (n > count)
3629 n = count;
06c89946
BH
3630
3631 if (flags & VMAP_RAM)
3632 vmap_ram_vread(buf, addr, n, flags);
3633 else if (!(vm->flags & VM_IOREMAP))
d0107eb0
KH
3634 aligned_vread(buf, addr, n);
3635 else /* IOREMAP area is treated as memory hole */
3636 memset(buf, 0, n);
3637 buf += n;
3638 addr += n;
3639 count -= n;
1da177e4
LT
3640 }
3641finished:
e81ce85f 3642 spin_unlock(&vmap_area_lock);
d0107eb0
KH
3643
3644 if (buf == buf_start)
3645 return 0;
3646 /* zero-fill memory holes */
3647 if (buf != buf_start + buflen)
3648 memset(buf, 0, buflen - (buf - buf_start));
3649
3650 return buflen;
1da177e4
LT
3651}
3652
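/*
 * Illustrative sketch (hypothetical name, not part of this file): dump a
 * possibly sparse vmalloc range into a flat kernel buffer, /proc/kcore
 * style. Holes and IOREMAP regions come back zero-filled, so @kbuf is
 * always fully written when the range intersects a live area.
 */
static long demo_dump_vmalloc(void *vaddr, char *kbuf, unsigned long len)
{
	return vread(kbuf, (char *)vaddr, len);
}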
83342314 3653/**
92eac168
MR
3654 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3655 * @vma: vma to cover
3656 * @uaddr: target user address to start at
3657 * @kaddr: virtual address of vmalloc kernel memory
bdebd6a2 3658 * @pgoff: offset from @kaddr to start at
92eac168 3659 * @size: size of map area
7682486b 3660 *
92eac168 3661 * Returns: 0 for success, -Exxx on failure
83342314 3662 *
92eac168
MR
3663 * This function checks that @kaddr is a valid vmalloc'ed area,
3664 * and that it is big enough to cover the range starting at
 3665 * @uaddr in @vma. Will return failure if that criterion isn't
3666 * met.
83342314 3667 *
92eac168 3668 * Similar to remap_pfn_range() (see mm/memory.c)
83342314 3669 */
e69e9d4a 3670int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
bdebd6a2
JH
3671 void *kaddr, unsigned long pgoff,
3672 unsigned long size)
83342314
NP
3673{
3674 struct vm_struct *area;
bdebd6a2
JH
3675 unsigned long off;
3676 unsigned long end_index;
3677
3678 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3679 return -EINVAL;
83342314 3680
e69e9d4a
HD
3681 size = PAGE_ALIGN(size);
3682
3683 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
83342314
NP
3684 return -EINVAL;
3685
e69e9d4a 3686 area = find_vm_area(kaddr);
83342314 3687 if (!area)
db64fe02 3688 return -EINVAL;
83342314 3689
fe9041c2 3690 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
db64fe02 3691 return -EINVAL;
83342314 3692
bdebd6a2
JH
3693 if (check_add_overflow(size, off, &end_index) ||
3694 end_index > get_vm_area_size(area))
db64fe02 3695 return -EINVAL;
bdebd6a2 3696 kaddr += off;
83342314 3697
83342314 3698 do {
e69e9d4a 3699 struct page *page = vmalloc_to_page(kaddr);
db64fe02
NP
3700 int ret;
3701
83342314
NP
3702 ret = vm_insert_page(vma, uaddr, page);
3703 if (ret)
3704 return ret;
3705
3706 uaddr += PAGE_SIZE;
e69e9d4a
HD
3707 kaddr += PAGE_SIZE;
3708 size -= PAGE_SIZE;
3709 } while (size > 0);
83342314 3710
1c71222e 3711 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
83342314 3712
db64fe02 3713 return 0;
83342314 3714}
e69e9d4a
HD
3715
3716/**
92eac168
MR
3717 * remap_vmalloc_range - map vmalloc pages to userspace
3718 * @vma: vma to cover (map full range of vma)
3719 * @addr: vmalloc memory
3720 * @pgoff: number of pages into addr before first page to map
e69e9d4a 3721 *
92eac168 3722 * Returns: 0 for success, -Exxx on failure
e69e9d4a 3723 *
92eac168
MR
3724 * This function checks that addr is a valid vmalloc'ed area, and
3725 * that it is big enough to cover the vma. Will return failure if
 3726 * that criterion isn't met.
e69e9d4a 3727 *
92eac168 3728 * Similar to remap_pfn_range() (see mm/memory.c)
e69e9d4a
HD
3729 */
3730int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3731 unsigned long pgoff)
3732{
3733 return remap_vmalloc_range_partial(vma, vma->vm_start,
bdebd6a2 3734 addr, pgoff,
e69e9d4a
HD
3735 vma->vm_end - vma->vm_start);
3736}
83342314
NP
3737EXPORT_SYMBOL(remap_vmalloc_range);
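/*
 * Illustrative sketch (hypothetical names, not part of this file): the usual
 * pairing for exporting a kernel buffer to userspace. The buffer must come
 * from vmalloc_user() (or otherwise carry VM_USERMAP/VM_DMA_COHERENT) or
 * remap_vmalloc_range() will refuse it.
 */
static void *demo_shared_buf;	/* assumed: allocated with vmalloc_user() */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* map the buffer starting at the page offset userspace asked for */
	return remap_vmalloc_range(vma, demo_shared_buf, vma->vm_pgoff);
}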
3738
5f4352fb
JF
3739void free_vm_area(struct vm_struct *area)
3740{
3741 struct vm_struct *ret;
3742 ret = remove_vm_area(area->addr);
3743 BUG_ON(ret != area);
3744 kfree(area);
3745}
3746EXPORT_SYMBOL_GPL(free_vm_area);
a10aa579 3747
4f8b02b4 3748#ifdef CONFIG_SMP
ca23e405
TH
3749static struct vmap_area *node_to_va(struct rb_node *n)
3750{
4583e773 3751 return rb_entry_safe(n, struct vmap_area, rb_node);
ca23e405
TH
3752}
3753
3754/**
68ad4a33
URS
3755 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3756 * @addr: target address
ca23e405 3757 *
68ad4a33
URS
 3758 * Returns: the vmap_area if it is found. If there is no such area,
 3759 * the closest (highest, in reverse order) vmap_area below @addr is
 3760 * returned, i.e. va->va_start < addr && va->va_end < addr, or NULL
 3761 * if there are no areas before @addr.
ca23e405 3762 */
68ad4a33
URS
3763static struct vmap_area *
3764pvm_find_va_enclose_addr(unsigned long addr)
ca23e405 3765{
68ad4a33
URS
3766 struct vmap_area *va, *tmp;
3767 struct rb_node *n;
3768
3769 n = free_vmap_area_root.rb_node;
3770 va = NULL;
ca23e405
TH
3771
3772 while (n) {
68ad4a33
URS
3773 tmp = rb_entry(n, struct vmap_area, rb_node);
3774 if (tmp->va_start <= addr) {
3775 va = tmp;
3776 if (tmp->va_end >= addr)
3777 break;
3778
ca23e405 3779 n = n->rb_right;
68ad4a33
URS
3780 } else {
3781 n = n->rb_left;
3782 }
ca23e405
TH
3783 }
3784
68ad4a33 3785 return va;
ca23e405
TH
3786}
3787
3788/**
68ad4a33
URS
3789 * pvm_determine_end_from_reverse - find the highest aligned address
 3790 * of a free block below VMALLOC_END
 3791 * @va:
 3792 * in - the VA we start the search from (in reverse order);
3793 * out - the VA with the highest aligned end address.
799fa85d 3794 * @align: alignment for required highest address
ca23e405 3795 *
68ad4a33 3796 * Returns: determined end address within vmap_area
ca23e405 3797 */
68ad4a33
URS
3798static unsigned long
3799pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
ca23e405 3800{
68ad4a33 3801 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
ca23e405
TH
3802 unsigned long addr;
3803
68ad4a33
URS
3804 if (likely(*va)) {
3805 list_for_each_entry_from_reverse((*va),
3806 &free_vmap_area_list, list) {
3807 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3808 if ((*va)->va_start < addr)
3809 return addr;
3810 }
ca23e405
TH
3811 }
3812
68ad4a33 3813 return 0;
ca23e405
TH
3814}
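/*
 * Illustrative sketch (not part of this file): the align-down step used
 * above. For a power-of-two 'align', clearing the low bits rounds an
 * address down to the previous boundary, e.g.
 * 0x12345 & ~(0x1000 - 1) == 0x12000.
 */
static inline unsigned long demo_align_down(unsigned long addr, unsigned long align)
{
	return addr & ~(align - 1);
}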
3815
3816/**
3817 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3818 * @offsets: array containing offset of each area
3819 * @sizes: array containing size of each area
3820 * @nr_vms: the number of areas to allocate
3821 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
ca23e405
TH
3822 *
3823 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3824 * vm_structs on success, %NULL on failure
3825 *
3826 * Percpu allocator wants to use congruent vm areas so that it can
3827 * maintain the offsets among percpu areas. This function allocates
ec3f64fc
DR
 3828 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 3829 * be scattered pretty far apart, with the distance between two areas
 3830 * easily reaching gigabytes. To avoid interacting with regular vmallocs,
 3831 * these areas are allocated from the top.
ca23e405 3832 *
68ad4a33
URS
3833 * Despite its complicated look, this allocator is rather simple. It
3834 * does everything top-down and scans free blocks from the end looking
 3835 * for a matching base. While scanning, if any of the areas does not fit,
 3836 * the base address is pulled down to fit the area. Scanning is repeated
 3837 * until all the areas fit, and then all necessary data structures are
 3838 * inserted and the result is returned.
ca23e405
TH
3839 */
3840struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3841 const size_t *sizes, int nr_vms,
ec3f64fc 3842 size_t align)
ca23e405
TH
3843{
3844 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3845 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
68ad4a33 3846 struct vmap_area **vas, *va;
ca23e405
TH
3847 struct vm_struct **vms;
3848 int area, area2, last_area, term_area;
253a496d 3849 unsigned long base, start, size, end, last_end, orig_start, orig_end;
ca23e405
TH
3850 bool purged = false;
3851
ca23e405 3852 /* verify parameters and allocate data structures */
891c49ab 3853 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
ca23e405
TH
3854 for (last_area = 0, area = 0; area < nr_vms; area++) {
3855 start = offsets[area];
3856 end = start + sizes[area];
3857
3858 /* is everything aligned properly? */
3859 BUG_ON(!IS_ALIGNED(offsets[area], align));
3860 BUG_ON(!IS_ALIGNED(sizes[area], align));
3861
3862 /* detect the area with the highest address */
3863 if (start > offsets[last_area])
3864 last_area = area;
3865
c568da28 3866 for (area2 = area + 1; area2 < nr_vms; area2++) {
ca23e405
TH
3867 unsigned long start2 = offsets[area2];
3868 unsigned long end2 = start2 + sizes[area2];
3869
c568da28 3870 BUG_ON(start2 < end && start < end2);
ca23e405
TH
3871 }
3872 }
3873 last_end = offsets[last_area] + sizes[last_area];
3874
3875 if (vmalloc_end - vmalloc_start < last_end) {
3876 WARN_ON(true);
3877 return NULL;
3878 }
3879
4d67d860
TM
3880 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3881 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
ca23e405 3882 if (!vas || !vms)
f1db7afd 3883 goto err_free2;
ca23e405
TH
3884
3885 for (area = 0; area < nr_vms; area++) {
68ad4a33 3886 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
ec3f64fc 3887 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
ca23e405
TH
3888 if (!vas[area] || !vms[area])
3889 goto err_free;
3890 }
3891retry:
e36176be 3892 spin_lock(&free_vmap_area_lock);
ca23e405
TH
3893
3894 /* start scanning - we scan from the top, begin with the last area */
3895 area = term_area = last_area;
3896 start = offsets[area];
3897 end = start + sizes[area];
3898
68ad4a33
URS
3899 va = pvm_find_va_enclose_addr(vmalloc_end);
3900 base = pvm_determine_end_from_reverse(&va, align) - end;
ca23e405
TH
3901
3902 while (true) {
ca23e405
TH
3903 /*
3904 * base might have underflowed, add last_end before
3905 * comparing.
3906 */
68ad4a33
URS
3907 if (base + last_end < vmalloc_start + last_end)
3908 goto overflow;
ca23e405
TH
3909
3910 /*
68ad4a33 3911 * Fitting base has not been found.
ca23e405 3912 */
68ad4a33
URS
3913 if (va == NULL)
3914 goto overflow;
ca23e405 3915
5336e52c 3916 /*
d8cc323d 3917 * If required width exceeds current VA block, move
5336e52c
KS
3918 * base downwards and then recheck.
3919 */
3920 if (base + end > va->va_end) {
3921 base = pvm_determine_end_from_reverse(&va, align) - end;
3922 term_area = area;
3923 continue;
3924 }
3925
ca23e405 3926 /*
68ad4a33 3927 * If this VA does not fit, move base downwards and recheck.
ca23e405 3928 */
5336e52c 3929 if (base + start < va->va_start) {
68ad4a33
URS
3930 va = node_to_va(rb_prev(&va->rb_node));
3931 base = pvm_determine_end_from_reverse(&va, align) - end;
ca23e405
TH
3932 term_area = area;
3933 continue;
3934 }
3935
3936 /*
3937 * This area fits, move on to the previous one. If
3938 * the previous one is the terminal one, we're done.
3939 */
3940 area = (area + nr_vms - 1) % nr_vms;
3941 if (area == term_area)
3942 break;
68ad4a33 3943
ca23e405
TH
3944 start = offsets[area];
3945 end = start + sizes[area];
68ad4a33 3946 va = pvm_find_va_enclose_addr(base + end);
ca23e405 3947 }
68ad4a33 3948
ca23e405
TH
3949 /* we've found a fitting base, insert all va's */
3950 for (area = 0; area < nr_vms; area++) {
68ad4a33 3951 int ret;
ca23e405 3952
68ad4a33
URS
3953 start = base + offsets[area];
3954 size = sizes[area];
ca23e405 3955
68ad4a33
URS
3956 va = pvm_find_va_enclose_addr(start);
3957 if (WARN_ON_ONCE(va == NULL))
3958 /* It is a BUG(), but trigger recovery instead. */
3959 goto recovery;
3960
f9863be4
URS
3961 ret = adjust_va_to_fit_type(&free_vmap_area_root,
3962 &free_vmap_area_list,
3963 va, start, size);
1b23ff80 3964 if (WARN_ON_ONCE(unlikely(ret)))
68ad4a33
URS
3965 /* It is a BUG(), but trigger recovery instead. */
3966 goto recovery;
3967
68ad4a33
URS
3968 /* Allocated area. */
3969 va = vas[area];
3970 va->va_start = start;
3971 va->va_end = start + size;
68ad4a33 3972 }
ca23e405 3973
e36176be 3974 spin_unlock(&free_vmap_area_lock);
ca23e405 3975
253a496d
DA
3976 /* populate the kasan shadow space */
3977 for (area = 0; area < nr_vms; area++) {
3978 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3979 goto err_free_shadow;
253a496d
DA
3980 }
3981
ca23e405 3982 /* insert all vm's */
e36176be
URS
3983 spin_lock(&vmap_area_lock);
3984 for (area = 0; area < nr_vms; area++) {
3985 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3986
3987 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3645cb4a 3988 pcpu_get_vm_areas);
e36176be
URS
3989 }
3990 spin_unlock(&vmap_area_lock);
ca23e405 3991
19f1c3ac
AK
3992 /*
3993 * Mark allocated areas as accessible. Do it now as a best-effort
3994 * approach, as they can be mapped outside of vmalloc code.
23689e91
AK
3995 * With hardware tag-based KASAN, marking is skipped for
3996 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
19f1c3ac 3997 */
1d96320f
AK
3998 for (area = 0; area < nr_vms; area++)
3999 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
f6e39794 4000 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
1d96320f 4001
ca23e405
TH
4002 kfree(vas);
4003 return vms;
4004
68ad4a33 4005recovery:
e36176be
URS
4006 /*
4007 * Remove previously allocated areas. There is no
 4008 * need to remove these areas from the busy tree,
 4009 * because they are inserted only in the final step
 4010 * and only when pcpu_get_vm_areas() succeeds.
4011 */
68ad4a33 4012 while (area--) {
253a496d
DA
4013 orig_start = vas[area]->va_start;
4014 orig_end = vas[area]->va_end;
96e2db45
URS
4015 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4016 &free_vmap_area_list);
9c801f61
URS
4017 if (va)
4018 kasan_release_vmalloc(orig_start, orig_end,
4019 va->va_start, va->va_end);
68ad4a33
URS
4020 vas[area] = NULL;
4021 }
4022
4023overflow:
e36176be 4024 spin_unlock(&free_vmap_area_lock);
68ad4a33
URS
4025 if (!purged) {
4026 purge_vmap_area_lazy();
4027 purged = true;
4028
4029 /* Before "retry", check if we recover. */
4030 for (area = 0; area < nr_vms; area++) {
4031 if (vas[area])
4032 continue;
4033
4034 vas[area] = kmem_cache_zalloc(
4035 vmap_area_cachep, GFP_KERNEL);
4036 if (!vas[area])
4037 goto err_free;
4038 }
4039
4040 goto retry;
4041 }
4042
ca23e405
TH
4043err_free:
4044 for (area = 0; area < nr_vms; area++) {
68ad4a33
URS
4045 if (vas[area])
4046 kmem_cache_free(vmap_area_cachep, vas[area]);
4047
f1db7afd 4048 kfree(vms[area]);
ca23e405 4049 }
f1db7afd 4050err_free2:
ca23e405
TH
4051 kfree(vas);
4052 kfree(vms);
4053 return NULL;
253a496d
DA
4054
4055err_free_shadow:
4056 spin_lock(&free_vmap_area_lock);
4057 /*
4058 * We release all the vmalloc shadows, even the ones for regions that
 4059 * haven't been successfully added. This relies on kasan_release_vmalloc
4060 * being able to tolerate this case.
4061 */
4062 for (area = 0; area < nr_vms; area++) {
4063 orig_start = vas[area]->va_start;
4064 orig_end = vas[area]->va_end;
96e2db45
URS
4065 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4066 &free_vmap_area_list);
9c801f61
URS
4067 if (va)
4068 kasan_release_vmalloc(orig_start, orig_end,
4069 va->va_start, va->va_end);
253a496d
DA
4070 vas[area] = NULL;
4071 kfree(vms[area]);
4072 }
4073 spin_unlock(&free_vmap_area_lock);
4074 kfree(vas);
4075 kfree(vms);
4076 return NULL;
ca23e405
TH
4077}
4078
4079/**
4080 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4081 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4082 * @nr_vms: the number of allocated areas
4083 *
4084 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4085 */
4086void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4087{
4088 int i;
4089
4090 for (i = 0; i < nr_vms; i++)
4091 free_vm_area(vms[i]);
4092 kfree(vms);
4093}
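/*
 * Illustrative sketch (hypothetical offsets/sizes, not part of this file):
 * request two congruent areas the way the percpu allocator does, then
 * release them again. Real callers map percpu pages into vms[i]->addr
 * between the two calls.
 */
static int demo_pcpu_areas(void)
{
	const unsigned long offsets[] = { 0, PMD_SIZE };
	const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PMD_SIZE);
	if (!vms)
		return -ENOMEM;

	/* ... populate the areas here ... */

	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
	return 0;
}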
4f8b02b4 4094#endif /* CONFIG_SMP */
a10aa579 4095
5bb1bb35 4096#ifdef CONFIG_PRINTK
98f18083
PM
4097bool vmalloc_dump_obj(void *object)
4098{
4099 struct vm_struct *vm;
4100 void *objp = (void *)PAGE_ALIGN((unsigned long)object);
4101
4102 vm = find_vm_area(objp);
4103 if (!vm)
4104 return false;
bd34dcd4
PM
4105 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4106 vm->nr_pages, (unsigned long)vm->addr, vm->caller);
98f18083
PM
4107 return true;
4108}
5bb1bb35 4109#endif
98f18083 4110
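/*
 * Illustrative sketch (hypothetical name, not part of this file): report the
 * origin of a suspect pointer if it lies in a vmalloc region. In-tree callers
 * normally reach this through mem_dump_obj(), which also covers slab and
 * page allocations.
 */
static void demo_dump_obj(void *p)
{
	if (!vmalloc_dump_obj(p))
		pr_info("%px is not a tracked vmalloc address\n", p);
}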
a10aa579
CL
4111#ifdef CONFIG_PROC_FS
4112static void *s_start(struct seq_file *m, loff_t *pos)
e36176be 4113 __acquires(&vmap_purge_lock)
d4033afd 4114 __acquires(&vmap_area_lock)
a10aa579 4115{
e36176be 4116 mutex_lock(&vmap_purge_lock);
d4033afd 4117 spin_lock(&vmap_area_lock);
e36176be 4118
3f500069 4119 return seq_list_start(&vmap_area_list, *pos);
a10aa579
CL
4120}
4121
4122static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4123{
3f500069 4124 return seq_list_next(p, &vmap_area_list, pos);
a10aa579
CL
4125}
4126
4127static void s_stop(struct seq_file *m, void *p)
d4033afd 4128 __releases(&vmap_area_lock)
0a7dd4e9 4129 __releases(&vmap_purge_lock)
a10aa579 4130{
d4033afd 4131 spin_unlock(&vmap_area_lock);
0a7dd4e9 4132 mutex_unlock(&vmap_purge_lock);
a10aa579
CL
4133}
4134
a47a126a
ED
4135static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4136{
e5adfffc 4137 if (IS_ENABLED(CONFIG_NUMA)) {
a47a126a 4138 unsigned int nr, *counters = m->private;
51e50b3a 4139 unsigned int step = 1U << vm_area_page_order(v);
a47a126a
ED
4140
4141 if (!counters)
4142 return;
4143
af12346c
WL
4144 if (v->flags & VM_UNINITIALIZED)
4145 return;
7e5b528b
DV
4146 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4147 smp_rmb();
af12346c 4148
a47a126a
ED
4149 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4150
51e50b3a
ED
4151 for (nr = 0; nr < v->nr_pages; nr += step)
4152 counters[page_to_nid(v->pages[nr])] += step;
a47a126a
ED
4153 for_each_node_state(nr, N_HIGH_MEMORY)
4154 if (counters[nr])
4155 seq_printf(m, " N%u=%u", nr, counters[nr]);
4156 }
4157}
4158
dd3b8353
URS
4159static void show_purge_info(struct seq_file *m)
4160{
dd3b8353
URS
4161 struct vmap_area *va;
4162
96e2db45
URS
4163 spin_lock(&purge_vmap_area_lock);
4164 list_for_each_entry(va, &purge_vmap_area_list, list) {
dd3b8353
URS
4165 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4166 (void *)va->va_start, (void *)va->va_end,
4167 va->va_end - va->va_start);
4168 }
96e2db45 4169 spin_unlock(&purge_vmap_area_lock);
dd3b8353
URS
4170}
4171
a10aa579
CL
4172static int s_show(struct seq_file *m, void *p)
4173{
3f500069 4174 struct vmap_area *va;
d4033afd
JK
4175 struct vm_struct *v;
4176
3f500069 4177 va = list_entry(p, struct vmap_area, list);
4178
688fcbfc 4179 if (!va->vm) {
bba9697b
BH
4180 if (va->flags & VMAP_RAM)
4181 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4182 (void *)va->va_start, (void *)va->va_end,
4183 va->va_end - va->va_start);
78c72746 4184
7cc7913e 4185 goto final;
78c72746 4186 }
d4033afd
JK
4187
4188 v = va->vm;
a10aa579 4189
45ec1690 4190 seq_printf(m, "0x%pK-0x%pK %7ld",
a10aa579
CL
4191 v->addr, v->addr + v->size, v->size);
4192
62c70bce
JP
4193 if (v->caller)
4194 seq_printf(m, " %pS", v->caller);
23016969 4195
a10aa579
CL
4196 if (v->nr_pages)
4197 seq_printf(m, " pages=%d", v->nr_pages);
4198
4199 if (v->phys_addr)
199eaa05 4200 seq_printf(m, " phys=%pa", &v->phys_addr);
a10aa579
CL
4201
4202 if (v->flags & VM_IOREMAP)
f4527c90 4203 seq_puts(m, " ioremap");
a10aa579
CL
4204
4205 if (v->flags & VM_ALLOC)
f4527c90 4206 seq_puts(m, " vmalloc");
a10aa579
CL
4207
4208 if (v->flags & VM_MAP)
f4527c90 4209 seq_puts(m, " vmap");
a10aa579
CL
4210
4211 if (v->flags & VM_USERMAP)
f4527c90 4212 seq_puts(m, " user");
a10aa579 4213
fe9041c2
CH
4214 if (v->flags & VM_DMA_COHERENT)
4215 seq_puts(m, " dma-coherent");
4216
244d63ee 4217 if (is_vmalloc_addr(v->pages))
f4527c90 4218 seq_puts(m, " vpages");
a10aa579 4219
a47a126a 4220 show_numa_info(m, v);
a10aa579 4221 seq_putc(m, '\n');
dd3b8353
URS
4222
4223 /*
96e2db45 4224 * As a final step, dump "unpurged" areas.
dd3b8353 4225 */
7cc7913e 4226final:
dd3b8353
URS
4227 if (list_is_last(&va->list, &vmap_area_list))
4228 show_purge_info(m);
4229
a10aa579
CL
4230 return 0;
4231}
4232
5f6a6a9c 4233static const struct seq_operations vmalloc_op = {
a10aa579
CL
4234 .start = s_start,
4235 .next = s_next,
4236 .stop = s_stop,
4237 .show = s_show,
4238};
5f6a6a9c 4239
5f6a6a9c
AD
4240static int __init proc_vmalloc_init(void)
4241{
fddda2b7 4242 if (IS_ENABLED(CONFIG_NUMA))
0825a6f9 4243 proc_create_seq_private("vmallocinfo", 0400, NULL,
44414d82
CH
4244 &vmalloc_op,
4245 nr_node_ids * sizeof(unsigned int), NULL);
fddda2b7 4246 else
0825a6f9 4247 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
5f6a6a9c
AD
4248 return 0;
4249}
4250module_init(proc_vmalloc_init);
db3808c1 4251
a10aa579 4252#endif
208162f4
CH
4253
4254void __init vmalloc_init(void)
4255{
4256 struct vmap_area *va;
4257 struct vm_struct *tmp;
4258 int i;
4259
4260 /*
4261 * Create the cache for vmap_area objects.
4262 */
4263 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
4264
4265 for_each_possible_cpu(i) {
4266 struct vmap_block_queue *vbq;
4267 struct vfree_deferred *p;
4268
4269 vbq = &per_cpu(vmap_block_queue, i);
4270 spin_lock_init(&vbq->lock);
4271 INIT_LIST_HEAD(&vbq->free);
4272 p = &per_cpu(vfree_deferred, i);
4273 init_llist_head(&p->list);
4274 INIT_WORK(&p->wq, delayed_vfree_work);
4275 }
4276
4277 /* Import existing vmlist entries. */
4278 for (tmp = vmlist; tmp; tmp = tmp->next) {
4279 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4280 if (WARN_ON_ONCE(!va))
4281 continue;
4282
4283 va->va_start = (unsigned long)tmp->addr;
4284 va->va_end = va->va_start + tmp->size;
4285 va->vm = tmp;
4286 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
4287 }
4288
4289 /*
4290 * Now we can initialize a free vmap space.
4291 */
4292 vmap_init_free_space();
4293 vmap_initialized = true;
4294}