// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the __GFP_ACCOUNT
 * flag should be passed.  All memcg-aware allocations share one set of
 * chunks, while all unaccounted allocations and allocations performed by
 * processes belonging to the root memory cgroup use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between the bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest continuous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

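/*
 * Illustrative sketch of the default translation above: a static percpu
 * variable lives at some address addr inside the image's
 * [__per_cpu_start, __per_cpu_end) section, and unit 0 of the first
 * chunk begins at pcpu_base_addr.  The percpu pointer is then just the
 * variable's offset within the static region:
 *
 *	ptr  = addr - pcpu_base_addr + __per_cpu_start
 *	addr = ptr  + pcpu_base_addr - __per_cpu_start
 *
 * per_cpu_ptr() later adds the cpu's unit offset to arrive at that
 * cpu's copy.
 */
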
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}

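/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5, a
 * 1024-byte area has fls(1024) == 11 and lands in slot
 * max(11 - 5 + 2, 1) == 8, while a 12-byte area has fls(12) == 4 and
 * falls through to slot 1.  A chunk whose largest contiguous free area
 * equals pcpu_unit_size is fully free and goes to pcpu_free_slot.
 */
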
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

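/*
 * Worked example (illustrative, assuming 4K pages so that
 * PCPU_BITMAP_BLOCK_BITS == 1024): bit offset 2500 in a chunk sits in
 * block 2500 / 1024 == 2 at block-local offset 2500 & 1023 == 452, and
 * pcpu_block_off_to_off(2, 452) recovers 2 * 1024 + 452 == 2500.
 */
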
/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

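/*
 * Illustrative scenario: suppose first_free == 0, a 50-bit scan_hint
 * starts at bit 100, and a 200-bit contig_hint starts at bit 400.  A
 * request for 80 bits cannot fit in the scan_hint (80 > 50), so the
 * scan may begin at bit 150, just past the scan_hint, instead of bit 0,
 * with the contig_hint guaranteeing a fallback fit.
 */
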
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages based on the premise that
 * an md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

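/*
 * Illustrative check of the half-open convention: regions [0, 4) and
 * [4, 8) do not overlap (4 < 4 is false), while [0, 5) and [4, 8)
 * do (0 < 8 && 4 < 5).
 */
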
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

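/*
 * Illustrative walk-through (assuming the old scan_hint was smaller):
 * if a block's contig_hint is 100 bits at offset 200 and a newly freed
 * area yields a 150-bit run at offset 600, the 100-bit run is demoted
 * to the scan_hint (scan_hint_start == 200) and the 150-bit run becomes
 * the contig_hint.  This keeps a cheap record of the second-largest
 * known free area for pcpu_next_hint() to use.
 */
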
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int start, end;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, start, end);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	/*
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	/*
	 * If the allocation is not atomic, some blocks may not be
	 * populated with pages, while we account it here.  The number
	 * of pages will be added back with pcpu_chunk_populated()
	 * when populating pages.
	 */
	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* freeing spans across blocks */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int start, end;

	start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	start = find_next_zero_bit(chunk->populated, end, start);
	if (start >= end)
		return true;

	end = find_next_bit(chunk->populated, end, start + 1);

	*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * This is an optimization to prevent scanning by assuming if the
	 * allocation cannot fit in the global hint, there is memory pressure
	 * and creating a new chunk would happen soon.
	 */
	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}

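/*
 * Illustrative run (hypothetical bitmap): looking for nr == 4 zero bits
 * with align_mask == 3 in a map where bit 2 is set and all other bits
 * are clear.  The first candidate [0, 4) fails at bit 2, so [0, 2) is
 * remembered as the largest skipped area.  The search resumes at bit 3,
 * is aligned up to bit 4, and succeeds at [4, 8).  The free bit 3 that
 * alignment skipped is later recovered by pcpu_block_update_scan()'s
 * backwards scan.
 */
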
/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

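/*
 * Illustrative view of the two bitmaps: allocating 3 bits at bit_off 8
 * followed by 2 bits at bit_off 11 leaves
 *
 *	alloc_map: bits 8-12 set
 *	bound_map: bits 8, 11, and 13 set
 *
 * so pcpu_free_area() can recover each allocation's size from the
 * distance between consecutive bound_map bits, without storing sizes
 * anywhere else.
 */
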
/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, end, oslot, freed;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	freed = bits * PCPU_MIN_ALLOC_SIZE;

	/* update metadata */
	chunk->free_bytes += freed;

	/* update first free bit */
	chunk_md->first_free = min(chunk_md->first_free, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);

	return freed;
}

static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr aligned down to a page boundary while the
 * region end is aligned up.  Offsets are kept track of to determine the
 * region served.  All this is done to appease the bitmap allocator in
 * avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr;
	int start_offset, offset_bits, region_size, region_bits;
	size_t alloc_size;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;
	region_size = ALIGN(start_offset + map_size, PAGE_SIZE);

	/* allocate chunk */
	alloc_size = struct_size(chunk, populated,
				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->alloc_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size =
		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->bound_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->md_blocks)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

#ifdef NEED_PCPUOBJ_EXT
	/* first chunk is free to use */
	chunk->obj_exts = NULL;
#endif
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages = chunk->nr_pages;

	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->chunk_md.first_free = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}

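/*
 * Illustrative numbers (assuming 4K pages): if the served region starts
 * at tmp_addr == base + 0x1800 with map_size == 0x2000, then base_addr
 * is rounded down to base + 0x1000, start_offset == 0x800, region_size
 * is ALIGN(0x800 + 0x2000, 0x1000) == 0x3000, and end_offset ==
 * 0x3000 - 0x800 - 0x2000 == 0x800.  Both offsets are then marked
 * allocated above so the bitmap only ever covers whole pages.
 */
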
static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]), gfp);
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]), gfp);
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]), gfp);
	if (!chunk->md_blocks)
		goto md_blocks_fail;

#ifdef NEED_PCPUOBJ_EXT
	if (need_pcpuobj_ext()) {
		chunk->obj_exts =
			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
					sizeof(struct pcpuobj_ext), gfp);
		if (!chunk->obj_exts)
			goto objcg_fail;
	}
#endif

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

#ifdef NEED_PCPUOBJ_EXT
objcg_fail:
	pcpu_mem_free(chunk->md_blocks);
#endif
md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}

1496
1497static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1498{
1499 if (!chunk)
1500 return;
8f30d266
KO
1501#ifdef NEED_PCPUOBJ_EXT
1502 pcpu_mem_free(chunk->obj_exts);
3c7be18a 1503#endif
6685b357 1504 pcpu_mem_free(chunk->md_blocks);
40064aec
DZF
1505 pcpu_mem_free(chunk->bound_map);
1506 pcpu_mem_free(chunk->alloc_map);
1d5cfdb0 1507 pcpu_mem_free(chunk);
6081089f
TH
1508}
1509
/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_populated += nr;

	pcpu_update_empty_pages(chunk, nr);
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_populated -= nr;

	pcpu_update_empty_pages(chunk, -nr);
}

9f645532
TH
1558/*
1559 * Chunk management implementation.
1560 *
1561 * To allow different implementations, chunk alloc/free and
1562 * [de]population are implemented in a separate file which is pulled
1563 * into this file and compiled together. The following functions
1564 * should be implemented.
1565 *
1566 * pcpu_populate_chunk - populate the specified range of a chunk
1567 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
93274f1d 1568 * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
9f645532
TH
1569 * pcpu_create_chunk - create a new chunk
1570 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1571 * pcpu_addr_to_page - translate address to the backing page
1572 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
fbf59bc9 1573 */
15d9f3d1 1574static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
47504ee0 1575 int page_start, int page_end, gfp_t gfp);
15d9f3d1
DZ
1576static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1577 int page_start, int page_end);
93274f1d
DZ
1578static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1579 int page_start, int page_end);
faf65dde 1580static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
9f645532
TH
1581static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1582static struct page *pcpu_addr_to_page(void *addr);
1583static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
fbf59bc9 1584
b0c9778b
TH
1585#ifdef CONFIG_NEED_PER_CPU_KM
1586#include "percpu-km.c"
1587#else
9f645532 1588#include "percpu-vm.c"
b0c9778b 1589#endif
fbf59bc9 1590
88999a89
TH
1591/**
1592 * pcpu_chunk_addr_search - determine chunk containing specified address
1593 * @addr: address for which the chunk needs to be determined.
1594 *
c0ebfdc3
DZF
1595 * This is an internal function that handles all but static allocations.
1596 * Static percpu address values should never be passed into the allocator.
1597 *
88999a89
TH
1598 * RETURNS:
1599 * The address of the found chunk.
1600 */
1601static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1602{
c0ebfdc3 1603 /* is it in the dynamic region (first chunk)? */
560f2c23 1604 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
88999a89 1605 return pcpu_first_chunk;
c0ebfdc3
DZF
1606
1607 /* is it in the reserved region? */
560f2c23 1608 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
c0ebfdc3 1609 return pcpu_reserved_chunk;
88999a89
TH
1610
1611 /*
1612 * The address is relative to unit0 which might be unused and
1613 * thus unmapped. Offset the address to the unit space of the
1614 * current processor before looking it up in the vmalloc
1615 * space. Note that any possible cpu id can be used here, so
1616 * there's no need to worry about preemption or cpu hotplug.
1617 */
1618 addr += pcpu_unit_offsets[raw_smp_processor_id()];
9f645532 1619 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
88999a89
TH
1620}
1621
3c7be18a 1622#ifdef CONFIG_MEMCG_KMEM
faf65dde
RG
1623static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1624 struct obj_cgroup **objcgp)
3c7be18a
RG
1625{
1626 struct obj_cgroup *objcg;
1627
f7a449f7 1628 if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
faf65dde 1629 return true;
3c7be18a 1630
c63b835d 1631 objcg = current_obj_cgroup();
3c7be18a 1632 if (!objcg)
faf65dde 1633 return true;
3c7be18a 1634
c63b835d 1635 if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
faf65dde 1636 return false;
3c7be18a
RG
1637
1638 *objcgp = objcg;
faf65dde 1639 return true;
3c7be18a
RG
1640}
1641
1642static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1643 struct pcpu_chunk *chunk, int off,
1644 size_t size)
1645{
1646 if (!objcg)
1647 return;
1648
8f30d266 1649 if (likely(chunk && chunk->obj_exts)) {
c63b835d 1650 obj_cgroup_get(objcg);
8f30d266 1651 chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
772616b0
RG
1652
1653 rcu_read_lock();
1654 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
8c57c077 1655 pcpu_obj_full_size(size));
772616b0 1656 rcu_read_unlock();
3c7be18a 1657 } else {
8c57c077 1658 obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
3c7be18a
RG
1659 }
1660}
1661
1662static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1663{
1664 struct obj_cgroup *objcg;
1665
8f30d266 1666 if (unlikely(!chunk->obj_exts))
3c7be18a
RG
1667 return;
1668
8f30d266 1669 objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
faf65dde
RG
1670 if (!objcg)
1671 return;
8f30d266 1672 chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
3c7be18a 1673
8c57c077 1674 obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
3c7be18a 1675
772616b0
RG
1676 rcu_read_lock();
1677 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
8c57c077 1678 -pcpu_obj_full_size(size));
772616b0
RG
1679 rcu_read_unlock();
1680
3c7be18a
RG
1681 obj_cgroup_put(objcg);
1682}
1683
1684#else /* CONFIG_MEMCG_KMEM */
faf65dde 1685static bool
3c7be18a
RG
1686pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1687{
faf65dde 1688 return true;
3c7be18a
RG
1689}
1690
1691static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1692 struct pcpu_chunk *chunk, int off,
1693 size_t size)
1694{
1695}
1696
1697static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1698{
1699}
1700#endif /* CONFIG_MEMCG_KMEM */
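As a usage sketch of the hooks above (illustrative caller, not part of this file; net_stats is a hypothetical name): a caller opts into memcg accounting by passing __GFP_ACCOUNT, which routes the allocation through the pre/post alloc hooks.

	/* Illustrative only: a memcg-charged dynamic percpu allocation. */
	u64 __percpu *net_stats;

	net_stats = __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
				       GFP_KERNEL | __GFP_ACCOUNT);
	/* without __GFP_ACCOUNT, the hooks above are effectively no-ops */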
1701
60fa4a9e
KO
1702#ifdef CONFIG_MEM_ALLOC_PROFILING
1703static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1704 size_t size)
1705{
1706 if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) {
1707 alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag,
1708 current->alloc_tag, size);
1709 }
1710}
1711
1712static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1713{
1714 if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts))
1715 alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size);
1716}
1717#else
1718static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
1719 size_t size)
1720{
1721}
1722
1723static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1724{
1725}
1726#endif
1727
fbf59bc9 1728/**
edcb4639 1729 * pcpu_alloc - the percpu allocator
cae3aeb8 1730 * @size: size of area to allocate in bytes
fbf59bc9 1731 * @align: alignment of area (max PAGE_SIZE)
edcb4639 1732 * @reserved: allocate from the reserved chunk if available
5835d96e 1733 * @gfp: allocation flags
fbf59bc9 1734 *
5835d96e 1735 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
0ea7eeec
DB
1736 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1737 * then no warning will be triggered on invalid or failed allocation
1738 * requests.
fbf59bc9
TH
1739 *
1740 * RETURNS:
1741 * Percpu pointer to the allocated area on success, NULL on failure.
1742 */
24e44cc2 1743void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
5835d96e 1744 gfp_t gfp)
fbf59bc9 1745{
28307d93
FM
1746 gfp_t pcpu_gfp;
1747 bool is_atomic;
1748 bool do_warn;
3c7be18a 1749 struct obj_cgroup *objcg = NULL;
f2badb0c 1750 static int warn_limit = 10;
8744d859 1751 struct pcpu_chunk *chunk, *next;
f2badb0c 1752 const char *err;
40064aec 1753 int slot, off, cpu, ret;
403a91b1 1754 unsigned long flags;
f528f0b8 1755 void __percpu *ptr;
40064aec 1756 size_t bits, bit_align;
fbf59bc9 1757
28307d93
FM
1758 gfp = current_gfp_context(gfp);
1759 /* whitelisted flags that can be passed to the backing allocators */
1760 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1761 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1762 do_warn = !(gfp & __GFP_NOWARN);
1763
723ad1d9 1764 /*
40064aec
DZF
1765 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1766 * so alignment must be at least that many bytes. An allocation may
1767 * therefore have up to PCPU_MIN_ALLOC_SIZE - 1 bytes of internal
1768 * fragmentation from rounding up.
723ad1d9 1769 */
d2f3c384
DZF
1770 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1771 align = PCPU_MIN_ALLOC_SIZE;
723ad1d9 1772
d2f3c384 1773 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
40064aec
DZF
1774 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1775 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
2f69fa82 1776
3ca45a46 1777 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1778 !is_power_of_2(align))) {
0ea7eeec 1779 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
756a025f 1780 size, align);
fbf59bc9
TH
1781 return NULL;
1782 }
1783
faf65dde 1784 if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
3c7be18a 1785 return NULL;
3c7be18a 1786
f52ba1fe
KT
1787 if (!is_atomic) {
1788 /*
1789 * pcpu_balance_workfn() allocates memory under this mutex,
1790 * and it may wait for memory reclaim. Allow the current task
1791 * to become an OOM victim, in case of memory pressure.
1792 */
3c7be18a 1793 if (gfp & __GFP_NOFAIL) {
f52ba1fe 1794 mutex_lock(&pcpu_alloc_mutex);
3c7be18a
RG
1795 } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1796 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
f52ba1fe 1797 return NULL;
3c7be18a 1798 }
f52ba1fe 1799 }
6710e594 1800
403a91b1 1801 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9 1802
edcb4639
TH
1803 /* serve reserved allocations from the reserved chunk if available */
1804 if (reserved && pcpu_reserved_chunk) {
1805 chunk = pcpu_reserved_chunk;
833af842 1806
40064aec
DZF
1807 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1808 if (off < 0) {
833af842 1809 err = "alloc from reserved chunk failed";
ccea34b5 1810 goto fail_unlock;
f2badb0c 1811 }
833af842 1812
40064aec 1813 off = pcpu_alloc_area(chunk, bits, bit_align, off);
edcb4639
TH
1814 if (off >= 0)
1815 goto area_found;
833af842 1816
f2badb0c 1817 err = "alloc from reserved chunk failed";
ccea34b5 1818 goto fail_unlock;
edcb4639
TH
1819 }
1820
ccea34b5 1821restart:
edcb4639 1822 /* search through normal chunks */
f1833241 1823 for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
faf65dde
RG
1824 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1825 list) {
40064aec
DZF
1826 off = pcpu_find_block_fit(chunk, bits, bit_align,
1827 is_atomic);
8744d859
DZ
1828 if (off < 0) {
1829 if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1830 pcpu_chunk_move(chunk, 0);
fbf59bc9 1831 continue;
8744d859 1832 }
ccea34b5 1833
40064aec 1834 off = pcpu_alloc_area(chunk, bits, bit_align, off);
f1833241
RG
1835 if (off >= 0) {
1836 pcpu_reintegrate_chunk(chunk);
fbf59bc9 1837 goto area_found;
f1833241 1838 }
fbf59bc9
TH
1839 }
1840 }
1841
403a91b1 1842 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1843
11df02bf
DZ
1844 if (is_atomic) {
1845 err = "atomic alloc failed, no space left";
5835d96e 1846 goto fail;
11df02bf 1847 }
5835d96e 1848
e04cb697 1849 /* No space left. Create a new chunk. */
faf65dde
RG
1850 if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1851 chunk = pcpu_create_chunk(pcpu_gfp);
b38d08f3
TH
1852 if (!chunk) {
1853 err = "failed to allocate new chunk";
1854 goto fail;
1855 }
1856
1857 spin_lock_irqsave(&pcpu_lock, flags);
1858 pcpu_chunk_relocate(chunk, -1);
1859 } else {
1860 spin_lock_irqsave(&pcpu_lock, flags);
f2badb0c 1861 }
ccea34b5 1862
ccea34b5 1863 goto restart;
fbf59bc9
TH
1864
1865area_found:
30a5b536 1866 pcpu_stats_area_alloc(chunk, size);
403a91b1 1867 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1868
dca49645 1869 /* populate if not all pages are already there */
5835d96e 1870 if (!is_atomic) {
ec288a2c 1871 unsigned int page_end, rs, re;
dca49645 1872
ec288a2c 1873 rs = PFN_DOWN(off);
e04d3208 1874 page_end = PFN_UP(off + size);
b38d08f3 1875
ec288a2c 1876 for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
e04d3208
TH
1877 WARN_ON(chunk->immutable);
1878
554fef1c 1879 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
e04d3208
TH
1880
1881 spin_lock_irqsave(&pcpu_lock, flags);
1882 if (ret) {
40064aec 1883 pcpu_free_area(chunk, off);
e04d3208
TH
1884 err = "failed to populate";
1885 goto fail_unlock;
1886 }
b239f7da 1887 pcpu_chunk_populated(chunk, rs, re);
e04d3208 1888 spin_unlock_irqrestore(&pcpu_lock, flags);
dca49645 1889 }
fbf59bc9 1890
e04d3208
TH
1891 mutex_unlock(&pcpu_alloc_mutex);
1892 }
ccea34b5 1893
faf65dde 1894 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1a4d7607
TH
1895 pcpu_schedule_balance_work();
1896
dca49645
TH
1897 /* clear the areas and return address relative to base address */
1898 for_each_possible_cpu(cpu)
1899 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1900
f528f0b8 1901 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
8a8c35fa 1902 kmemleak_alloc_percpu(ptr, size, gfp);
df95e795 1903
f67bed13
VA
1904 trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1905 chunk->base_addr, off, ptr,
1906 pcpu_obj_full_size(size), gfp);
df95e795 1907
3c7be18a
RG
1908 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1909
24e44cc2
SB
1910 pcpu_alloc_tag_alloc_hook(chunk, off, size);
1911
f528f0b8 1912 return ptr;
ccea34b5
TH
1913
1914fail_unlock:
403a91b1 1915 spin_unlock_irqrestore(&pcpu_lock, flags);
b38d08f3 1916fail:
df95e795
DZ
1917 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1918
f7d77dfc 1919 if (do_warn && warn_limit) {
870d4b12 1920 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
598d8091 1921 size, align, is_atomic, err);
f7d77dfc
BH
1922 if (!is_atomic)
1923 dump_stack();
f2badb0c 1924 if (!--warn_limit)
870d4b12 1925 pr_info("limit reached, disable warning\n");
f2badb0c 1926 }
f7d77dfc 1927
1a4d7607 1928 if (is_atomic) {
f0953a1b 1929 /* see the flag handling in pcpu_balance_workfn() */
1a4d7607
TH
1930 pcpu_atomic_alloc_failed = true;
1931 pcpu_schedule_balance_work();
6710e594
TH
1932 } else {
1933 mutex_unlock(&pcpu_alloc_mutex);
1a4d7607 1934 }
3c7be18a
RG
1935
1936 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1937
ccea34b5 1938 return NULL;
fbf59bc9 1939}
24e44cc2 1940EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
edcb4639 1941
a56dbddf 1942/**
67c2669d 1943 * pcpu_balance_free - manage the number of free chunks
f1833241 1944 * @empty_only: free chunks only if there are no populated pages
a56dbddf 1945 *
f1833241
RG
1946 * If empty_only is %false, reclaim all fully free chunks regardless of the
1947 * number of populated pages. Otherwise, only reclaim chunks that have no
1948 * populated pages.
e4d77700
RG
1949 *
1950 * CONTEXT:
1951 * pcpu_lock (can be dropped temporarily)
a56dbddf 1952 */
faf65dde 1953static void pcpu_balance_free(bool empty_only)
fbf59bc9 1954{
fe6bd8c3 1955 LIST_HEAD(to_free);
faf65dde 1956 struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
a56dbddf
TH
1957 struct pcpu_chunk *chunk, *next;
1958
e4d77700 1959 lockdep_assert_held(&pcpu_lock);
a56dbddf 1960
1a4d7607
TH
1961 /*
1962 * There's no reason to keep around multiple unused chunks and VM
1963 * areas can be scarce. Destroy all free chunks except for one.
1964 */
fe6bd8c3 1965 list_for_each_entry_safe(chunk, next, free_head, list) {
a56dbddf
TH
1966 WARN_ON(chunk->immutable);
1967
1968 /* spare the first one */
fe6bd8c3 1969 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
a56dbddf
TH
1970 continue;
1971
f1833241
RG
1972 if (!empty_only || chunk->nr_empty_pop_pages == 0)
1973 list_move(&chunk->list, &to_free);
a56dbddf
TH
1974 }
1975
e4d77700
RG
1976 if (list_empty(&to_free))
1977 return;
a56dbddf 1978
e4d77700 1979 spin_unlock_irq(&pcpu_lock);
fe6bd8c3 1980 list_for_each_entry_safe(chunk, next, &to_free, list) {
e837dfde 1981 unsigned int rs, re;
dca49645 1982
ec288a2c 1983 for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
a93ace48 1984 pcpu_depopulate_chunk(chunk, rs, re);
b539b87f
TH
1985 spin_lock_irq(&pcpu_lock);
1986 pcpu_chunk_depopulated(chunk, rs, re);
1987 spin_unlock_irq(&pcpu_lock);
a93ace48 1988 }
6081089f 1989 pcpu_destroy_chunk(chunk);
accd4f36 1990 cond_resched();
a56dbddf 1991 }
e4d77700 1992 spin_lock_irq(&pcpu_lock);
67c2669d
RG
1993}
1994
1995/**
1996 * pcpu_balance_populated - manage the number of populated pages
67c2669d
RG
1997 *
1998 * Maintain a certain number of populated pages to satisfy atomic allocations.
1999 * It is possible that this is called when physical memory is scarce,
2000 * causing the OOM killer to be triggered. We should avoid doing so until
2001 * an actual allocation fails, as it is possible that requests can be
2002 * serviced from already backed regions.
e4d77700
RG
2003 *
2004 * CONTEXT:
2005 * pcpu_lock (can be dropped temporarily)
67c2669d 2006 */
faf65dde 2007static void pcpu_balance_populated(void)
67c2669d
RG
2008{
2009 /* gfp flags passed to underlying allocators */
2010 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
67c2669d
RG
2011 struct pcpu_chunk *chunk;
2012 int slot, nr_to_pop, ret;
971f3918 2013
e4d77700 2014 lockdep_assert_held(&pcpu_lock);
971f3918 2015
1a4d7607
TH
2016 /*
2017 * Ensure there are a certain number of free populated pages for
2018 * atomic allocs. Fill up from the most packed so that atomic
2019 * allocs don't increase fragmentation. If atomic allocation
2020 * failed previously, always populate the maximum amount. This
2021 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2022 * failing indefinitely; however, large atomic allocs are not
2023 * something we support properly and can be highly unreliable and
2024 * inefficient.
2025 */
2026retry_pop:
2027 if (pcpu_atomic_alloc_failed) {
2028 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2029 /* best effort anyway, don't worry about synchronization */
2030 pcpu_atomic_alloc_failed = false;
2031 } else {
2032 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
faf65dde 2033 pcpu_nr_empty_pop_pages,
1a4d7607
TH
2034 0, PCPU_EMPTY_POP_PAGES_HIGH);
2035 }
2036
1c29a3ce 2037 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
e837dfde 2038 unsigned int nr_unpop = 0, rs, re;
1a4d7607
TH
2039
2040 if (!nr_to_pop)
2041 break;
2042
faf65dde 2043 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
8ab16c43 2044 nr_unpop = chunk->nr_pages - chunk->nr_populated;
1a4d7607
TH
2045 if (nr_unpop)
2046 break;
2047 }
1a4d7607
TH
2048
2049 if (!nr_unpop)
2050 continue;
2051
2052 /* @chunk can't go away while pcpu_alloc_mutex is held */
ec288a2c 2053 for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
e837dfde 2054 int nr = min_t(int, re - rs, nr_to_pop);
1a4d7607 2055
e4d77700 2056 spin_unlock_irq(&pcpu_lock);
47504ee0 2057 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
e4d77700
RG
2058 cond_resched();
2059 spin_lock_irq(&pcpu_lock);
1a4d7607
TH
2060 if (!ret) {
2061 nr_to_pop -= nr;
b239f7da 2062 pcpu_chunk_populated(chunk, rs, rs + nr);
1a4d7607
TH
2063 } else {
2064 nr_to_pop = 0;
2065 }
2066
2067 if (!nr_to_pop)
2068 break;
2069 }
2070 }
2071
2072 if (nr_to_pop) {
2073 /* ran out of chunks to populate, create a new one and retry */
e4d77700 2074 spin_unlock_irq(&pcpu_lock);
faf65dde 2075 chunk = pcpu_create_chunk(gfp);
e4d77700
RG
2076 cond_resched();
2077 spin_lock_irq(&pcpu_lock);
1a4d7607 2078 if (chunk) {
1a4d7607 2079 pcpu_chunk_relocate(chunk, -1);
1a4d7607
TH
2080 goto retry_pop;
2081 }
2082 }
fbf59bc9 2083}
1a4d7607 2084
f1833241
RG
2085/**
2086 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
f1833241
RG
2087 *
2088 * Scan over chunks in the depopulate list and try to release unused populated
2089 * pages back to the system. Depopulated chunks are sidelined to prevent
2090 * repopulating these pages unless required. Fully free chunks are reintegrated
2091 * and freed accordingly (1 is kept around). If we drop below the empty
2092 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2093 * Each chunk is scanned in the reverse order to keep populated pages close to
2094 * the beginning of the chunk.
e4d77700
RG
2095 *
2096 * CONTEXT:
2097 * pcpu_lock (can be dropped temporarily)
2098 *
f1833241 2099 */
faf65dde 2100static void pcpu_reclaim_populated(void)
f1833241 2101{
f1833241
RG
2102 struct pcpu_chunk *chunk;
2103 struct pcpu_block_md *block;
93274f1d 2104 int freed_page_start, freed_page_end;
f1833241 2105 int i, end;
93274f1d 2106 bool reintegrate;
f1833241 2107
e4d77700 2108 lockdep_assert_held(&pcpu_lock);
f1833241 2109
f1833241
RG
2110 /*
2111 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2112 * longer discoverable to allocations which may populate pages. The only
2113 * other accessor is the free path, which only returns the area back to
2114 * the allocator without touching the populated bitmap.
2115 */
c1f6688d
BH
2116 while ((chunk = list_first_entry_or_null(
2117 &pcpu_chunk_lists[pcpu_to_depopulate_slot],
2118 struct pcpu_chunk, list))) {
f1833241
RG
2119 WARN_ON(chunk->immutable);
2120
2121 /*
2122 * Scan chunk's pages in the reverse order to keep populated
2123 * pages close to the beginning of the chunk.
2124 */
93274f1d
DZ
2125 freed_page_start = chunk->nr_pages;
2126 freed_page_end = 0;
2127 reintegrate = false;
f1833241
RG
2128 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2129 /* no more work to do */
2130 if (chunk->nr_empty_pop_pages == 0)
2131 break;
2132
2133 /* reintegrate chunk to prevent atomic alloc failures */
faf65dde 2134 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
93274f1d 2135 reintegrate = true;
83d261fc 2136 break;
f1833241
RG
2137 }
2138
2139 /*
2140 * If the page is empty and populated, start or
2141 * extend the (i, end) range. If i == 0, decrease
2142 * i and perform the depopulation to cover the last
2143 * (first) page in the chunk.
2144 */
2145 block = chunk->md_blocks + i;
2146 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2147 test_bit(i, chunk->populated)) {
2148 if (end == -1)
2149 end = i;
2150 if (i > 0)
2151 continue;
2152 i--;
2153 }
2154
2155 /* depopulate if there is an active range */
2156 if (end == -1)
2157 continue;
2158
2159 spin_unlock_irq(&pcpu_lock);
2160 pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2161 cond_resched();
2162 spin_lock_irq(&pcpu_lock);
2163
2164 pcpu_chunk_depopulated(chunk, i + 1, end + 1);
93274f1d
DZ
2165 freed_page_start = min(freed_page_start, i + 1);
2166 freed_page_end = max(freed_page_end, end + 1);
f1833241
RG
2167
2168 /* reset the range and continue */
2169 end = -1;
2170 }
2171
93274f1d
DZ
2172 /* batch tlb flush per chunk to amortize cost */
2173 if (freed_page_start < freed_page_end) {
2174 spin_unlock_irq(&pcpu_lock);
2175 pcpu_post_unmap_tlb_flush(chunk,
2176 freed_page_start,
2177 freed_page_end);
2178 cond_resched();
2179 spin_lock_irq(&pcpu_lock);
2180 }
2181
2182 if (reintegrate || chunk->free_bytes == pcpu_unit_size)
f1833241
RG
2183 pcpu_reintegrate_chunk(chunk);
2184 else
93274f1d
DZ
2185 list_move_tail(&chunk->list,
2186 &pcpu_chunk_lists[pcpu_sidelined_slot]);
f1833241 2187 }
fbf59bc9
TH
2188}
2189
3c7be18a
RG
2190/**
2191 * pcpu_balance_workfn - manage the number of free chunks and populated pages
2192 * @work: unused
2193 *
f1833241
RG
2194 * Manage the number of fully free chunks and the number of populated
2195 * pages. An important thing to consider is when pages are freed and
2196 * how they contribute to the global counts.
3c7be18a
RG
2197 */
2198static void pcpu_balance_workfn(struct work_struct *work)
2199{
f1833241
RG
2200 /*
2201 * pcpu_balance_free() is called twice because the first time we may
2202 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2203 * to grow other chunks. This then gives pcpu_reclaim_populated() time
2204 * to move fully free chunks to the active list to be freed if
2205 * appropriate.
2206 */
faf65dde 2207 mutex_lock(&pcpu_alloc_mutex);
e4d77700
RG
2208 spin_lock_irq(&pcpu_lock);
2209
faf65dde
RG
2210 pcpu_balance_free(false);
2211 pcpu_reclaim_populated();
2212 pcpu_balance_populated();
2213 pcpu_balance_free(true);
3c7be18a 2214
e4d77700 2215 spin_unlock_irq(&pcpu_lock);
faf65dde 2216 mutex_unlock(&pcpu_alloc_mutex);
3c7be18a
RG
2217}
2218
b460bc83
HT
2219/**
2220 * pcpu_alloc_size - the size of the dynamic percpu area
2221 * @ptr: pointer to the dynamic percpu area
2222 *
2223 * Returns the size of the @ptr allocation. This is undefined for statically
2224 * defined percpu variables as there is no corresponding chunk->bound_map.
2225 *
2226 * RETURNS:
2227 * The size of the dynamic percpu area.
2228 *
2229 * CONTEXT:
2230 * Can be called from atomic context.
2231 */
2232size_t pcpu_alloc_size(void __percpu *ptr)
2233{
2234 struct pcpu_chunk *chunk;
2235 unsigned long bit_off, end;
2236 void *addr;
2237
2238 if (!ptr)
2239 return 0;
2240
2241 addr = __pcpu_ptr_to_addr(ptr);
2242 /* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
2243 chunk = pcpu_chunk_addr_search(addr);
2244 bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
2245 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
2246 bit_off + 1);
2247 return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
2248}
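A hedged example of what the bound_map walk above reports (PCPU_MIN_ALLOC_SIZE is 4 bytes, so requests are rounded to 4-byte granules):

	/* Illustrative only: the reported size includes the rounding done
	 * at allocation time, not the originally requested size. */
	void __percpu *p = __alloc_percpu(6, 2);	/* rounded up to 8 bytes */

	if (p)
		WARN_ON(pcpu_alloc_size(p) != 8);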
2249
fbf59bc9
TH
2250/**
2251 * free_percpu - free percpu area
2252 * @ptr: pointer to area to free
2253 *
ccea34b5
TH
2254 * Free percpu area @ptr.
2255 *
2256 * CONTEXT:
2257 * Can be called from atomic context.
fbf59bc9 2258 */
43cf38eb 2259void free_percpu(void __percpu *ptr)
fbf59bc9 2260{
129182e5 2261 void *addr;
fbf59bc9 2262 struct pcpu_chunk *chunk;
ccea34b5 2263 unsigned long flags;
3c7be18a 2264 int size, off;
198790d9 2265 bool need_balance = false;
fbf59bc9
TH
2266
2267 if (!ptr)
2268 return;
2269
f528f0b8
CM
2270 kmemleak_free_percpu(ptr);
2271
129182e5 2272 addr = __pcpu_ptr_to_addr(ptr);
fbf59bc9 2273 chunk = pcpu_chunk_addr_search(addr);
bba174f5 2274 off = addr - chunk->base_addr;
fbf59bc9 2275
394e6869 2276 spin_lock_irqsave(&pcpu_lock, flags);
3c7be18a
RG
2277 size = pcpu_free_area(chunk, off);
2278
24e44cc2
SB
2279 pcpu_alloc_tag_free_hook(chunk, off, size);
2280
3c7be18a 2281 pcpu_memcg_free_hook(chunk, off, size);
fbf59bc9 2282
f1833241
RG
2283 /*
2284 * If there is more than one fully free chunk, wake up the grim reaper.
2285 * If the chunk is isolated, it may be in the process of being
2286 * reclaimed. Let reclaim manage cleaning up of that chunk.
2287 */
2288 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
fbf59bc9
TH
2289 struct pcpu_chunk *pos;
2290
faf65dde 2291 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
fbf59bc9 2292 if (pos != chunk) {
198790d9 2293 need_balance = true;
fbf59bc9
TH
2294 break;
2295 }
f1833241
RG
2296 } else if (pcpu_should_reclaim_chunk(chunk)) {
2297 pcpu_isolate_chunk(chunk);
2298 need_balance = true;
fbf59bc9
TH
2299 }
2300
df95e795
DZ
2301 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2302
ccea34b5 2303 spin_unlock_irqrestore(&pcpu_lock, flags);
198790d9
JS
2304
2305 if (need_balance)
2306 pcpu_schedule_balance_work();
fbf59bc9
TH
2307}
2308EXPORT_SYMBOL_GPL(free_percpu);
2309
383776fa 2310bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
10fad5e4 2311{
bbddff05 2312#ifdef CONFIG_SMP
10fad5e4
TH
2313 const size_t static_size = __per_cpu_end - __per_cpu_start;
2314 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2315 unsigned int cpu;
2316
2317 for_each_possible_cpu(cpu) {
2318 void *start = per_cpu_ptr(base, cpu);
383776fa 2319 void *va = (void *)addr;
10fad5e4 2320
383776fa 2321 if (va >= start && va < start + static_size) {
8ce371f9 2322 if (can_addr) {
383776fa 2323 *can_addr = (unsigned long) (va - start);
8ce371f9
PZ
2324 *can_addr += (unsigned long)
2325 per_cpu_ptr(base, get_boot_cpu_id());
2326 }
10fad5e4 2327 return true;
383776fa
TG
2328 }
2329 }
bbddff05
TH
2330#endif
2331 /* on UP, can't distinguish from other static vars, always false */
10fad5e4
TH
2332 return false;
2333}
2334
383776fa
TG
2335/**
2336 * is_kernel_percpu_address - test whether address is from static percpu area
2337 * @addr: address to test
2338 *
2339 * Test whether @addr belongs to the in-kernel static percpu area. Module
2340 * static percpu areas are not considered. For those, use
2341 * is_module_percpu_address().
2342 *
2343 * RETURNS:
2344 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2345 */
2346bool is_kernel_percpu_address(unsigned long addr)
2347{
2348 return __is_kernel_percpu_address(addr, NULL);
2349}
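A usage sketch (illustrative; demo_var is a hypothetical variable): any dereferenceable address inside a CPU's copy of the static area tests true, while dynamically allocated percpu addresses test false.

	/* Illustrative only. */
	static DEFINE_PER_CPU(int, demo_var);

	static void demo_check(void)
	{
		/* lies within some CPU's copy of the static percpu area */
		WARN_ON(!is_kernel_percpu_address(
				(unsigned long)per_cpu_ptr(&demo_var, 0)));
	}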
2350
3b034b0d
VG
2351/**
2352 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2353 * @addr: the address to be converted to physical address
2354 *
2355 * Given @addr, which is a dereferenceable address obtained via one of
2356 * percpu access macros, this function translates it into its physical
2357 * address. The caller is responsible for ensuring @addr stays valid
2358 * until this function finishes.
2359 *
67589c71
DY
2360 * The percpu allocator has special setup for the first chunk, which
2361 * currently supports either embedding in the linear address space or a
2362 * vmalloc mapping; from the second chunk on, the backing allocator
2363 * (currently either vm or km) provides the translation.
2364 *
bffc4375 2365 * The address could be translated without checking whether it falls into the
67589c71
DY
2366 * first chunk. But the current code better reflects how the percpu
2367 * allocator actually works, and the verification can discover bugs both
2368 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
2369 * So we keep the current code.
2370 *
3b034b0d
VG
2371 * RETURNS:
2372 * The physical address for @addr.
2373 */
2374phys_addr_t per_cpu_ptr_to_phys(void *addr)
2375{
9983b6f0
TH
2376 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2377 bool in_first_chunk = false;
a855b84c 2378 unsigned long first_low, first_high;
9983b6f0
TH
2379 unsigned int cpu;
2380
2381 /*
a855b84c 2382 * The following test on unit_low/high isn't strictly
9983b6f0
TH
2383 * necessary but will speed up lookups of addresses which
2384 * aren't in the first chunk.
c0ebfdc3
DZF
2385 *
2386 * The address check is against full chunk sizes. pcpu_base_addr
2387 * points to the beginning of the first chunk including the
2388 * static region. Assumes good intent as the first chunk may
2389 * not be full (ie. < pcpu_unit_pages in size).
9983b6f0 2390 */
c0ebfdc3
DZF
2391 first_low = (unsigned long)pcpu_base_addr +
2392 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2393 first_high = (unsigned long)pcpu_base_addr +
2394 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
a855b84c
TH
2395 if ((unsigned long)addr >= first_low &&
2396 (unsigned long)addr < first_high) {
9983b6f0
TH
2397 for_each_possible_cpu(cpu) {
2398 void *start = per_cpu_ptr(base, cpu);
2399
2400 if (addr >= start && addr < start + pcpu_unit_size) {
2401 in_first_chunk = true;
2402 break;
2403 }
2404 }
2405 }
2406
2407 if (in_first_chunk) {
eac522ef 2408 if (!is_vmalloc_addr(addr))
020ec653
TH
2409 return __pa(addr);
2410 else
9f57bd4d
ES
2411 return page_to_phys(vmalloc_to_page(addr)) +
2412 offset_in_page(addr);
020ec653 2413 } else
9f57bd4d
ES
2414 return page_to_phys(pcpu_addr_to_page(addr)) +
2415 offset_in_page(addr);
3b034b0d
VG
2416}
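A usage sketch (illustrative, not part of this file): the caller first materializes a dereferenceable address with per_cpu_ptr() and then translates it, e.g. for a debug printout.

	static void demo_phys(void)
	{
		u64 __percpu *ptr = alloc_percpu(u64);
		phys_addr_t phys;

		if (!ptr)
			return;
		phys = per_cpu_ptr_to_phys(per_cpu_ptr(ptr, raw_smp_processor_id()));
		pr_debug("cpu copy at %pa\n", &phys);
		free_percpu(ptr);
	}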
2417
fbf59bc9 2418/**
fd1e8a1f
TH
2419 * pcpu_alloc_alloc_info - allocate percpu allocation info
2420 * @nr_groups: the number of groups
2421 * @nr_units: the number of units
2422 *
2423 * Allocate ai which is large enough for @nr_groups groups containing
2424 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2425 * cpu_map array which is long enough for @nr_units and filled with
2426 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
2427 * pointer of other groups.
2428 *
2429 * RETURNS:
2430 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2431 * failure.
2432 */
2433struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2434 int nr_units)
2435{
2436 struct pcpu_alloc_info *ai;
2437 size_t base_size, ai_size;
2438 void *ptr;
2439 int unit;
2440
14d37612 2441 base_size = ALIGN(struct_size(ai, groups, nr_groups),
fd1e8a1f
TH
2442 __alignof__(ai->groups[0].cpu_map[0]));
2443 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2444
26fb3dae 2445 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
fd1e8a1f
TH
2446 if (!ptr)
2447 return NULL;
2448 ai = ptr;
2449 ptr += base_size;
2450
2451 ai->groups[0].cpu_map = ptr;
2452
2453 for (unit = 0; unit < nr_units; unit++)
2454 ai->groups[0].cpu_map[unit] = NR_CPUS;
2455
2456 ai->nr_groups = nr_groups;
2457 ai->__ai_size = PFN_ALIGN(ai_size);
2458
2459 return ai;
2460}
2461
2462/**
2463 * pcpu_free_alloc_info - free percpu allocation info
2464 * @ai: pcpu_alloc_info to free
2465 *
2466 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2467 */
2468void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2469{
4421cca0 2470 memblock_free(ai, ai->__ai_size);
fd1e8a1f
TH
2471}
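A hedged sketch of the arch-side pattern these two helpers imply (illustrative: one group covering all possible CPUs; the size fields and the pcpu_setup_first_chunk() call are elided):

	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	unsigned int cpu, unit = 0;

	if (ai) {
		ai->groups[0].nr_units = num_possible_cpus();
		for_each_possible_cpu(cpu)
			ai->groups[0].cpu_map[unit++] = cpu;
		/* ... fill static/reserved/dyn/unit sizes, map the first
		 * chunk, call pcpu_setup_first_chunk(ai, base), then ... */
		pcpu_free_alloc_info(ai);
	}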
2472
fd1e8a1f
TH
2473/**
2474 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2475 * @lvl: loglevel
2476 * @ai: allocation info to dump
2477 *
2478 * Print out information about @ai using loglevel @lvl.
2479 */
2480static void pcpu_dump_alloc_info(const char *lvl,
2481 const struct pcpu_alloc_info *ai)
033e48fb 2482{
fd1e8a1f 2483 int group_width = 1, cpu_width = 1, width;
033e48fb 2484 char empty_str[] = "--------";
fd1e8a1f
TH
2485 int alloc = 0, alloc_end = 0;
2486 int group, v;
2487 int upa, apl; /* units per alloc, allocs per line */
2488
2489 v = ai->nr_groups;
2490 while (v /= 10)
2491 group_width++;
033e48fb 2492
fd1e8a1f 2493 v = num_possible_cpus();
033e48fb 2494 while (v /= 10)
fd1e8a1f
TH
2495 cpu_width++;
2496 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
033e48fb 2497
fd1e8a1f
TH
2498 upa = ai->alloc_size / ai->unit_size;
2499 width = upa * (cpu_width + 1) + group_width + 3;
2500 apl = rounddown_pow_of_two(max(60 / width, 1));
033e48fb 2501
fd1e8a1f
TH
2502 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2503 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2504 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
033e48fb 2505
fd1e8a1f
TH
2506 for (group = 0; group < ai->nr_groups; group++) {
2507 const struct pcpu_group_info *gi = &ai->groups[group];
2508 int unit = 0, unit_end = 0;
2509
2510 BUG_ON(gi->nr_units % upa);
2511 for (alloc_end += gi->nr_units / upa;
2512 alloc < alloc_end; alloc++) {
2513 if (!(alloc % apl)) {
1170532b 2514 pr_cont("\n");
fd1e8a1f
TH
2515 printk("%spcpu-alloc: ", lvl);
2516 }
1170532b 2517 pr_cont("[%0*d] ", group_width, group);
fd1e8a1f
TH
2518
2519 for (unit_end += upa; unit < unit_end; unit++)
2520 if (gi->cpu_map[unit] != NR_CPUS)
1170532b
JP
2521 pr_cont("%0*d ",
2522 cpu_width, gi->cpu_map[unit]);
fd1e8a1f 2523 else
1170532b 2524 pr_cont("%s ", empty_str);
033e48fb 2525 }
033e48fb 2526 }
1170532b 2527 pr_cont("\n");
033e48fb 2528}
033e48fb 2529
fbf59bc9 2530/**
8d408b4b 2531 * pcpu_setup_first_chunk - initialize the first percpu chunk
fd1e8a1f 2532 * @ai: pcpu_alloc_info describing how the percpu area is shaped
38a6be52 2533 * @base_addr: mapped address
8d408b4b
TH
2534 *
2535 * Initialize the first percpu chunk which contains the kernel static
69ab285b 2536 * percpu area. This function is to be called from the arch percpu area
38a6be52 2537 * setup path.
8d408b4b 2538 *
fd1e8a1f
TH
2539 * @ai contains all information necessary to initialize the first
2540 * chunk and prime the dynamic percpu allocator.
2541 *
2542 * @ai->static_size is the size of static percpu area.
2543 *
2544 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
edcb4639
TH
2545 * reserve after the static area in the first chunk. This reserves
2546 * the first chunk such that it's available only through reserved
2547 * percpu allocation. This is primarily used to serve module percpu
2548 * static areas on architectures where the addressing model has
2549 * limited offset range for symbol relocations to guarantee module
2550 * percpu symbols fall inside the relocatable range.
2551 *
fd1e8a1f
TH
2552 * @ai->dyn_size determines the number of bytes available for dynamic
2553 * allocation in the first chunk. The area between @ai->static_size +
2554 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
6074d5b0 2555 *
fd1e8a1f
TH
2556 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2557 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2558 * @ai->dyn_size.
8d408b4b 2559 *
fd1e8a1f
TH
2560 * @ai->atom_size is the allocation atom size and used as alignment
2561 * for vm areas.
8d408b4b 2562 *
fd1e8a1f
TH
2563 * @ai->alloc_size is the allocation size and always multiple of
2564 * @ai->atom_size. This is larger than @ai->atom_size if
2565 * @ai->unit_size is larger than @ai->atom_size.
2566 *
2567 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2568 * percpu areas. Units which should be colocated are put into the
2569 * same group. Dynamic VM areas will be allocated according to these
2570 * groupings. If @ai->nr_groups is zero, a single group containing
2571 * all units is assumed.
8d408b4b 2572 *
38a6be52
TH
2573 * The caller should have mapped the first chunk at @base_addr and
2574 * copied static data to each unit.
fbf59bc9 2575 *
c0ebfdc3
DZF
2576 * The first chunk will always contain a static and a dynamic region.
2577 * However, the static region is not managed by any chunk. If the first
2578 * chunk also contains a reserved region, it is served by two chunks -
2579 * one for the reserved region and one for the dynamic region. They
2580 * share the same vm, but use offset regions in the area allocation map.
2581 * The chunk serving the dynamic region is circulated in the chunk slots
2582 * and available for dynamic allocation like any other chunk.
fbf59bc9 2583 */
163fa234
KW
2584void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2585 void *base_addr)
fbf59bc9 2586{
b9c39442 2587 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
d2f3c384 2588 size_t static_size, dyn_size;
6563297c
TH
2589 unsigned long *group_offsets;
2590 size_t *group_sizes;
fb435d52 2591 unsigned long *unit_off;
fbf59bc9 2592 unsigned int cpu;
fd1e8a1f
TH
2593 int *unit_map;
2594 int group, unit, i;
c0ebfdc3 2595 unsigned long tmp_addr;
f655f405 2596 size_t alloc_size;
fbf59bc9 2597
635b75fc
TH
2598#define PCPU_SETUP_BUG_ON(cond) do { \
2599 if (unlikely(cond)) { \
870d4b12
JP
2600 pr_emerg("failed to initialize, %s\n", #cond); \
2601 pr_emerg("cpu_possible_mask=%*pb\n", \
807de073 2602 cpumask_pr_args(cpu_possible_mask)); \
635b75fc
TH
2603 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2604 BUG(); \
2605 } \
2606} while (0)
2607
2f39e637 2608 /* sanity checks */
635b75fc 2609 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
bbddff05 2610#ifdef CONFIG_SMP
635b75fc 2611 PCPU_SETUP_BUG_ON(!ai->static_size);
f09f1243 2612 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
bbddff05 2613#endif
635b75fc 2614 PCPU_SETUP_BUG_ON(!base_addr);
f09f1243 2615 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
635b75fc 2616 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
f09f1243 2617 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
635b75fc 2618 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
ca460b3c 2619 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
099a19d9 2620 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
d2f3c384 2621 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
ca460b3c
DZF
2622 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2623 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
9f645532 2624 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
8d408b4b 2625
6563297c 2626 /* process group information and build config tables accordingly */
f655f405
MR
2627 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2628 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2629 if (!group_offsets)
2630 panic("%s: Failed to allocate %zu bytes\n", __func__,
2631 alloc_size);
2632
2633 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2634 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2635 if (!group_sizes)
2636 panic("%s: Failed to allocate %zu bytes\n", __func__,
2637 alloc_size);
2638
2639 alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2640 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2641 if (!unit_map)
2642 panic("%s: Failed to allocate %zu bytes\n", __func__,
2643 alloc_size);
2644
2645 alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2646 unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2647 if (!unit_off)
2648 panic("%s: Failed to allocate %zu bytes\n", __func__,
2649 alloc_size);
2f39e637 2650
fd1e8a1f 2651 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
ffe0d5a5 2652 unit_map[cpu] = UINT_MAX;
a855b84c
TH
2653
2654 pcpu_low_unit_cpu = NR_CPUS;
2655 pcpu_high_unit_cpu = NR_CPUS;
2f39e637 2656
fd1e8a1f
TH
2657 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2658 const struct pcpu_group_info *gi = &ai->groups[group];
2f39e637 2659
6563297c
TH
2660 group_offsets[group] = gi->base_offset;
2661 group_sizes[group] = gi->nr_units * ai->unit_size;
2662
fd1e8a1f
TH
2663 for (i = 0; i < gi->nr_units; i++) {
2664 cpu = gi->cpu_map[i];
2665 if (cpu == NR_CPUS)
2666 continue;
8d408b4b 2667
9f295664 2668 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
635b75fc
TH
2669 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2670 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
fbf59bc9 2671
fd1e8a1f 2672 unit_map[cpu] = unit + i;
fb435d52
TH
2673 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2674
a855b84c
TH
2675 /* determine low/high unit_cpu */
2676 if (pcpu_low_unit_cpu == NR_CPUS ||
2677 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2678 pcpu_low_unit_cpu = cpu;
2679 if (pcpu_high_unit_cpu == NR_CPUS ||
2680 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2681 pcpu_high_unit_cpu = cpu;
fd1e8a1f 2682 }
2f39e637 2683 }
fd1e8a1f
TH
2684 pcpu_nr_units = unit;
2685
2686 for_each_possible_cpu(cpu)
635b75fc
TH
2687 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2688
2689 /* we're done parsing the input, undefine BUG macro and dump config */
2690#undef PCPU_SETUP_BUG_ON
bcbea798 2691 pcpu_dump_alloc_info(KERN_DEBUG, ai);
fd1e8a1f 2692
6563297c
TH
2693 pcpu_nr_groups = ai->nr_groups;
2694 pcpu_group_offsets = group_offsets;
2695 pcpu_group_sizes = group_sizes;
fd1e8a1f 2696 pcpu_unit_map = unit_map;
fb435d52 2697 pcpu_unit_offsets = unit_off;
2f39e637
TH
2698
2699 /* determine basic parameters */
fd1e8a1f 2700 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
d9b55eeb 2701 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
6563297c 2702 pcpu_atom_size = ai->atom_size;
7ee1e758 2703 pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
61cf93d3 2704 BITS_TO_LONGS(pcpu_unit_pages));
cafe8816 2705
30a5b536
DZ
2706 pcpu_stats_save_ai(ai);
2707
d9b55eeb 2708 /*
f1833241
RG
2709 * Allocate chunk slots. The slots after the active slots are:
2710 * sidelined_slot - isolated, depopulated chunks
2711 * free_slot - fully free chunks
2712 * to_depopulate_slot - isolated, chunks to depopulate
d9b55eeb 2713 */
f1833241
RG
2714 pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2715 pcpu_free_slot = pcpu_sidelined_slot + 1;
2716 pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2717 pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
3c7be18a 2718 pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
faf65dde 2719 sizeof(pcpu_chunk_lists[0]),
3c7be18a
RG
2720 SMP_CACHE_BYTES);
2721 if (!pcpu_chunk_lists)
f655f405 2722 panic("%s: Failed to allocate %zu bytes\n", __func__,
faf65dde 2723 pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
3c7be18a 2724
faf65dde
RG
2725 for (i = 0; i < pcpu_nr_slots; i++)
2726 INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
fbf59bc9 2727
d2f3c384
DZF
2728 /*
2729 * The end of the static region needs to be aligned with the
2730 * minimum allocation size as this offsets the reserved and
2731 * dynamic region. The first chunk ends page aligned by
2732 * expanding the dynamic region, therefore the dynamic region
2733 * can be shrunk to compensate while still staying above the
2734 * configured sizes.
2735 */
2736 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2737 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2738
edcb4639 2739 /*
7ee1e758
BH
2740 * Initialize first chunk:
2741 * This chunk is broken up into 3 parts:
2742 * < static | [reserved] | dynamic >
2743 * - static - there is no backing chunk because these allocations can
2744 * never be freed.
2745 * - reserved (pcpu_reserved_chunk) - exists primarily to serve
2746 * allocations from module load.
2747 * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
2748 * chunk.
edcb4639 2749 */
d2f3c384 2750 tmp_addr = (unsigned long)base_addr + static_size;
7ee1e758
BH
2751 if (ai->reserved_size)
2752 pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
2753 ai->reserved_size);
2754 tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
2755 pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
edcb4639 2756
faf65dde 2757 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
ae9e6bc9 2758 pcpu_chunk_relocate(pcpu_first_chunk, -1);
fbf59bc9 2759
7e8a6304
DZF
2760 /* include all regions of the first chunk */
2761 pcpu_nr_populated += PFN_DOWN(size_sum);
2762
30a5b536 2763 pcpu_stats_chunk_alloc();
df95e795 2764 trace_percpu_create_chunk(base_addr);
30a5b536 2765
fbf59bc9 2766 /* we're done */
bba174f5 2767 pcpu_base_addr = base_addr;
fbf59bc9 2768}
66c3a757 2769
bbddff05
TH
2770#ifdef CONFIG_SMP
2771
17f3609c 2772const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
f58dc01b
TH
2773 [PCPU_FC_AUTO] = "auto",
2774 [PCPU_FC_EMBED] = "embed",
2775 [PCPU_FC_PAGE] = "page",
f58dc01b 2776};
66c3a757 2777
f58dc01b 2778enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
66c3a757 2779
f58dc01b
TH
2780static int __init percpu_alloc_setup(char *str)
2781{
5479c78a
CG
2782 if (!str)
2783 return -EINVAL;
2784
f58dc01b
TH
2785 if (0)
2786 /* nada */;
2787#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2788 else if (!strcmp(str, "embed"))
2789 pcpu_chosen_fc = PCPU_FC_EMBED;
2790#endif
2791#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2792 else if (!strcmp(str, "page"))
2793 pcpu_chosen_fc = PCPU_FC_PAGE;
f58dc01b
TH
2794#endif
2795 else
870d4b12 2796 pr_warn("unknown allocator %s specified\n", str);
66c3a757 2797
f58dc01b 2798 return 0;
66c3a757 2799}
f58dc01b 2800early_param("percpu_alloc", percpu_alloc_setup);
66c3a757 2801
3c9a024f
TH
2802/*
2803 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2804 * Build it if needed by the arch config or the generic setup is going
2805 * to be used.
2806 */
08fc4580
TH
2807#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2808 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
3c9a024f
TH
2809#define BUILD_EMBED_FIRST_CHUNK
2810#endif
2811
2812/* build pcpu_page_first_chunk() iff needed by the arch config */
2813#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2814#define BUILD_PAGE_FIRST_CHUNK
2815#endif
2816
2817/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2818#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2819/**
2820 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2821 * @reserved_size: the size of reserved percpu area in bytes
2822 * @dyn_size: minimum free size for dynamic allocation in bytes
2823 * @atom_size: allocation atom size
2824 * @cpu_distance_fn: callback to determine distance between cpus, optional
2825 *
2826 * This function determines grouping of units, their mappings to cpus
2827 * and other parameters considering needed percpu size, allocation
2828 * atom size and distances between CPUs.
2829 *
bffc4375 2830 * Group sizes are always multiples of atom size, and CPUs which are
3c9a024f
TH
2831 * LOCAL_DISTANCE apart both ways are grouped together and share space for
2832 * units in the same group. The returned configuration is guaranteed
2833 * to have CPUs on different nodes on different groups and >=75% usage
2834 * of allocated virtual address space.
2835 *
2836 * RETURNS:
2837 * On success, pointer to the new allocation_info is returned. On
2838 * failure, ERR_PTR value is returned.
2839 */
258e0815 2840static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
3c9a024f
TH
2841 size_t reserved_size, size_t dyn_size,
2842 size_t atom_size,
2843 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2844{
2845 static int group_map[NR_CPUS] __initdata;
2846 static int group_cnt[NR_CPUS] __initdata;
d7d29ac7 2847 static struct cpumask mask __initdata;
3c9a024f
TH
2848 const size_t static_size = __per_cpu_end - __per_cpu_start;
2849 int nr_groups = 1, nr_units = 0;
2850 size_t size_sum, min_unit_size, alloc_size;
3f649ab7 2851 int upa, max_upa, best_upa; /* units_per_alloc */
3c9a024f
TH
2852 int last_allocs, group, unit;
2853 unsigned int cpu, tcpu;
2854 struct pcpu_alloc_info *ai;
2855 unsigned int *cpu_map;
2856
2857 /* this function may be called multiple times */
2858 memset(group_map, 0, sizeof(group_map));
2859 memset(group_cnt, 0, sizeof(group_cnt));
d7d29ac7 2860 cpumask_clear(&mask);
3c9a024f
TH
2861
2862 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2863 size_sum = PFN_ALIGN(static_size + reserved_size +
2864 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2865 dyn_size = size_sum - static_size - reserved_size;
2866
2867 /*
2868 * Determine min_unit_size, alloc_size and max_upa such that
2869 * alloc_size is multiple of atom_size and is the smallest
25985edc 2870 * which can accommodate 4k aligned segments which are equal to
3c9a024f
TH
2871 * or larger than min_unit_size.
2872 */
2873 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2874
9c015162 2875 /* determine the maximum # of units that can fit in an allocation */
3c9a024f
TH
2876 alloc_size = roundup(min_unit_size, atom_size);
2877 upa = alloc_size / min_unit_size;
f09f1243 2878 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
2879 upa--;
2880 max_upa = upa;
2881
d7d29ac7
WY
2882 cpumask_copy(&mask, cpu_possible_mask);
2883
3c9a024f 2884 /* group cpus according to their proximity */
d7d29ac7
WY
2885 for (group = 0; !cpumask_empty(&mask); group++) {
2886 /* pop the group's first cpu */
2887 cpu = cpumask_first(&mask);
3c9a024f
TH
2888 group_map[cpu] = group;
2889 group_cnt[group]++;
d7d29ac7
WY
2890 cpumask_clear_cpu(cpu, &mask);
2891
2892 for_each_cpu(tcpu, &mask) {
2893 if (!cpu_distance_fn ||
2894 (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2895 cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2896 group_map[tcpu] = group;
2897 group_cnt[group]++;
2898 cpumask_clear_cpu(tcpu, &mask);
2899 }
2900 }
3c9a024f 2901 }
d7d29ac7 2902 nr_groups = group;
3c9a024f
TH
2903
2904 /*
9c015162
DZF
2905 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2906 * Expand the unit_size until we use >= 75% of the units allocated.
2907 * Related to atom_size, which could be much larger than the unit_size.
3c9a024f
TH
2908 */
2909 last_allocs = INT_MAX;
4829c791 2910 best_upa = 0;
3c9a024f
TH
2911 for (upa = max_upa; upa; upa--) {
2912 int allocs = 0, wasted = 0;
2913
f09f1243 2914 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
2915 continue;
2916
2917 for (group = 0; group < nr_groups; group++) {
2918 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2919 allocs += this_allocs;
2920 wasted += this_allocs * upa - group_cnt[group];
2921 }
2922
2923 /*
2924 * Don't accept if wastage is over 1/3. The
2925 * greater-than comparison ensures upa==1 always
2926 * passes the following check.
2927 */
2928 if (wasted > num_possible_cpus() / 3)
2929 continue;
2930
2931 /* and then don't consume more memory */
2932 if (allocs > last_allocs)
2933 break;
2934 last_allocs = allocs;
2935 best_upa = upa;
2936 }
4829c791 2937 BUG_ON(!best_upa);
3c9a024f
TH
2938 upa = best_upa;
2939
2940 /* allocate and fill alloc_info */
2941 for (group = 0; group < nr_groups; group++)
2942 nr_units += roundup(group_cnt[group], upa);
2943
2944 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2945 if (!ai)
2946 return ERR_PTR(-ENOMEM);
2947 cpu_map = ai->groups[0].cpu_map;
2948
2949 for (group = 0; group < nr_groups; group++) {
2950 ai->groups[group].cpu_map = cpu_map;
2951 cpu_map += roundup(group_cnt[group], upa);
2952 }
2953
2954 ai->static_size = static_size;
2955 ai->reserved_size = reserved_size;
2956 ai->dyn_size = dyn_size;
2957 ai->unit_size = alloc_size / upa;
2958 ai->atom_size = atom_size;
2959 ai->alloc_size = alloc_size;
2960
2de7852f 2961 for (group = 0, unit = 0; group < nr_groups; group++) {
3c9a024f
TH
2962 struct pcpu_group_info *gi = &ai->groups[group];
2963
2964 /*
2965 * Initialize base_offset as if all groups are located
2966 * back-to-back. The caller should update this to
2967 * reflect actual allocation.
2968 */
2969 gi->base_offset = unit * ai->unit_size;
2970
2971 for_each_possible_cpu(cpu)
2972 if (group_map[cpu] == group)
2973 gi->cpu_map[gi->nr_units++] = cpu;
2974 gi->nr_units = roundup(gi->nr_units, upa);
2975 unit += gi->nr_units;
2976 }
2977 BUG_ON(unit != nr_units);
2978
2979 return ai;
2980}
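A worked example of the sizing loop above (illustrative numbers, 4 KiB pages assumed):

	/*
	 * static 64 KiB + reserved 8 KiB + dyn 28 KiB
	 *   => size_sum      = PFN_ALIGN(100 KiB) = 100 KiB
	 *   => min_unit_size = max(100 KiB, PCPU_MIN_UNIT_SIZE) = 100 KiB
	 * With atom_size = 2 MiB: alloc_size = 2 MiB and upa starts at
	 * 2048 KiB / 100 KiB = 20, then is decremented until alloc_size is
	 * divisible by upa and alloc_size / upa is page aligned, giving
	 * upa = 16, i.e. a 128 KiB unit_size, 16 units per 2 MiB atom.
	 */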
23f91716
KW
2981
2982static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
2983 pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
2984{
2985 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
2986#ifdef CONFIG_NUMA
2987 int node = NUMA_NO_NODE;
2988 void *ptr;
2989
2990 if (cpu_to_nd_fn)
2991 node = cpu_to_nd_fn(cpu);
2992
2993 if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
2994 ptr = memblock_alloc_from(size, align, goal);
2995 pr_info("cpu %d has no node %d or node-local memory\n",
2996 cpu, node);
2997 pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
2998 cpu, size, (u64)__pa(ptr));
2999 } else {
3000 ptr = memblock_alloc_try_nid(size, align, goal,
3001 MEMBLOCK_ALLOC_ACCESSIBLE,
3002 node);
3003
3004 pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
3005 cpu, size, node, (u64)__pa(ptr));
3006 }
3007 return ptr;
3008#else
3009 return memblock_alloc_from(size, align, goal);
3010#endif
3011}
3012
3013static void __init pcpu_fc_free(void *ptr, size_t size)
3014{
3015 memblock_free(ptr, size);
3016}
3c9a024f
TH
3017#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3018
3019#if defined(BUILD_EMBED_FIRST_CHUNK)
66c3a757
TH
3020/**
3021 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
66c3a757 3022 * @reserved_size: the size of reserved percpu area in bytes
4ba6ce25 3023 * @dyn_size: minimum free size for dynamic allocation in bytes
c8826dd5
TH
3024 * @atom_size: allocation atom size
3025 * @cpu_distance_fn: callback to determine distance between cpus, optional
1ca3fb3a 3026 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
66c3a757
TH
3027 *
3028 * This is a helper to ease setting up the embedded first percpu chunk and
3029 * can be called where pcpu_setup_first_chunk() is expected.
3030 *
3031 * If this function is used to set up the first chunk, it is allocated
23f91716 3032 * by calling pcpu_fc_alloc and used as-is without being mapped into
c8826dd5
TH
3033 * vmalloc area. Allocations are always whole multiples of @atom_size
3034 * aligned to @atom_size.
3035 *
3036 * This enables the first chunk to piggy back on the linear physical
3037 * mapping which often uses larger page size. Please note that this
3038 * can result in very sparse cpu->unit mapping on NUMA machines thus
3039 * requiring large vmalloc address space. Don't use this allocator if
3040 * vmalloc space is not orders of magnitude larger than distances
3041 * between node memory addresses (ie. 32bit NUMA machines).
66c3a757 3042 *
4ba6ce25 3043 * @dyn_size specifies the minimum dynamic area size.
66c3a757
TH
3044 *
3045 * If the needed size is smaller than the minimum or specified unit
23f91716 3046 * size, the leftover is returned using pcpu_fc_free.
66c3a757
TH
3047 *
3048 * RETURNS:
fb435d52 3049 * 0 on success, -errno on failure.
66c3a757 3050 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_ignore_phys(__pa(ptr));
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				pcpu_fc_free(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			pcpu_fc_free(areas[group],
				     ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free(areas, areas_size);
	return rc;
}
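
/*
 * Usage sketch: this mirrors the generic setup_per_cpu_areas() further
 * below; NUMA-aware archs may pass real @cpu_distance_fn and
 * @cpu_to_nd_fn callbacks instead of NULL.
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, NULL);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");
 */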
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
#include <asm/pgalloc.h>

#ifndef P4D_TABLE_SIZE
#define P4D_TABLE_SIZE		PAGE_SIZE
#endif

#ifndef PUD_TABLE_SIZE
#define PUD_TABLE_SIZE		PAGE_SIZE
#endif

#ifndef PMD_TABLE_SIZE
#define PMD_TABLE_SIZE		PAGE_SIZE
#endif

#ifndef PTE_TABLE_SIZE
#define PTE_TABLE_SIZE		PAGE_SIZE
#endif
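
/*
 * Default (weak) pte populator: walks the kernel page tables and
 * allocates any missing intermediate levels (p4d, pud, pmd, pte) from
 * memblock so the pte covering @addr exists before __pcpu_map_pages()
 * runs.  Archs with non-standard page table layouts may override it.
 */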
void __init __weak pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
		if (!p4d)
			goto err_alloc;
		pgd_populate(&init_mm, pgd, p4d);
	}

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
		if (!pud)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, pud);
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
		if (!pmd)
			goto err_alloc;
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
		if (!new)
			goto err_alloc;
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return;

err_alloc:
	panic("%s: Failed to allocate memory\n", __func__);
}

/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page into the vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_ignore_phys(__pa(ptr));
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free(pages, pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
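
/*
 * The page path is typically taken when the "percpu_alloc=page" boot
 * parameter is given, or as a fallback when pcpu_embed_first_chunk()
 * fails its max_distance check on an arch that builds both helpers.
 */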
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggyback
 * on the physical linear memory mapping which uses large page mappings
 * on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
				    PAGE_SIZE, NULL, NULL);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
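
/*
 * Sketch of how the offsets set up above are consumed: the generic
 * percpu accessors resolve a static percpu address for a given cpu
 * roughly as
 *
 *	ptr + __per_cpu_offset[cpu]
 *
 * (see SHIFT_PERCPU_PTR() and per_cpu_offset() in the asm-generic
 * percpu headers).
 */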
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses the km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_ignore_phys(__pa(fc));

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */

/*
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks.  Metadata is
 * excluded from the number exposed in meminfo as the number of backing pages
 * scales with the number of cpus and can quickly outweigh the memory used
 * for metadata.  It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}
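
/*
 * For reference, /proc/meminfo exposes this count as its "Percpu:"
 * line, converted from pages to kB.
 */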

/*
 * The percpu allocator is initialized early during boot when neither slab
 * nor workqueue is available.  Plug async management until everything is
 * up and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);