// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

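/*
 * Illustrative sketch, not part of the kernel source: with
 * CONFIG_SPARSEMEM_EXTREME the section table is two-level, so a section
 * number decomposes into a root index plus an offset within that root,
 * which is effectively what __nr_to_section() computes:
 *
 *	unsigned long root   = SECTION_NR_TO_ROOT(section_nr);
 *	unsigned long offset = section_nr & SECTION_ROOT_MASK;
 *	struct mem_section *ms = &mem_section[root][offset];
 *
 * Roots are only allocated (by sparse_index_init() below) for ranges that
 * actually contain memory, which is what keeps the table sparse.
 */
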
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available())
                section = kzalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
                                              nid);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root = NULL;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(!root);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
        return (int)(ms - mem_section[0]);
}
#endif

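/*
 * Illustrative invariant, not asserted anywhere in this file: for any
 * section number whose root has been allocated, __section_nr() inverts
 * __nr_to_section():
 *
 *	unsigned long nr2 = __section_nr(__nr_to_section(nr));
 *	-> nr2 == nr
 */
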
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

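/*
 * Illustrative round trip, not part of the kernel source: while the early
 * encoding is in place, the two helpers above are inverses (the flag bits
 * below SECTION_NID_SHIFT are shifted away on decode):
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid) | SECTION_IS_ONLINE;
 *	int early_nid = sparse_early_nid(ms);	recovers the same nid
 *
 * Once sparse_init_one_section() stores the real mem_map encoding, the node
 * information is gone and sparse_early_nid() must no longer be used.
 */
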
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

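/*
 * Worked example of the clamping above, assuming the common x86_64 values
 * MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12 (an assumption, not something
 * this file defines): max_sparsemem_pfn is 1UL << 34, so a caller passing
 * end_pfn == 1UL << 35 gets *end_pfn clamped to 1UL << 34 after a one-time
 * warning, and a start_pfn beyond the limit empties the range entirely.
 */
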
/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        int section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
        do {
                section_nr++;
                if (present_section_nr(section_nr))
                        return section_nr;
        } while ((section_nr <= __highest_present_section_nr));

        return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
        for (section_nr = next_present_section_nr(start-1);	\
             ((section_nr >= 0) &&				\
              (section_nr <= __highest_present_section_nr));	\
             section_nr = next_present_section_nr(section_nr))

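/*
 * Illustrative usage, not code from this file; do_something() is a
 * hypothetical stand-in for per-section work. The iterator visits only
 * present sections and stops at __highest_present_section_nr instead of
 * scanning all NR_MEM_SECTIONS slots:
 *
 *	unsigned long pnum;
 *
 *	for_each_present_section_nr(0, pnum)
 *		do_something(__nr_to_section(pnum));
 */
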
static inline unsigned long first_present_section_nr(void)
{
        return next_present_section_nr(-1);
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
        if (unlikely(!mem_section)) {
                unsigned long size, align;

                size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_alloc(size, align);
        }
#endif

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                memory_present(memblock_get_region_node(reg),
                               memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
        }
}

/*
 * Subtle: we encode the real pfn into the mem_map so that the identity
 * pfn - section_mem_map returns the actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        unsigned long coded_mem_map =
                (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
        BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
        BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
        return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

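/*
 * Worked example of the encode/decode identity, not part of the kernel
 * source. Because the encoded value is mem_map biased down by the
 * section's first pfn, decoding adds that pfn back and recovers the
 * original pointer; the same bias is what lets pfn_to_page() resolve any
 * pfn inside the section:
 *
 *	unsigned long coded = sparse_encode_mem_map(map, pnum);
 *	struct page *map2 = sparse_decode_mem_map(coded, pnum);
 *	-> map2 == map, and map + (pfn - section_nr_to_pfn(pnum)) is the
 *	   struct page for any pfn within section pnum.
 */
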
static void __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;
}

unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

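/*
 * Illustrative arithmetic, assuming common x86_64 values (128MB sections,
 * 2MB pageblocks, NR_PAGEBLOCK_BITS == 4; these are assumptions, not
 * definitions from this file): a section then holds 64 pageblocks, so
 * SECTION_BLOCKFLAGS_BITS is 64 * 4 = 256 bits and usemap_size() returns
 * BITS_TO_LONGS(256) * sizeof(unsigned long) = 4 * 8 = 32 bytes.
 */
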
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr;
        static unsigned long old_pgdat_snr;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        /* First call */
        if (!old_usemap_snr) {
                old_usemap_snr = NR_MEM_SECTIONS;
                old_pgdat_snr = NR_MEM_SECTIONS;
        }

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just notify the un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
        return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
        return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);

        if (map)
                return map;

        map = memblock_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static void __init sparse_buffer_init(unsigned long size, int nid)
{
        WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
        sparsemap_buf =
                memblock_alloc_try_nid_raw(size, PAGE_SIZE,
                                                __pa(MAX_DMA_ADDRESS),
                                                MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
        unsigned long size = sparsemap_buf_end - sparsemap_buf;

        if (sparsemap_buf && size > 0)
                memblock_free_early(__pa(sparsemap_buf), size);
        sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
        void *ptr = NULL;

        if (sparsemap_buf) {
                ptr = PTR_ALIGN(sparsemap_buf, size);
                if (ptr + size > sparsemap_buf_end)
                        ptr = NULL;
                else
                        sparsemap_buf = ptr + size;
        }
        return ptr;
}

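/*
 * Illustrative call sequence, sketching how sparse_init_nid() below uses
 * this buffer (pseudocode, not additional kernel code): one large memblock
 * allocation is carved into per-section memmaps and the unused tail is
 * returned to memblock:
 *
 *	sparse_buffer_init(map_count * section_map_size(), nid);
 *	for each present section on the node:
 *		map = sparse_buffer_alloc(section_map_size());
 *	sparse_buffer_fini();	frees whatever was not handed out
 *
 * Note that sparse_buffer_alloc() aligns with PTR_ALIGN(sparsemap_buf, size),
 * i.e. the request size doubles as the alignment, which suits the
 * power-of-two section map sizes used here.
 */
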
void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end),
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
                                   unsigned long pnum_end,
                                   unsigned long map_count)
{
        unsigned long pnum, usemap_longs, *usemap;
        struct page *map;

        usemap_longs = BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS);
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
                                                          usemap_size() *
                                                          map_count);
        if (!usemap) {
                pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
                goto failed;
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
                if (pnum >= pnum_end)
                        break;

                map = sparse_mem_map_populate(pnum, nid, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
                        pnum_begin = pnum;
                        goto failed;
                }
                check_usemap_section_nr(nid, usemap);
                sparse_init_one_section(__nr_to_section(pnum), pnum, map, usemap);
                usemap += usemap_longs;
        }
        sparse_buffer_fini();
        return;
failed:
        /* We failed to allocate, mark all the following pnums as not present */
        for_each_present_section_nr(pnum_begin, pnum) {
                struct mem_section *ms;

                if (pnum >= pnum_end)
                        break;
                ms = __nr_to_section(pnum);
                ms->section_mem_map = 0;
        }
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum_begin = first_present_section_nr();
        int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
        unsigned long pnum_end, map_count = 1;

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        for_each_present_section_nr(pnum_begin + 1, pnum_end) {
                int nid = sparse_early_nid(__nr_to_section(pnum_end));

                if (nid == nid_begin) {
                        map_count++;
                        continue;
                }
                /* Init node with sections in range [pnum_begin, pnum_end) */
                sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
                nid_begin = nid;
                pnum_begin = pnum_end;
                map_count = 1;
        }
        /* cover the last node */
        sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
        vmemmap_populate_print_last();
}

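/*
 * Illustrative trace of the loop above for a hypothetical layout (not
 * taken from this file): with present sections 0-3 on node 0 and 4-7 on
 * node 1, the nid change at section 4 triggers
 *
 *	sparse_init_nid(0, 0, 4, 4);	node 0, sections [0, 4)
 *
 * and the "cover the last node" call then handles sections 4-7 for node 1,
 * with pnum_end left pointing past the highest present section.
 */
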
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
                struct vmem_altmap *altmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
                struct vmem_altmap *altmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the section being removed is
                 * in a logically offline state: all of its pages are isolated
                 * from the page allocator. If the removed section's memmap is
                 * placed on the same section, it must not be freed; if it
                 * were, the page allocator could hand it out again even
                 * though it will soon be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat,
                unsigned long start_pfn, struct vmem_altmap *altmap)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking for this, because sparse_index_init() does its
         * own locking; plus, it does a kmalloc.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        ret = 0;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, altmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        /*
         * Poison uninitialized struct pages in order to catch invalid flags
         * combinations.
         */
        page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);

        section_mark_present(ms);
        sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret < 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, altmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap,
                struct vmem_altmap *altmap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, altmap);
                return;
        }

        /*
         * The usemap came from bootmem. This is packed with other usemaps
         * on the section which has pgdat at boot time. Just keep it as is now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset, struct vmem_altmap *altmap)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */