/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(); if required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
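/*
 * Tag a bootmem-allocated page so it can be released via put_page_bootmem()
 * at hot-remove time: the type goes in page->lru.next, @info in
 * page->private, and a reference is taken on the page.
 */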
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/*
 * The reference to __meminit __free_pages_bootmem() is valid,
 * so use __ref to tell modpost not to generate a warning.
 */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;
	static DEFINE_MUTEX(ppb_lock);

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);

		/*
		 * Please refer to the comment for __free_pages_bootmem()
		 * for why we serialize here.
		 */
		mutex_lock(&ppb_lock);
		__free_pages_bootmem(page, 0);
		mutex_unlock(&ppb_lock);
		totalram_pages++;
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

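/*
 * Tag every bootmem page backing this node's pgdat, zone wait tables and
 * memory sections, so the node's bootmem metadata can be released if the
 * node is later hot-removed.
 */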
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

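/* extend the zone span to cover [start_pfn, end_pfn); takes the span lock */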
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Keep this consistent with free_area_init_core():
		 * if spanned_pages == 0, keep zone_start_pfn == 0 too.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

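/*
 * Rewrite the zone/node links of all pages in [start_pfn, end_pfn) to point
 * at @zone, after the range has been moved from one zone to another.
 */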
static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

/*
 * Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic().
 */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages,
						 MEMMAP_HOTPLUG);
	return 0;
}

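/*
 * Move [start_pfn, end_pfn), which must sit at the leftmost edge of @z2,
 * into the lower-indexed adjacent zone @z1 (e.g. for ONLINE_KERNEL).
 */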
static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the moved-out part must be at the leftmost of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* the ranges must overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (z1->spanned_pages)
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

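/*
 * Move [start_pfn, end_pfn), which must sit at the rightmost edge of @z1,
 * into the higher-indexed adjacent zone @z2 (e.g. for ONLINE_MOVABLE).
 */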
static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* the ranges must overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (z2->spanned_pages)
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

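/*
 * Grow the zone and pgdat spans to cover the new section starting at
 * @phys_start_pfn and initialize that section's struct pages.
 */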
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

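/*
 * Shrink the zone span after the section [start_pfn, end_pfn) is removed:
 * pull in the boundary if the section was at either end of the zone, and
 * empty the zone entirely if no valid section of it remains.
 */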
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest one in the zone, we need to
		 * shrink zone->zone_start_pfn and zone->spanned_pages.
		 * Find the next smallest valid mem_section and shrink the
		 * zone to it.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest one in the zone, we only
		 * need to shrink zone->spanned_pages. Find the next biggest
		 * valid mem_section and shrink the zone to it.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, it only creates a hole in the zone, so the span does
	 * not change. But the zone may now consist of nothing but holes, so
	 * check whether any valid section of this zone remains.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

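/*
 * Same as shrink_zone_span(), but for the node span: adjust
 * pgdat->node_start_pfn and pgdat->node_spanned_pages after a section of
 * this node has been removed.
 */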
static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest one in the pgdat, we need to
		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * Find the next smallest valid mem_section and shrink the
		 * pgdat to it.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest one in the pgdat, we only
		 * need to shrink pgdat->node_spanned_pages. Find the next
		 * biggest valid mem_section and shrink the pgdat to it.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat, so the span does
	 * not change. But the pgdat may now consist of nothing but holes, so
	 * check whether any valid section of this node remains.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

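/*
 * Tear down one memory section: unregister it from sysfs, shrink the zone
 * and node spans, and free its memmap via sparse_remove_one_section().
 */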
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms);
	return 0;
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* align the hot-added range to sections when initializing the mem_map */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is ultimately handled by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	resource_size_t start, size;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret)
		pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n",
			start, start + size - 1, ret);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	totalram_pages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

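/*
 * walk_system_ram_range() callback: hand each page in the range to the
 * registered online_page_callback and accumulate the onlined count in *arg.
 */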
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn))) {
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * With CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which states in node_states[] will change when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is onlined.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node has no memory before onlining, we will need to set
	 * the node to node_states[N_MEMORY] after the memory is onlined.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

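/*
 * Online a previously added pfn range: move it to the requested zone if
 * necessary, notify MEM_GOING_ONLINE listeners, free the pages to the buddy
 * allocator, rebuild the zonelists if the zone becomes populated, and start
 * kswapd on the node.
 */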
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone)) {
		unlock_memory_hotplug();
		return -1;
	}

	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}
	if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}

	/* The code above may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = page_to_nid(pfn_to_page(pfn));

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->managed_pages += onlined_pages;
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-yet-initialized zonelist, build it here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}

/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}
	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here on */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no rollback path
		 * now, so check with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful: we don't hold locks, so page_order can change */
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the first LRU page found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
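/*
 * Isolate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in [start_pfn, end_pfn)
 * and migrate them off the range. Returns 0 on success, -EBUSY when an
 * unmanageable page is hit, or the nonzero result of migrate_pages().
 */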
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal only with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/*
			 * Because we don't hold the big zone->lock, we must
			 * check the page count again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target should be improooooved!!
		 * migrate_pages returns # of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, 0,
				    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
			      offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
				    check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * With CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which states in node_states[] will change when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory there, 0...zone_last will
	 * become empty after the offlining, so we will need to clear the
	 * node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we are trying to offline the last present @nr_pages from
	 * the node, we can determine that we will need to clear the node
	 * from node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

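/*
 * Offline the pageblock-aligned range [start_pfn, end_pfn): isolate it,
 * migrate its LRU pages away (retrying until @timeout jiffies elapse), then
 * pull the isolated pages off the free lists and fix up the zone, node and
 * global page counters.
 */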
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier and more readable; for now we
	 * assume the whole range is in a single zone.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target pages are isolated.
	 * We cannot do a rollback from this point on.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	zone->managed_pages -= offlined_pages;
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0)
		kswapd_stop(node);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	unlock_memory_hotplug();
	return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}

1620 | /** | |
1621 | * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) | |
1622 | * @start_pfn: start pfn of the memory range | |
1623 | * @end_pfn: end pfn of the memory range | |
1624 | * @arg: argument passed to func | |
1625 | * @func: callback for each memory section walked | |
1626 | * | |
1627 | * This function walks through all present mem sections in range | |
1628 | * [start_pfn, end_pfn) and call func on each mem section. | |
1629 | * | |
1630 | * Returns the return value of func. | |
1631 | */ | |
1632 | static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | |
1633 | void *arg, int (*func)(struct memory_block *, void *)) | |
1634 | { | |
1635 | struct memory_block *mem = NULL; | |
1636 | struct mem_section *section; | |
1637 | unsigned long pfn, section_nr; | |
1638 | int ret; | |
1639 | ||
1640 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | |
1641 | section_nr = pfn_to_section_nr(pfn); | |
1642 | if (!present_section_nr(section_nr)) | |
1643 | continue; | |
1644 | ||
1645 | section = __nr_to_section(section_nr); | |
1646 | /* same memblock? */ | |
1647 | if (mem) | |
1648 | if ((section_nr >= mem->start_section_nr) && | |
1649 | (section_nr <= mem->end_section_nr)) | |
1650 | continue; | |
1651 | ||
1652 | mem = find_memory_block_hinted(section, mem); | |
1653 | if (!mem) | |
1654 | continue; | |
1655 | ||
1656 | ret = func(mem, arg); | |
1657 | if (ret) { | |
1658 | kobject_put(&mem->dev.kobj); | |
1659 | return ret; | |
1660 | } | |
1661 | } | |
1662 | ||
1663 | if (mem) | |
1664 | kobject_put(&mem->dev.kobj); | |
1665 | ||
1666 | return 0; | |
1667 | } | |
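
/*
 * Illustrative sketch only (an assumption, not in the original file):
 * counting the memory blocks backing a pfn range with walk_memory_range().
 * A callback returning 0 keeps the walk going; any non-zero return stops
 * the walk and is propagated back to the caller.
 */
static int __maybe_unused example_count_block_cb(struct memory_block *mem,
						 void *arg)
{
	(*(int *)arg)++;	/* one more distinct memory block */
	return 0;		/* continue walking */
}

static int __maybe_unused example_count_blocks(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	int nr_blocks = 0;

	walk_memory_range(start_pfn, end_pfn, &nr_blocks,
			  example_count_block_cb);
	return nr_blocks;
}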

/**
 * offline_memory_block_cb - callback function for offlining memory block
 * @mem: the memory block to be offlined
 * @arg: buffer to hold error msg
 *
 * Always returns 0, and puts the error msg in arg if any.
 */
static int offline_memory_block_cb(struct memory_block *mem, void *arg)
{
	int *ret = arg;
	int error = offline_memory_block(mem);

	if (error != 0 && *ret == 0)
		*ret = error;

	return 0;
}

static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret))
		pr_warn("removing memory failed because memory "
			"[%#010llx-%#010llx] is still online\n",
			PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)),
			PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1);

	return ret;
}

static int check_cpu_on_node(void *data)
{
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * There is still a cpu on this node, so we
			 * cannot offline the node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(void *data)
{
#ifdef CONFIG_ACPI_NUMA
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(void *data)
{
	int ret = check_cpu_on_node(data);

	if (ret)
		return ret;

	/*
	 * The node is going offline by the time we get here, so it is
	 * safe to clear the cpu_to_node() mappings now.
	 */
	unmap_cpu_on_node(data);
	return 0;
}
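
/*
 * Note: check_and_unmap_cpu_on_node() is meant to run under stop_machine()
 * (see try_offline_node() below), so the "no present cpu on this node"
 * check and the clearing of the cpu-to-node mappings happen while every
 * other cpu is held, closing the race with a cpu being brought up on the
 * node between the two steps.
 */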

/* offline the node if all memory sections of this node are removed */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct page *pgdat_page = virt_to_page(pgdat);
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * Some memory sections of this node have not been removed,
		 * so we cannot offline the node yet.
		 */
		return;
	}

	if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
		return;

	/*
	 * All memory/cpus of this node have been removed, so we can
	 * offline the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
		/* node data is allocated from boot memory */
		return;

	/* free the wait table in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		/*
		 * wait_table may be allocated from boot memory; only
		 * free it here if it was allocated by vmalloc.
		 */
		if (is_vmalloc_addr(zone->wait_table))
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee that the address of pgdat/zone
	 * is not on the stack of any kernel thread or used by other kernel
	 * objects without reference counting or another synchronizing
	 * method, do not reset node_data and free pgdat here. Just reset it
	 * to 0 and reuse the memory when the node is onlined again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);

int __ref remove_memory(int nid, u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret = 0;
	int retry = 1;

	start_pfn = PFN_DOWN(start);
	end_pfn = PFN_UP(start + size - 1);

	/*
	 * When CONFIG_MEMCG is on, one memory block may be used by other
	 * blocks to store page cgroups when onlining pages. But we don't
	 * know in what order the pages were onlined, so we iterate twice
	 * to offline the memory:
	 * 1st iteration: offline every non-primary memory block.
	 * 2nd iteration: offline the primary (i.e. first added) memory block.
	 */
repeat:
	walk_memory_range(start_pfn, end_pfn, &ret,
			  offline_memory_block_cb);
	if (ret) {
		if (!retry)
			return ret;

		retry = 0;
		ret = 0;
		goto repeat;
	}

	lock_memory_hotplug();

	/*
	 * We have offlined all memory blocks like this:
	 * 1. lock memory hotplug
	 * 2. offline a memory block
	 * 3. unlock memory hotplug
	 *
	 * repeating steps 1-3 for each block. All memory blocks must be
	 * offlined before removing memory, but since we did not hold the
	 * lock across the whole operation, we have to check again that
	 * every memory block really is offline.
	 */
	ret = walk_memory_range(start_pfn, end_pfn, NULL,
				is_memblock_offlined_cb);
	if (ret) {
		unlock_memory_hotplug();
		return ret;
	}

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	arch_remove_memory(start, size);

	try_offline_node(nid);

	unlock_memory_hotplug();

	return 0;
}
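
/*
 * Usage sketch (an assumption, not part of the original file): a hotplug
 * driver that added a range with add_memory() would tear it down roughly
 * like this, reusing the node id it recorded at add time:
 *
 *	int err = remove_memory(nid, start, size);
 *
 *	if (err)
 *		pr_err("removing [mem %#010llx-%#010llx] failed: %d\n",
 *		       start, start + size - 1, err);
 */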
#else
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}
int remove_memory(int nid, u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);