1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4 23#include <linux/bootmem.h>
edbe7d23 24#include <linux/memblock.h>
1da177e4 25#include <linux/compiler.h>
9f158333 26#include <linux/kernel.h>
b1eeab67 27#include <linux/kmemcheck.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/suspend.h>
30#include <linux/pagevec.h>
31#include <linux/blkdev.h>
32#include <linux/slab.h>
5a3135c2 33#include <linux/oom.h>
1da177e4
LT
34#include <linux/notifier.h>
35#include <linux/topology.h>
36#include <linux/sysctl.h>
37#include <linux/cpu.h>
38#include <linux/cpuset.h>
bdc8cb98 39#include <linux/memory_hotplug.h>
1da177e4
LT
40#include <linux/nodemask.h>
41#include <linux/vmalloc.h>
4be38e35 42#include <linux/mempolicy.h>
6811378e 43#include <linux/stop_machine.h>
c713216d
MG
44#include <linux/sort.h>
45#include <linux/pfn.h>
3fcfab16 46#include <linux/backing-dev.h>
933e312e 47#include <linux/fault-inject.h>
a5d76b54 48#include <linux/page-isolation.h>
52d4b9ac 49#include <linux/page_cgroup.h>
3ac7fe5a 50#include <linux/debugobjects.h>
dbb1f81c 51#include <linux/kmemleak.h>
925cc71e 52#include <linux/memory.h>
56de7263 53#include <linux/compaction.h>
0d3d062a 54#include <trace/events/kmem.h>
718a3821 55#include <linux/ftrace_event.h>
1da177e4
LT
56
57#include <asm/tlbflush.h>
ac924c60 58#include <asm/div64.h>
1da177e4
LT
59#include "internal.h"
60
72812019
LS
61#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
62DEFINE_PER_CPU(int, numa_node);
63EXPORT_PER_CPU_SYMBOL(numa_node);
64#endif
65
7aac7898
LS
66#ifdef CONFIG_HAVE_MEMORYLESS_NODES
67/*
68 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
69 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
70 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
71 * defined in <linux/topology.h>.
72 */
73DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
74EXPORT_PER_CPU_SYMBOL(_numa_mem_);
75#endif
76
1da177e4 77/*
13808910 78 * Array of node states.
1da177e4 79 */
13808910
CL
80nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
81 [N_POSSIBLE] = NODE_MASK_ALL,
82 [N_ONLINE] = { { [0] = 1UL } },
83#ifndef CONFIG_NUMA
84 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
85#ifdef CONFIG_HIGHMEM
86 [N_HIGH_MEMORY] = { { [0] = 1UL } },
87#endif
88 [N_CPU] = { { [0] = 1UL } },
89#endif /* NUMA */
90};
91EXPORT_SYMBOL(node_states);
92
6c231b7b 93unsigned long totalram_pages __read_mostly;
cb45b0e9 94unsigned long totalreserve_pages __read_mostly;
8ad4b1fb 95int percpu_pagelist_fraction;
dcce284a 96gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1da177e4 97
452aa699
RW
98#ifdef CONFIG_PM_SLEEP
99/*
100 * The following functions are used by the suspend/hibernate code to temporarily
101 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
102 * while devices are suspended. To avoid races with the suspend/hibernate code,
103 * they should always be called with pm_mutex held (gfp_allowed_mask also should
104 * only be modified with pm_mutex held, unless the suspend/hibernate code is
105 * guaranteed not to run in parallel with that modification).
106 */
c9e664f1
RW
107
108static gfp_t saved_gfp_mask;
109
110void pm_restore_gfp_mask(void)
452aa699
RW
111{
112 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
113 if (saved_gfp_mask) {
114 gfp_allowed_mask = saved_gfp_mask;
115 saved_gfp_mask = 0;
116 }
452aa699
RW
117}
118
c9e664f1 119void pm_restrict_gfp_mask(void)
452aa699 120{
452aa699 121 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
122 WARN_ON(saved_gfp_mask);
123 saved_gfp_mask = gfp_allowed_mask;
124 gfp_allowed_mask &= ~GFP_IOFS;
452aa699
RW
125}
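/*
 * Illustrative usage sketch, assuming the typical suspend sequence:
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();		allocations now avoid __GFP_IO/__GFP_FS
 *	... suspend devices, handle the hibernation image ...
 *	pm_restore_gfp_mask();		original gfp_allowed_mask comes back
 *	mutex_unlock(&pm_mutex);
 */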
126#endif /* CONFIG_PM_SLEEP */
127
d9c23400
MG
128#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
129int pageblock_order __read_mostly;
130#endif
131
d98c7a09 132static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 133
1da177e4
LT
134/*
135 * results with 256, 32 in the lowmem_reserve sysctl:
136 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
137 * 1G machine -> (16M dma, 784M normal, 224M high)
138 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
139 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 140 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
141 *
142 * TBD: should special case ZONE_DMA32 machines here - in those we normally
143 * don't need any ZONE_NORMAL reservation
1da177e4 144 */
2f1b6248 145int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 146#ifdef CONFIG_ZONE_DMA
2f1b6248 147 256,
4b51d669 148#endif
fb0e7942 149#ifdef CONFIG_ZONE_DMA32
2f1b6248 150 256,
fb0e7942 151#endif
e53ef38d 152#ifdef CONFIG_HIGHMEM
2a1e274a 153 32,
e53ef38d 154#endif
2a1e274a 155 32,
2f1b6248 156};
1da177e4
LT
157
158EXPORT_SYMBOL(totalram_pages);
1da177e4 159
15ad7cdc 160static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 161#ifdef CONFIG_ZONE_DMA
2f1b6248 162 "DMA",
4b51d669 163#endif
fb0e7942 164#ifdef CONFIG_ZONE_DMA32
2f1b6248 165 "DMA32",
fb0e7942 166#endif
2f1b6248 167 "Normal",
e53ef38d 168#ifdef CONFIG_HIGHMEM
2a1e274a 169 "HighMem",
e53ef38d 170#endif
2a1e274a 171 "Movable",
2f1b6248
CL
172};
173
1da177e4
LT
174int min_free_kbytes = 1024;
175
2c85f51d
JB
176static unsigned long __meminitdata nr_kernel_pages;
177static unsigned long __meminitdata nr_all_pages;
a3142c8e 178static unsigned long __meminitdata dma_reserve;
1da177e4 179
c713216d
MG
180#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
181 /*
183ff22b 182 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
c713216d
MG
183 * ranges of memory (RAM) that may be registered with add_active_range().
184 * Ranges passed to add_active_range() will be merged if possible
185 * so the number of times add_active_range() can be called is
186 * related to the number of nodes and the number of holes
187 */
188 #ifdef CONFIG_MAX_ACTIVE_REGIONS
189 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
190 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
191 #else
192 #if MAX_NUMNODES >= 32
193 /* If there can be many nodes, allow up to 50 holes per node */
194 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
195 #else
196 /* By default, allow up to 256 distinct regions */
197 #define MAX_ACTIVE_REGIONS 256
198 #endif
199 #endif
200
98011f56
JB
201 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
202 static int __meminitdata nr_nodemap_entries;
203 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
204 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
b69a7288 205 static unsigned long __initdata required_kernelcore;
484f51f8 206 static unsigned long __initdata required_movablecore;
b69a7288 207 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
208
209 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
210 int movable_zone;
211 EXPORT_SYMBOL(movable_zone);
c713216d
MG
212#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
213
418508c1
MS
214#if MAX_NUMNODES > 1
215int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 216int nr_online_nodes __read_mostly = 1;
418508c1 217EXPORT_SYMBOL(nr_node_ids);
62bc62a8 218EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
219#endif
220
9ef9acb0
MG
221int page_group_by_mobility_disabled __read_mostly;
222
b2a0ac88
MG
223static void set_pageblock_migratetype(struct page *page, int migratetype)
224{
49255c61
MG
225
226 if (unlikely(page_group_by_mobility_disabled))
227 migratetype = MIGRATE_UNMOVABLE;
228
b2a0ac88
MG
229 set_pageblock_flags_group(page, (unsigned long)migratetype,
230 PB_migrate, PB_migrate_end);
231}
232
7f33d49a
RW
233bool oom_killer_disabled __read_mostly;
234
13e7444b 235#ifdef CONFIG_DEBUG_VM
c6a57e19 236static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 237{
bdc8cb98
DH
238 int ret = 0;
239 unsigned seq;
240 unsigned long pfn = page_to_pfn(page);
c6a57e19 241
bdc8cb98
DH
242 do {
243 seq = zone_span_seqbegin(zone);
244 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
245 ret = 1;
246 else if (pfn < zone->zone_start_pfn)
247 ret = 1;
248 } while (zone_span_seqretry(zone, seq));
249
250 return ret;
c6a57e19
DH
251}
252
253static int page_is_consistent(struct zone *zone, struct page *page)
254{
14e07298 255 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 256 return 0;
1da177e4 257 if (zone != page_zone(page))
c6a57e19
DH
258 return 0;
259
260 return 1;
261}
262/*
263 * Temporary debugging check for pages not lying within a given zone.
264 */
265static int bad_range(struct zone *zone, struct page *page)
266{
267 if (page_outside_zone_boundaries(zone, page))
1da177e4 268 return 1;
c6a57e19
DH
269 if (!page_is_consistent(zone, page))
270 return 1;
271
1da177e4
LT
272 return 0;
273}
13e7444b
NP
274#else
275static inline int bad_range(struct zone *zone, struct page *page)
276{
277 return 0;
278}
279#endif
280
224abf92 281static void bad_page(struct page *page)
1da177e4 282{
d936cf9b
HD
283 static unsigned long resume;
284 static unsigned long nr_shown;
285 static unsigned long nr_unshown;
286
2a7684a2
WF
287 /* Don't complain about poisoned pages */
288 if (PageHWPoison(page)) {
ef2b4b95 289 reset_page_mapcount(page); /* remove PageBuddy */
2a7684a2
WF
290 return;
291 }
292
d936cf9b
HD
293 /*
294 * Allow a burst of 60 reports, then keep quiet for that minute;
295 * or allow a steady drip of one report per second.
296 */
297 if (nr_shown == 60) {
298 if (time_before(jiffies, resume)) {
299 nr_unshown++;
300 goto out;
301 }
302 if (nr_unshown) {
1e9e6365
HD
303 printk(KERN_ALERT
304 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
305 nr_unshown);
306 nr_unshown = 0;
307 }
308 nr_shown = 0;
309 }
310 if (nr_shown++ == 0)
311 resume = jiffies + 60 * HZ;
312
1e9e6365 313 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 314 current->comm, page_to_pfn(page));
718a3821 315 dump_page(page);
3dc14741 316
1da177e4 317 dump_stack();
d936cf9b 318out:
8cc3b392 319 /* Leave bad fields for debug, except PageBuddy could make trouble */
ef2b4b95 320 reset_page_mapcount(page); /* remove PageBuddy */
9f158333 321 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
322}
323
1da177e4
LT
324/*
325 * Higher-order pages are called "compound pages". They are structured thusly:
326 *
327 * The first PAGE_SIZE page is called the "head page".
328 *
329 * The remaining PAGE_SIZE pages are called "tail pages".
330 *
331 * All pages have PG_compound set. All pages have their ->private pointing at
332 * the head page (even the head page has this).
333 *
41d78ba5
HD
334 * The first tail page's ->lru.next holds the address of the compound page's
335 * put_page() function. Its ->lru.prev holds the order of allocation.
336 * This usage means that zero-order pages may not be compound.
1da177e4 337 */
d98c7a09
HD
338
339static void free_compound_page(struct page *page)
340{
d85f3385 341 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
342}
343
01ad1c08 344void prep_compound_page(struct page *page, unsigned long order)
18229df5
AW
345{
346 int i;
347 int nr_pages = 1 << order;
348
349 set_compound_page_dtor(page, free_compound_page);
350 set_compound_order(page, order);
351 __SetPageHead(page);
352 for (i = 1; i < nr_pages; i++) {
353 struct page *p = page + i;
354
355 __SetPageTail(p);
356 p->first_page = page;
357 }
358}
359
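/*
 * Illustrative layout sketch for an order-2 compound page: after
 * prep_compound_page(page, 2),
 *
 *	page[0]   - PG_head set, compound_order(page) == 2,
 *	            destructor set to free_compound_page
 *	page[1-3] - PG_tail set, ->first_page pointing at page[0]
 *
 * so a later free of the head goes through free_compound_page(), i.e.
 * __free_pages_ok(page, compound_order(page)).
 */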
59ff4216 360/* update __split_huge_page_refcount if you change this function */
8cc3b392 361static int destroy_compound_page(struct page *page, unsigned long order)
1da177e4
LT
362{
363 int i;
364 int nr_pages = 1 << order;
8cc3b392 365 int bad = 0;
1da177e4 366
8cc3b392
HD
367 if (unlikely(compound_order(page) != order) ||
368 unlikely(!PageHead(page))) {
224abf92 369 bad_page(page);
8cc3b392
HD
370 bad++;
371 }
1da177e4 372
6d777953 373 __ClearPageHead(page);
8cc3b392 374
18229df5
AW
375 for (i = 1; i < nr_pages; i++) {
376 struct page *p = page + i;
1da177e4 377
e713a21d 378 if (unlikely(!PageTail(p) || (p->first_page != page))) {
224abf92 379 bad_page(page);
8cc3b392
HD
380 bad++;
381 }
d85f3385 382 __ClearPageTail(p);
1da177e4 383 }
8cc3b392
HD
384
385 return bad;
1da177e4 386}
1da177e4 387
17cf4406
NP
388static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
389{
390 int i;
391
6626c5d5
AM
392 /*
393 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
394 * and __GFP_HIGHMEM from hard or soft interrupt context.
395 */
725d704e 396 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
397 for (i = 0; i < (1 << order); i++)
398 clear_highpage(page + i);
399}
400
6aa3001b
AM
401static inline void set_page_order(struct page *page, int order)
402{
4c21e2f2 403 set_page_private(page, order);
676165a8 404 __SetPageBuddy(page);
1da177e4
LT
405}
406
407static inline void rmv_page_order(struct page *page)
408{
676165a8 409 __ClearPageBuddy(page);
4c21e2f2 410 set_page_private(page, 0);
1da177e4
LT
411}
412
413/*
414 * Locate the struct page for both the matching buddy in our
415 * pair (buddy1) and the combined O(n+1) page they form (page).
416 *
417 * 1) Any buddy B1 will have an order O twin B2 which satisfies
418 * the following equation:
419 * B2 = B1 ^ (1 << O)
 420 * For example, if the starting buddy (buddy1) is #8 its order
421 * 1 buddy is #10:
422 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
423 *
424 * 2) Any buddy B will have an order O+1 parent P which
425 * satisfies the following equation:
426 * P = B & ~(1 << O)
427 *
d6e05edc 428 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4 429 */
1da177e4 430static inline unsigned long
43506fad 431__find_buddy_index(unsigned long page_idx, unsigned int order)
1da177e4 432{
43506fad 433 return page_idx ^ (1 << order);
1da177e4
LT
434}
435
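/*
 * Worked example (illustrative): with page_idx = 8 and order = 1,
 * __find_buddy_index() returns 8 ^ (1 << 1) = 10, and once the pair
 * merges, the combined order-2 block starts at buddy_idx & page_idx =
 * 10 & 8 = 8, matching equations 1) and 2) above.
 */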
436/*
437 * This function checks whether a page is free && is the buddy
 438 * we can coalesce a page and its buddy if
13e7444b 439 * (a) the buddy is not in a hole &&
676165a8 440 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
441 * (c) a page and its buddy have the same order &&
442 * (d) a page and its buddy are in the same zone.
676165a8 443 *
5f24ce5f
AA
 444 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
445 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
1da177e4 446 *
676165a8 447 * For recording page's order, we use page_private(page).
1da177e4 448 */
cb2b95e1
AW
449static inline int page_is_buddy(struct page *page, struct page *buddy,
450 int order)
1da177e4 451{
14e07298 452 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 453 return 0;
13e7444b 454
cb2b95e1
AW
455 if (page_zone_id(page) != page_zone_id(buddy))
456 return 0;
457
458 if (PageBuddy(buddy) && page_order(buddy) == order) {
a3af9c38 459 VM_BUG_ON(page_count(buddy) != 0);
6aa3001b 460 return 1;
676165a8 461 }
6aa3001b 462 return 0;
1da177e4
LT
463}
464
465/*
466 * Freeing function for a buddy system allocator.
467 *
 468 * The concept of a buddy system is to maintain a direct-mapped table
469 * (containing bit values) for memory blocks of various "orders".
470 * The bottom level table contains the map for the smallest allocatable
471 * units of memory (here, pages), and each level above it describes
472 * pairs of units from the levels below, hence, "buddies".
473 * At a high level, all that happens here is marking the table entry
474 * at the bottom level available, and propagating the changes upward
475 * as necessary, plus some accounting needed to play nicely with other
476 * parts of the VM system.
 477 * At each level, we keep a list of pages, which are heads of contiguous
5f24ce5f 478 * free pages of length (1 << order) and marked with _mapcount -2. Page's
4c21e2f2 479 * order is recorded in page_private(page) field.
1da177e4
LT
480 * So when we are allocating or freeing one, we can derive the state of the
481 * other. That is, if we allocate a small block, and both were
482 * free, the remainder of the region must be split into blocks.
483 * If a block is freed, and its buddy is also free, then this
484 * triggers coalescing into a block of larger size.
485 *
486 * -- wli
487 */
488
48db57f8 489static inline void __free_one_page(struct page *page,
ed0ae21d
MG
490 struct zone *zone, unsigned int order,
491 int migratetype)
1da177e4
LT
492{
493 unsigned long page_idx;
6dda9d55 494 unsigned long combined_idx;
43506fad 495 unsigned long uninitialized_var(buddy_idx);
6dda9d55 496 struct page *buddy;
1da177e4 497
224abf92 498 if (unlikely(PageCompound(page)))
8cc3b392
HD
499 if (unlikely(destroy_compound_page(page, order)))
500 return;
1da177e4 501
ed0ae21d
MG
502 VM_BUG_ON(migratetype == -1);
503
1da177e4
LT
504 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
505
f2260e6b 506 VM_BUG_ON(page_idx & ((1 << order) - 1));
725d704e 507 VM_BUG_ON(bad_range(zone, page));
1da177e4 508
1da177e4 509 while (order < MAX_ORDER-1) {
43506fad
KC
510 buddy_idx = __find_buddy_index(page_idx, order);
511 buddy = page + (buddy_idx - page_idx);
cb2b95e1 512 if (!page_is_buddy(page, buddy, order))
3c82d0ce 513 break;
13e7444b 514
3c82d0ce 515 /* Our buddy is free, merge with it and move up one order. */
1da177e4 516 list_del(&buddy->lru);
b2a0ac88 517 zone->free_area[order].nr_free--;
1da177e4 518 rmv_page_order(buddy);
43506fad 519 combined_idx = buddy_idx & page_idx;
1da177e4
LT
520 page = page + (combined_idx - page_idx);
521 page_idx = combined_idx;
522 order++;
523 }
524 set_page_order(page, order);
6dda9d55
CZ
525
526 /*
527 * If this is not the largest possible page, check if the buddy
528 * of the next-highest order is free. If it is, it's possible
 529 * that pages are being freed that will coalesce soon. In case
530 * that is happening, add the free page to the tail of the list
531 * so it's less likely to be used soon and more likely to be merged
532 * as a higher order page
533 */
b7f50cfa 534 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
6dda9d55 535 struct page *higher_page, *higher_buddy;
43506fad
KC
536 combined_idx = buddy_idx & page_idx;
537 higher_page = page + (combined_idx - page_idx);
538 buddy_idx = __find_buddy_index(combined_idx, order + 1);
539 higher_buddy = page + (buddy_idx - combined_idx);
6dda9d55
CZ
540 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
541 list_add_tail(&page->lru,
542 &zone->free_area[order].free_list[migratetype]);
543 goto out;
544 }
545 }
546
547 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
548out:
1da177e4
LT
549 zone->free_area[order].nr_free++;
550}
551
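/*
 * Illustrative merge walk: freeing an order-0 page with page_idx 5
 * while its buddy at index 4 is already free proceeds as
 *
 *	order 0: buddy_idx = 5 ^ 1 = 4, buddies merge, page_idx becomes 4
 *	order 1: buddy_idx = 4 ^ 2 = 6, stop here if index 6 is not a
 *	         free buddy of the same order
 *
 * leaving one order-1 block at index 4 on
 * zone->free_area[1].free_list[migratetype].
 */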
092cead6
KM
552/*
 553 * free_page_mlock() -- clean up attempts to free an mlocked() page.
554 * Page should not be on lru, so no need to fix that up.
555 * free_pages_check() will verify...
556 */
557static inline void free_page_mlock(struct page *page)
558{
092cead6
KM
559 __dec_zone_page_state(page, NR_MLOCK);
560 __count_vm_event(UNEVICTABLE_MLOCKFREED);
561}
092cead6 562
224abf92 563static inline int free_pages_check(struct page *page)
1da177e4 564{
92be2e33
NP
565 if (unlikely(page_mapcount(page) |
566 (page->mapping != NULL) |
a3af9c38 567 (atomic_read(&page->_count) != 0) |
8cc3b392 568 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
224abf92 569 bad_page(page);
79f4b7bf 570 return 1;
8cc3b392 571 }
79f4b7bf
HD
572 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
573 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
574 return 0;
1da177e4
LT
575}
576
577/*
5f8dcc21 578 * Frees a number of pages from the PCP lists
1da177e4 579 * Assumes all pages on list are in same zone, and of same order.
207f36ee 580 * count is the number of pages to free.
1da177e4
LT
581 *
582 * If the zone was previously in an "all pages pinned" state then look to
583 * see if this freeing clears that state.
584 *
585 * And clear the zone's pages_scanned counter, to hold off the "all pages are
586 * pinned" detection logic.
587 */
5f8dcc21
MG
588static void free_pcppages_bulk(struct zone *zone, int count,
589 struct per_cpu_pages *pcp)
1da177e4 590{
5f8dcc21 591 int migratetype = 0;
a6f9edd6 592 int batch_free = 0;
72853e29 593 int to_free = count;
5f8dcc21 594
c54ad30c 595 spin_lock(&zone->lock);
93e4a89a 596 zone->all_unreclaimable = 0;
1da177e4 597 zone->pages_scanned = 0;
f2260e6b 598
72853e29 599 while (to_free) {
48db57f8 600 struct page *page;
5f8dcc21
MG
601 struct list_head *list;
602
603 /*
a6f9edd6
MG
604 * Remove pages from lists in a round-robin fashion. A
605 * batch_free count is maintained that is incremented when an
606 * empty list is encountered. This is so more pages are freed
607 * off fuller lists instead of spinning excessively around empty
608 * lists
5f8dcc21
MG
609 */
610 do {
a6f9edd6 611 batch_free++;
5f8dcc21
MG
612 if (++migratetype == MIGRATE_PCPTYPES)
613 migratetype = 0;
614 list = &pcp->lists[migratetype];
615 } while (list_empty(list));
48db57f8 616
1d16871d
NK
617 /* This is the only non-empty list. Free them all. */
618 if (batch_free == MIGRATE_PCPTYPES)
619 batch_free = to_free;
620
a6f9edd6
MG
621 do {
622 page = list_entry(list->prev, struct page, lru);
623 /* must delete as __free_one_page list manipulates */
624 list_del(&page->lru);
a7016235
HD
625 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
626 __free_one_page(page, zone, 0, page_private(page));
627 trace_mm_page_pcpu_drain(page, 0, page_private(page));
72853e29 628 } while (--to_free && --batch_free && !list_empty(list));
1da177e4 629 }
72853e29 630 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
c54ad30c 631 spin_unlock(&zone->lock);
1da177e4
LT
632}
633
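/*
 * Illustrative reading of the round-robin loop above: each pass picks
 * the next pcp list in MIGRATE_UNMOVABLE/RECLAIMABLE/MOVABLE order,
 * and every empty list that had to be skipped bumps batch_free, so the
 * non-empty list that is finally found gives up correspondingly more
 * pages before the scan moves on.
 */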
ed0ae21d
MG
634static void free_one_page(struct zone *zone, struct page *page, int order,
635 int migratetype)
1da177e4 636{
006d22d9 637 spin_lock(&zone->lock);
93e4a89a 638 zone->all_unreclaimable = 0;
006d22d9 639 zone->pages_scanned = 0;
f2260e6b 640
ed0ae21d 641 __free_one_page(page, zone, order, migratetype);
72853e29 642 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
006d22d9 643 spin_unlock(&zone->lock);
48db57f8
NP
644}
645
ec95f53a 646static bool free_pages_prepare(struct page *page, unsigned int order)
48db57f8 647{
1da177e4 648 int i;
8cc3b392 649 int bad = 0;
1da177e4 650
f650316c 651 trace_mm_page_free_direct(page, order);
b1eeab67
VN
652 kmemcheck_free_shadow(page, order);
653
8dd60a3a
AA
654 if (PageAnon(page))
655 page->mapping = NULL;
656 for (i = 0; i < (1 << order); i++)
657 bad += free_pages_check(page + i);
8cc3b392 658 if (bad)
ec95f53a 659 return false;
689bcebf 660
3ac7fe5a 661 if (!PageHighMem(page)) {
9858db50 662 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
3ac7fe5a
TG
663 debug_check_no_obj_freed(page_address(page),
664 PAGE_SIZE << order);
665 }
dafb1367 666 arch_free_page(page, order);
48db57f8 667 kernel_map_pages(page, 1 << order, 0);
dafb1367 668
ec95f53a
KM
669 return true;
670}
671
672static void __free_pages_ok(struct page *page, unsigned int order)
673{
674 unsigned long flags;
675 int wasMlocked = __TestClearPageMlocked(page);
676
677 if (!free_pages_prepare(page, order))
678 return;
679
c54ad30c 680 local_irq_save(flags);
c277331d 681 if (unlikely(wasMlocked))
da456f14 682 free_page_mlock(page);
f8891e5e 683 __count_vm_events(PGFREE, 1 << order);
ed0ae21d
MG
684 free_one_page(page_zone(page), page, order,
685 get_pageblock_migratetype(page));
c54ad30c 686 local_irq_restore(flags);
1da177e4
LT
687}
688
a226f6c8
DH
689/*
690 * permit the bootmem allocator to evade page validation on high-order frees
691 */
af370fb8 692void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
a226f6c8
DH
693{
694 if (order == 0) {
695 __ClearPageReserved(page);
696 set_page_count(page, 0);
7835e98b 697 set_page_refcounted(page);
545b1ea9 698 __free_page(page);
a226f6c8 699 } else {
a226f6c8
DH
700 int loop;
701
545b1ea9 702 prefetchw(page);
a226f6c8
DH
703 for (loop = 0; loop < BITS_PER_LONG; loop++) {
704 struct page *p = &page[loop];
705
545b1ea9
NP
706 if (loop + 1 < BITS_PER_LONG)
707 prefetchw(p + 1);
a226f6c8
DH
708 __ClearPageReserved(p);
709 set_page_count(p, 0);
710 }
711
7835e98b 712 set_page_refcounted(page);
545b1ea9 713 __free_pages(page, order);
a226f6c8
DH
714 }
715}
716
1da177e4
LT
717
718/*
719 * The order of subdivision here is critical for the IO subsystem.
720 * Please do not alter this order without good reasons and regression
721 * testing. Specifically, as large blocks of memory are subdivided,
722 * the order in which smaller blocks are delivered depends on the order
723 * they're subdivided in this function. This is the primary factor
724 * influencing the order in which pages are delivered to the IO
725 * subsystem according to empirical testing, and this is also justified
726 * by considering the behavior of a buddy system containing a single
727 * large block of memory acted on by a series of small allocations.
728 * This behavior is a critical factor in sglist merging's success.
729 *
730 * -- wli
731 */
085cc7d5 732static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
733 int low, int high, struct free_area *area,
734 int migratetype)
1da177e4
LT
735{
736 unsigned long size = 1 << high;
737
738 while (high > low) {
739 area--;
740 high--;
741 size >>= 1;
725d704e 742 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 743 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
744 area->nr_free++;
745 set_page_order(&page[size], high);
746 }
1da177e4
LT
747}
748
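/*
 * Worked example (illustrative): expand(zone, page, 0, 3, area, migratetype)
 * satisfies an order-0 request from an order-3 block by returning the
 * unused halves to the free lists:
 *
 *	page[4..7] goes back as an order-2 block
 *	page[2..3] goes back as an order-1 block
 *	page[1]    goes back as an order-0 block
 *
 * leaving page[0] for the caller.
 */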
1da177e4
LT
749/*
750 * This page is about to be returned from the page allocator
751 */
2a7684a2 752static inline int check_new_page(struct page *page)
1da177e4 753{
92be2e33
NP
754 if (unlikely(page_mapcount(page) |
755 (page->mapping != NULL) |
a3af9c38 756 (atomic_read(&page->_count) != 0) |
8cc3b392 757 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
224abf92 758 bad_page(page);
689bcebf 759 return 1;
8cc3b392 760 }
2a7684a2
WF
761 return 0;
762}
763
764static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
765{
766 int i;
767
768 for (i = 0; i < (1 << order); i++) {
769 struct page *p = page + i;
770 if (unlikely(check_new_page(p)))
771 return 1;
772 }
689bcebf 773
4c21e2f2 774 set_page_private(page, 0);
7835e98b 775 set_page_refcounted(page);
cc102509
NP
776
777 arch_alloc_page(page, order);
1da177e4 778 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
779
780 if (gfp_flags & __GFP_ZERO)
781 prep_zero_page(page, order, gfp_flags);
782
783 if (order && (gfp_flags & __GFP_COMP))
784 prep_compound_page(page, order);
785
689bcebf 786 return 0;
1da177e4
LT
787}
788
56fd56b8
MG
789/*
790 * Go through the free lists for the given migratetype and remove
791 * the smallest available page from the freelists
792 */
728ec980
MG
793static inline
794struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
795 int migratetype)
796{
797 unsigned int current_order;
798 struct free_area * area;
799 struct page *page;
800
801 /* Find a page of the appropriate size in the preferred list */
802 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
803 area = &(zone->free_area[current_order]);
804 if (list_empty(&area->free_list[migratetype]))
805 continue;
806
807 page = list_entry(area->free_list[migratetype].next,
808 struct page, lru);
809 list_del(&page->lru);
810 rmv_page_order(page);
811 area->nr_free--;
56fd56b8
MG
812 expand(zone, page, order, current_order, area, migratetype);
813 return page;
814 }
815
816 return NULL;
817}
818
819
b2a0ac88
MG
820/*
821 * This array describes the order lists are fallen back to when
822 * the free lists for the desirable migrate type are depleted
823 */
824static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
64c5e135
MG
825 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
826 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
827 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
828 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
b2a0ac88
MG
829};
830
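/*
 * Worked example (illustrative): a MIGRATE_UNMOVABLE request whose own
 * free lists are empty falls back to MIGRATE_RECLAIMABLE and then
 * MIGRATE_MOVABLE blocks (largest first); MIGRATE_RESERVE is skipped
 * here and only used as a last resort by __rmqueue() itself.
 */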
c361be55
MG
831/*
832 * Move the free pages in a range to the free lists of the requested type.
d9c23400 833 * Note that start_page and end_page are not aligned on a pageblock
c361be55
MG
834 * boundary. If alignment is required, use move_freepages_block()
835 */
b69a7288
AB
836static int move_freepages(struct zone *zone,
837 struct page *start_page, struct page *end_page,
838 int migratetype)
c361be55
MG
839{
840 struct page *page;
841 unsigned long order;
d100313f 842 int pages_moved = 0;
c361be55
MG
843
844#ifndef CONFIG_HOLES_IN_ZONE
845 /*
846 * page_zone is not safe to call in this context when
847 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
848 * anyway as we check zone boundaries in move_freepages_block().
849 * Remove at a later date when no bug reports exist related to
ac0e5b7a 850 * grouping pages by mobility
c361be55
MG
851 */
852 BUG_ON(page_zone(start_page) != page_zone(end_page));
853#endif
854
855 for (page = start_page; page <= end_page;) {
344c790e
AL
856 /* Make sure we are not inadvertently changing nodes */
857 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
858
c361be55
MG
859 if (!pfn_valid_within(page_to_pfn(page))) {
860 page++;
861 continue;
862 }
863
864 if (!PageBuddy(page)) {
865 page++;
866 continue;
867 }
868
869 order = page_order(page);
84be48d8
KS
870 list_move(&page->lru,
871 &zone->free_area[order].free_list[migratetype]);
c361be55 872 page += 1 << order;
d100313f 873 pages_moved += 1 << order;
c361be55
MG
874 }
875
d100313f 876 return pages_moved;
c361be55
MG
877}
878
b69a7288
AB
879static int move_freepages_block(struct zone *zone, struct page *page,
880 int migratetype)
c361be55
MG
881{
882 unsigned long start_pfn, end_pfn;
883 struct page *start_page, *end_page;
884
885 start_pfn = page_to_pfn(page);
d9c23400 886 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 887 start_page = pfn_to_page(start_pfn);
d9c23400
MG
888 end_page = start_page + pageblock_nr_pages - 1;
889 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
890
891 /* Do not cross zone boundaries */
892 if (start_pfn < zone->zone_start_pfn)
893 start_page = page;
894 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
895 return 0;
896
897 return move_freepages(zone, start_page, end_page, migratetype);
898}
899
2f66a68f
MG
900static void change_pageblock_range(struct page *pageblock_page,
901 int start_order, int migratetype)
902{
903 int nr_pageblocks = 1 << (start_order - pageblock_order);
904
905 while (nr_pageblocks--) {
906 set_pageblock_migratetype(pageblock_page, migratetype);
907 pageblock_page += pageblock_nr_pages;
908 }
909}
910
b2a0ac88 911/* Remove an element from the buddy allocator from the fallback list */
0ac3a409
MG
912static inline struct page *
913__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
b2a0ac88
MG
914{
915 struct free_area * area;
916 int current_order;
917 struct page *page;
918 int migratetype, i;
919
920 /* Find the largest possible block of pages in the other list */
921 for (current_order = MAX_ORDER-1; current_order >= order;
922 --current_order) {
923 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
924 migratetype = fallbacks[start_migratetype][i];
925
56fd56b8
MG
926 /* MIGRATE_RESERVE handled later if necessary */
927 if (migratetype == MIGRATE_RESERVE)
928 continue;
e010487d 929
b2a0ac88
MG
930 area = &(zone->free_area[current_order]);
931 if (list_empty(&area->free_list[migratetype]))
932 continue;
933
934 page = list_entry(area->free_list[migratetype].next,
935 struct page, lru);
936 area->nr_free--;
937
938 /*
c361be55 939 * If breaking a large block of pages, move all free
46dafbca
MG
940 * pages to the preferred allocation list. If falling
941 * back for a reclaimable kernel allocation, be more
 942 * aggressive about taking ownership of free pages
b2a0ac88 943 */
d9c23400 944 if (unlikely(current_order >= (pageblock_order >> 1)) ||
dd5d241e
MG
945 start_migratetype == MIGRATE_RECLAIMABLE ||
946 page_group_by_mobility_disabled) {
46dafbca
MG
947 unsigned long pages;
948 pages = move_freepages_block(zone, page,
949 start_migratetype);
950
951 /* Claim the whole block if over half of it is free */
dd5d241e
MG
952 if (pages >= (1 << (pageblock_order-1)) ||
953 page_group_by_mobility_disabled)
46dafbca
MG
954 set_pageblock_migratetype(page,
955 start_migratetype);
956
b2a0ac88 957 migratetype = start_migratetype;
c361be55 958 }
b2a0ac88
MG
959
960 /* Remove the page from the freelists */
961 list_del(&page->lru);
962 rmv_page_order(page);
b2a0ac88 963
2f66a68f
MG
964 /* Take ownership for orders >= pageblock_order */
965 if (current_order >= pageblock_order)
966 change_pageblock_range(page, current_order,
b2a0ac88
MG
967 start_migratetype);
968
969 expand(zone, page, order, current_order, area, migratetype);
e0fff1bd
MG
970
971 trace_mm_page_alloc_extfrag(page, order, current_order,
972 start_migratetype, migratetype);
973
b2a0ac88
MG
974 return page;
975 }
976 }
977
728ec980 978 return NULL;
b2a0ac88
MG
979}
980
56fd56b8 981/*
1da177e4
LT
982 * Do the hard work of removing an element from the buddy allocator.
983 * Call me with the zone->lock already held.
984 */
b2a0ac88
MG
985static struct page *__rmqueue(struct zone *zone, unsigned int order,
986 int migratetype)
1da177e4 987{
1da177e4
LT
988 struct page *page;
989
728ec980 990retry_reserve:
56fd56b8 991 page = __rmqueue_smallest(zone, order, migratetype);
b2a0ac88 992
728ec980 993 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
56fd56b8 994 page = __rmqueue_fallback(zone, order, migratetype);
b2a0ac88 995
728ec980
MG
996 /*
997 * Use MIGRATE_RESERVE rather than fail an allocation. goto
998 * is used because __rmqueue_smallest is an inline function
999 * and we want just one call site
1000 */
1001 if (!page) {
1002 migratetype = MIGRATE_RESERVE;
1003 goto retry_reserve;
1004 }
1005 }
1006
0d3d062a 1007 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 1008 return page;
1da177e4
LT
1009}
1010
1011/*
1012 * Obtain a specified number of elements from the buddy allocator, all under
1013 * a single hold of the lock, for efficiency. Add them to the supplied list.
1014 * Returns the number of new pages which were placed at *list.
1015 */
1016static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 1017 unsigned long count, struct list_head *list,
e084b2d9 1018 int migratetype, int cold)
1da177e4 1019{
1da177e4 1020 int i;
1da177e4 1021
c54ad30c 1022 spin_lock(&zone->lock);
1da177e4 1023 for (i = 0; i < count; ++i) {
b2a0ac88 1024 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 1025 if (unlikely(page == NULL))
1da177e4 1026 break;
81eabcbe
MG
1027
1028 /*
1029 * Split buddy pages returned by expand() are received here
 1030 * in physical page order. The page is added to the caller's
 1031 * list and the list head then moves forward. From the caller's
1032 * perspective, the linked list is ordered by page number in
1033 * some conditions. This is useful for IO devices that can
1034 * merge IO requests if the physical pages are ordered
1035 * properly.
1036 */
e084b2d9
MG
1037 if (likely(cold == 0))
1038 list_add(&page->lru, list);
1039 else
1040 list_add_tail(&page->lru, list);
535131e6 1041 set_page_private(page, migratetype);
81eabcbe 1042 list = &page->lru;
1da177e4 1043 }
f2260e6b 1044 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
c54ad30c 1045 spin_unlock(&zone->lock);
085cc7d5 1046 return i;
1da177e4
LT
1047}
1048
4ae7c039 1049#ifdef CONFIG_NUMA
8fce4d8e 1050/*
4037d452
CL
1051 * Called from the vmstat counter updater to drain pagesets of this
1052 * currently executing processor on remote nodes after they have
1053 * expired.
1054 *
879336c3
CL
1055 * Note that this function must be called with the thread pinned to
1056 * a single processor.
8fce4d8e 1057 */
4037d452 1058void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 1059{
4ae7c039 1060 unsigned long flags;
4037d452 1061 int to_drain;
4ae7c039 1062
4037d452
CL
1063 local_irq_save(flags);
1064 if (pcp->count >= pcp->batch)
1065 to_drain = pcp->batch;
1066 else
1067 to_drain = pcp->count;
5f8dcc21 1068 free_pcppages_bulk(zone, to_drain, pcp);
4037d452
CL
1069 pcp->count -= to_drain;
1070 local_irq_restore(flags);
4ae7c039
CL
1071}
1072#endif
1073
9f8f2172
CL
1074/*
1075 * Drain pages of the indicated processor.
1076 *
1077 * The processor must either be the current processor and the
1078 * thread pinned to the current processor or a processor that
1079 * is not online.
1080 */
1081static void drain_pages(unsigned int cpu)
1da177e4 1082{
c54ad30c 1083 unsigned long flags;
1da177e4 1084 struct zone *zone;
1da177e4 1085
ee99c71c 1086 for_each_populated_zone(zone) {
1da177e4 1087 struct per_cpu_pageset *pset;
3dfa5721 1088 struct per_cpu_pages *pcp;
1da177e4 1089
99dcc3e5
CL
1090 local_irq_save(flags);
1091 pset = per_cpu_ptr(zone->pageset, cpu);
3dfa5721
CL
1092
1093 pcp = &pset->pcp;
2ff754fa
DR
1094 if (pcp->count) {
1095 free_pcppages_bulk(zone, pcp->count, pcp);
1096 pcp->count = 0;
1097 }
3dfa5721 1098 local_irq_restore(flags);
1da177e4
LT
1099 }
1100}
1da177e4 1101
9f8f2172
CL
1102/*
1103 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1104 */
1105void drain_local_pages(void *arg)
1106{
1107 drain_pages(smp_processor_id());
1108}
1109
1110/*
1111 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1112 */
1113void drain_all_pages(void)
1114{
15c8b6c1 1115 on_each_cpu(drain_local_pages, NULL, 1);
9f8f2172
CL
1116}
1117
296699de 1118#ifdef CONFIG_HIBERNATION
1da177e4
LT
1119
1120void mark_free_pages(struct zone *zone)
1121{
f623f0db
RW
1122 unsigned long pfn, max_zone_pfn;
1123 unsigned long flags;
b2a0ac88 1124 int order, t;
1da177e4
LT
1125 struct list_head *curr;
1126
1127 if (!zone->spanned_pages)
1128 return;
1129
1130 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
1131
1132 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1133 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1134 if (pfn_valid(pfn)) {
1135 struct page *page = pfn_to_page(pfn);
1136
7be98234
RW
1137 if (!swsusp_page_is_forbidden(page))
1138 swsusp_unset_page_free(page);
f623f0db 1139 }
1da177e4 1140
b2a0ac88
MG
1141 for_each_migratetype_order(order, t) {
1142 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 1143 unsigned long i;
1da177e4 1144
f623f0db
RW
1145 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1146 for (i = 0; i < (1UL << order); i++)
7be98234 1147 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 1148 }
b2a0ac88 1149 }
1da177e4
LT
1150 spin_unlock_irqrestore(&zone->lock, flags);
1151}
e2c55dc8 1152#endif /* CONFIG_HIBERNATION */
1da177e4 1153
1da177e4
LT
1154/*
1155 * Free a 0-order page
fc91668e 1156 * cold == 1 ? free a cold page : free a hot page
1da177e4 1157 */
fc91668e 1158void free_hot_cold_page(struct page *page, int cold)
1da177e4
LT
1159{
1160 struct zone *zone = page_zone(page);
1161 struct per_cpu_pages *pcp;
1162 unsigned long flags;
5f8dcc21 1163 int migratetype;
451ea25d 1164 int wasMlocked = __TestClearPageMlocked(page);
1da177e4 1165
ec95f53a 1166 if (!free_pages_prepare(page, 0))
689bcebf
HD
1167 return;
1168
5f8dcc21
MG
1169 migratetype = get_pageblock_migratetype(page);
1170 set_page_private(page, migratetype);
1da177e4 1171 local_irq_save(flags);
c277331d 1172 if (unlikely(wasMlocked))
da456f14 1173 free_page_mlock(page);
f8891e5e 1174 __count_vm_event(PGFREE);
da456f14 1175
5f8dcc21
MG
1176 /*
1177 * We only track unmovable, reclaimable and movable on pcp lists.
1178 * Free ISOLATE pages back to the allocator because they are being
1179 * offlined but treat RESERVE as movable pages so we can get those
1180 * areas back if necessary. Otherwise, we may have to free
1181 * excessively into the page allocator
1182 */
1183 if (migratetype >= MIGRATE_PCPTYPES) {
1184 if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1185 free_one_page(zone, page, 0, migratetype);
1186 goto out;
1187 }
1188 migratetype = MIGRATE_MOVABLE;
1189 }
1190
99dcc3e5 1191 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3dfa5721 1192 if (cold)
5f8dcc21 1193 list_add_tail(&page->lru, &pcp->lists[migratetype]);
3dfa5721 1194 else
5f8dcc21 1195 list_add(&page->lru, &pcp->lists[migratetype]);
1da177e4 1196 pcp->count++;
48db57f8 1197 if (pcp->count >= pcp->high) {
5f8dcc21 1198 free_pcppages_bulk(zone, pcp->batch, pcp);
48db57f8
NP
1199 pcp->count -= pcp->batch;
1200 }
5f8dcc21
MG
1201
1202out:
1da177e4 1203 local_irq_restore(flags);
1da177e4
LT
1204}
1205
8dfcc9ba
NP
1206/*
1207 * split_page takes a non-compound higher-order page, and splits it into
 1208 * n (1<<order) sub-pages: page[0..n-1]
1209 * Each sub-page must be freed individually.
1210 *
1211 * Note: this is probably too low level an operation for use in drivers.
1212 * Please consult with lkml before using this in your driver.
1213 */
1214void split_page(struct page *page, unsigned int order)
1215{
1216 int i;
1217
725d704e
NP
1218 VM_BUG_ON(PageCompound(page));
1219 VM_BUG_ON(!page_count(page));
b1eeab67
VN
1220
1221#ifdef CONFIG_KMEMCHECK
1222 /*
1223 * Split shadow pages too, because free(page[0]) would
1224 * otherwise free the whole shadow.
1225 */
1226 if (kmemcheck_page_is_tracked(page))
1227 split_page(virt_to_page(page[0].shadow), order);
1228#endif
1229
7835e98b
NP
1230 for (i = 1; i < (1 << order); i++)
1231 set_page_refcounted(page + i);
8dfcc9ba 1232}
8dfcc9ba 1233
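/*
 * Illustrative usage sketch, assuming a caller that wants four
 * independently freeable pages from one allocation:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * The refcounts set on page[1..3] above are what make the individual
 * __free_page() calls legal.
 */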
748446bb
MG
1234/*
1235 * Similar to split_page except the page is already free. As this is only
1236 * being used for migration, the migratetype of the block also changes.
1237 * As this is called with interrupts disabled, the caller is responsible
 1238 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1239 * are enabled.
1240 *
1241 * Note: this is probably too low level an operation for use in drivers.
1242 * Please consult with lkml before using this in your driver.
1243 */
1244int split_free_page(struct page *page)
1245{
1246 unsigned int order;
1247 unsigned long watermark;
1248 struct zone *zone;
1249
1250 BUG_ON(!PageBuddy(page));
1251
1252 zone = page_zone(page);
1253 order = page_order(page);
1254
1255 /* Obey watermarks as if the page was being allocated */
1256 watermark = low_wmark_pages(zone) + (1 << order);
1257 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1258 return 0;
1259
1260 /* Remove page from free list */
1261 list_del(&page->lru);
1262 zone->free_area[order].nr_free--;
1263 rmv_page_order(page);
1264 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1265
1266 /* Split into individual pages */
1267 set_page_refcounted(page);
1268 split_page(page, order);
1269
1270 if (order >= pageblock_order - 1) {
1271 struct page *endpage = page + (1 << order) - 1;
1272 for (; page < endpage; page += pageblock_nr_pages)
1273 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1274 }
1275
1276 return 1 << order;
1277}
1278
1da177e4
LT
1279/*
1280 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1281 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1282 * or two.
1283 */
0a15c3e9
MG
1284static inline
1285struct page *buffered_rmqueue(struct zone *preferred_zone,
3dd28266
MG
1286 struct zone *zone, int order, gfp_t gfp_flags,
1287 int migratetype)
1da177e4
LT
1288{
1289 unsigned long flags;
689bcebf 1290 struct page *page;
1da177e4
LT
1291 int cold = !!(gfp_flags & __GFP_COLD);
1292
689bcebf 1293again:
48db57f8 1294 if (likely(order == 0)) {
1da177e4 1295 struct per_cpu_pages *pcp;
5f8dcc21 1296 struct list_head *list;
1da177e4 1297
1da177e4 1298 local_irq_save(flags);
99dcc3e5
CL
1299 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1300 list = &pcp->lists[migratetype];
5f8dcc21 1301 if (list_empty(list)) {
535131e6 1302 pcp->count += rmqueue_bulk(zone, 0,
5f8dcc21 1303 pcp->batch, list,
e084b2d9 1304 migratetype, cold);
5f8dcc21 1305 if (unlikely(list_empty(list)))
6fb332fa 1306 goto failed;
535131e6 1307 }
b92a6edd 1308
5f8dcc21
MG
1309 if (cold)
1310 page = list_entry(list->prev, struct page, lru);
1311 else
1312 page = list_entry(list->next, struct page, lru);
1313
b92a6edd
MG
1314 list_del(&page->lru);
1315 pcp->count--;
7fb1d9fc 1316 } else {
dab48dab
AM
1317 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1318 /*
1319 * __GFP_NOFAIL is not to be used in new code.
1320 *
1321 * All __GFP_NOFAIL callers should be fixed so that they
1322 * properly detect and handle allocation failures.
1323 *
1324 * We most definitely don't want callers attempting to
4923abf9 1325 * allocate greater than order-1 page units with
dab48dab
AM
1326 * __GFP_NOFAIL.
1327 */
4923abf9 1328 WARN_ON_ONCE(order > 1);
dab48dab 1329 }
1da177e4 1330 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 1331 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
1332 spin_unlock(&zone->lock);
1333 if (!page)
1334 goto failed;
6ccf80eb 1335 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1da177e4
LT
1336 }
1337
f8891e5e 1338 __count_zone_vm_events(PGALLOC, zone, 1 << order);
78afd561 1339 zone_statistics(preferred_zone, zone, gfp_flags);
a74609fa 1340 local_irq_restore(flags);
1da177e4 1341
725d704e 1342 VM_BUG_ON(bad_range(zone, page));
17cf4406 1343 if (prep_new_page(page, order, gfp_flags))
a74609fa 1344 goto again;
1da177e4 1345 return page;
a74609fa
NP
1346
1347failed:
1348 local_irq_restore(flags);
a74609fa 1349 return NULL;
1da177e4
LT
1350}
1351
41858966
MG
1352/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1353#define ALLOC_WMARK_MIN WMARK_MIN
1354#define ALLOC_WMARK_LOW WMARK_LOW
1355#define ALLOC_WMARK_HIGH WMARK_HIGH
1356#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1357
1358/* Mask to get the watermark bits */
1359#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
1360
3148890b
NP
1361#define ALLOC_HARDER 0x10 /* try to alloc harder */
1362#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1363#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
7fb1d9fc 1364
933e312e
AM
1365#ifdef CONFIG_FAIL_PAGE_ALLOC
1366
1367static struct fail_page_alloc_attr {
1368 struct fault_attr attr;
1369
1370 u32 ignore_gfp_highmem;
1371 u32 ignore_gfp_wait;
54114994 1372 u32 min_order;
933e312e
AM
1373
1374#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1375
1376 struct dentry *ignore_gfp_highmem_file;
1377 struct dentry *ignore_gfp_wait_file;
54114994 1378 struct dentry *min_order_file;
933e312e
AM
1379
1380#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1381
1382} fail_page_alloc = {
1383 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1384 .ignore_gfp_wait = 1,
1385 .ignore_gfp_highmem = 1,
54114994 1386 .min_order = 1,
933e312e
AM
1387};
1388
1389static int __init setup_fail_page_alloc(char *str)
1390{
1391 return setup_fault_attr(&fail_page_alloc.attr, str);
1392}
1393__setup("fail_page_alloc=", setup_fail_page_alloc);
1394
1395static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1396{
54114994
AM
1397 if (order < fail_page_alloc.min_order)
1398 return 0;
933e312e
AM
1399 if (gfp_mask & __GFP_NOFAIL)
1400 return 0;
1401 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1402 return 0;
1403 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1404 return 0;
1405
1406 return should_fail(&fail_page_alloc.attr, 1 << order);
1407}
1408
1409#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1410
1411static int __init fail_page_alloc_debugfs(void)
1412{
1413 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1414 struct dentry *dir;
1415 int err;
1416
1417 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1418 "fail_page_alloc");
1419 if (err)
1420 return err;
1421 dir = fail_page_alloc.attr.dentries.dir;
1422
1423 fail_page_alloc.ignore_gfp_wait_file =
1424 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1425 &fail_page_alloc.ignore_gfp_wait);
1426
1427 fail_page_alloc.ignore_gfp_highmem_file =
1428 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1429 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1430 fail_page_alloc.min_order_file =
1431 debugfs_create_u32("min-order", mode, dir,
1432 &fail_page_alloc.min_order);
933e312e
AM
1433
1434 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1435 !fail_page_alloc.ignore_gfp_highmem_file ||
1436 !fail_page_alloc.min_order_file) {
933e312e
AM
1437 err = -ENOMEM;
1438 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1439 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1440 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1441 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1442 }
1443
1444 return err;
1445}
1446
1447late_initcall(fail_page_alloc_debugfs);
1448
1449#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1450
1451#else /* CONFIG_FAIL_PAGE_ALLOC */
1452
1453static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1454{
1455 return 0;
1456}
1457
1458#endif /* CONFIG_FAIL_PAGE_ALLOC */
1459
1da177e4 1460/*
88f5acf8 1461 * Return true if free pages are above 'mark'. This takes into account the order
1da177e4
LT
1462 * of the allocation.
1463 */
88f5acf8
MG
1464static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1465 int classzone_idx, int alloc_flags, long free_pages)
1da177e4
LT
1466{
 1467 /* free_pages may go negative - that's OK */
d23ad423 1468 long min = mark;
1da177e4
LT
1469 int o;
1470
88f5acf8 1471 free_pages -= (1 << order) - 1;
7fb1d9fc 1472 if (alloc_flags & ALLOC_HIGH)
1da177e4 1473 min -= min / 2;
7fb1d9fc 1474 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1475 min -= min / 4;
1476
1477 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
88f5acf8 1478 return false;
1da177e4
LT
1479 for (o = 0; o < order; o++) {
1480 /* At the next order, this order's pages become unavailable */
1481 free_pages -= z->free_area[o].nr_free << o;
1482
1483 /* Require fewer higher order pages to be free */
1484 min >>= 1;
1485
1486 if (free_pages <= min)
88f5acf8 1487 return false;
1da177e4 1488 }
88f5acf8
MG
1489 return true;
1490}
1491
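/*
 * Worked example (illustrative): for an order-2 request against a mark
 * of 64, with no ALLOC_HIGH/ALLOC_HARDER boost and an empty
 * lowmem_reserve, the size of the request is first deducted from
 * free_pages and the remainder must exceed 64; then, for each order
 * below 2, the pages of that order are dropped from free_pages while
 * min is halved, so blocks of order >= 1 must still cover more than 32
 * pages and blocks of order >= 2 more than 16. A zone full of
 * fragmented order-0 pages therefore fails a higher-order watermark
 * check.
 */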
1492bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1493 int classzone_idx, int alloc_flags)
1494{
1495 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1496 zone_page_state(z, NR_FREE_PAGES));
1497}
1498
1499bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1500 int classzone_idx, int alloc_flags)
1501{
1502 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1503
1504 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1505 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1506
1507 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1508 free_pages);
1da177e4
LT
1509}
1510
9276b1bc
PJ
1511#ifdef CONFIG_NUMA
1512/*
1513 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1514 * skip over zones that are not allowed by the cpuset, or that have
1515 * been recently (in last second) found to be nearly full. See further
1516 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1517 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1518 *
1519 * If the zonelist cache is present in the passed in zonelist, then
1520 * returns a pointer to the allowed node mask (either the current
37b07e41 1521 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1522 *
1523 * If the zonelist cache is not available for this zonelist, does
1524 * nothing and returns NULL.
1525 *
1526 * If the fullzones BITMAP in the zonelist cache is stale (more than
 1527 * a second since last zap'd) then we zap it out (clear its bits).
1528 *
1529 * We hold off even calling zlc_setup, until after we've checked the
1530 * first zone in the zonelist, on the theory that most allocations will
1531 * be satisfied from that first zone, so best to examine that zone as
1532 * quickly as we can.
1533 */
1534static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1535{
1536 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1537 nodemask_t *allowednodes; /* zonelist_cache approximation */
1538
1539 zlc = zonelist->zlcache_ptr;
1540 if (!zlc)
1541 return NULL;
1542
f05111f5 1543 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1544 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1545 zlc->last_full_zap = jiffies;
1546 }
1547
1548 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1549 &cpuset_current_mems_allowed :
37b07e41 1550 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1551 return allowednodes;
1552}
1553
1554/*
1555 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1556 * if it is worth looking at further for free memory:
1557 * 1) Check that the zone isn't thought to be full (doesn't have its
1558 * bit set in the zonelist_cache fullzones BITMAP).
1559 * 2) Check that the zones node (obtained from the zonelist_cache
1560 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1561 * Return true (non-zero) if zone is worth looking at further, or
1562 * else return false (zero) if it is not.
1563 *
1564 * This check -ignores- the distinction between various watermarks,
1565 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1566 * found to be full for any variation of these watermarks, it will
1567 * be considered full for up to one second by all requests, unless
1568 * we are so low on memory on all allowed nodes that we are forced
1569 * into the second scan of the zonelist.
1570 *
1571 * In the second scan we ignore this zonelist cache and exactly
 1572 * apply the watermarks to all zones, even if it is slower to do so.
1573 * We are low on memory in the second scan, and should leave no stone
1574 * unturned looking for a free page.
1575 */
dd1a239f 1576static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1577 nodemask_t *allowednodes)
1578{
1579 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1580 int i; /* index of *z in zonelist zones */
1581 int n; /* node that zone *z is on */
1582
1583 zlc = zonelist->zlcache_ptr;
1584 if (!zlc)
1585 return 1;
1586
dd1a239f 1587 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1588 n = zlc->z_to_n[i];
1589
1590 /* This zone is worth trying if it is allowed but not full */
1591 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1592}
1593
1594/*
1595 * Given 'z' scanning a zonelist, set the corresponding bit in
1596 * zlc->fullzones, so that subsequent attempts to allocate a page
1597 * from that zone don't waste time re-examining it.
1598 */
dd1a239f 1599static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1600{
1601 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1602 int i; /* index of *z in zonelist zones */
1603
1604 zlc = zonelist->zlcache_ptr;
1605 if (!zlc)
1606 return;
1607
dd1a239f 1608 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1609
1610 set_bit(i, zlc->fullzones);
1611}
1612
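/*
 * Illustrative sketch of how the helpers above cooperate during a
 * zonelist scan: for each candidate zone the scanner first asks
 * zlc_zone_worth_trying(), which cheaply rejects zones whose bit is
 * already set in zlc->fullzones or whose node is outside allowednodes;
 * when an allocation attempt in a zone fails, zlc_mark_zone_full()
 * sets that zone's bit so later scans skip it, until zlc_setup() zaps
 * the bitmap roughly once a second.
 */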
1613#else /* CONFIG_NUMA */
1614
1615static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1616{
1617 return NULL;
1618}
1619
dd1a239f 1620static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1621 nodemask_t *allowednodes)
1622{
1623 return 1;
1624}
1625
dd1a239f 1626static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1627{
1628}
1629#endif /* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			int ret;

			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
			if (zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				goto try_this_zone;

			if (zone_reclaim_mode == 0)
				goto this_zone_full;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				goto try_next_zone;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				goto this_zone_full;
			default:
				/* did we reclaim enough */
				if (!zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto this_zone_full;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
			/*
			 * We do zlc_setup after the first zone is tried, but
			 * only if there are multiple nodes to make it
			 * worthwhile.
			 */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}

/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	/*
	 * Don't let big-order allocations loop unless the caller
	 * explicitly requests that.
	 */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	return 0;
}
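
/*
 * Illustrative sketch (not part of the original file): how the retry
 * decision above plays out for a few hypothetical callers.
 *
 *	should_alloc_retry(GFP_KERNEL, 0, 0);
 *		// 1: order 0 <= PAGE_ALLOC_COSTLY_ORDER, keep retrying
 *	should_alloc_retry(GFP_KERNEL | __GFP_NORETRY, 4, 0);
 *		// 0: the caller explicitly opted out of looping
 *	should_alloc_retry(GFP_KERNEL | __GFP_REPEAT, 4, 32);
 *		// 0: already reclaimed >= 1 << 4 pages without success
 */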

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	/* Acquire the OOM killer lock for the zones in zonelist */
	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keeping a very high
	 * watermark here; this is only to catch a parallel oom killing, and
	 * we must fail if we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, migratetype);
	if (page)
		goto out;

	if (!(gfp_mask & __GFP_NOFAIL)) {
		/* The OOM killer will not help higher order allocs */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			goto out;
		/* The OOM killer does not needlessly kill tasks for lowmem */
		if (high_zoneidx < ZONE_NORMAL)
			goto out;
		/*
		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
		 * The caller should handle page allocation failure by itself
		 * if it specifies __GFP_THISNODE.
		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
		 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress,
	bool sync_migration)
{
	struct page *page;

	if (!order || compaction_deferred(preferred_zone))
		return NULL;

	current->flags |= PF_MEMALLOC;
	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
						nodemask, sync_migration);
	current->flags &= ~PF_MEMALLOC;
	if (*did_some_progress != COMPACT_SKIPPED) {

		/* Page migration frees to the PCP lists but we want merging */
		drain_pages(get_cpu());
		put_cpu();

		page = get_page_from_freelist(gfp_mask, nodemask,
				order, zonelist, high_zoneidx,
				alloc_flags, preferred_zone,
				migratetype);
		if (page) {
			preferred_zone->compact_considered = 0;
			preferred_zone->compact_defer_shift = 0;
			count_vm_event(COMPACTSUCCESS);
			return page;
		}

		/*
		 * It's bad if a compaction run occurs and fails.  The most
		 * likely reason is that pages exist, but not enough to
		 * satisfy watermarks.
		 */
		count_vm_event(COMPACTFAIL);
		defer_compaction(preferred_zone);

		cond_resched();
	}

	return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress,
	bool sync_migration)
{
	return NULL;
}
#endif /* CONFIG_COMPACTION */

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	struct reclaim_state reclaim_state;
	bool drained = false;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	current->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	current->reclaim_state = &reclaim_state;

	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	current->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	current->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (unlikely(!(*did_some_progress)))
		return NULL;

retry:
	page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags, preferred_zone,
					migratetype);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists. Drain them and try again.
	 */
	if (!page && !drained) {
		drain_all_pages();
		drained = true;
		goto retry;
	}

	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}

static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
						enum zone_type high_zoneidx,
						enum zone_type classzone_idx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, classzone_idx);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (!wait) {
		/*
		 * Not worth trying to allocate harder for
		 * __GFP_NOMEMALLOC even if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (!in_interrupt() &&
		    ((current->flags & PF_MEMALLOC) ||
		     unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}

	return alloc_flags;
}
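
/*
 * Illustrative sketch (not part of the original file), assuming an ordinary
 * process context (no PF_MEMALLOC, not an OOM victim):
 *
 *	gfp_to_alloc_flags(GFP_KERNEL);
 *		// ALLOC_WMARK_MIN | ALLOC_CPUSET
 *	gfp_to_alloc_flags(GFP_ATOMIC);	// __GFP_HIGH without __GFP_WAIT
 *		// ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER, cpusets ignored
 */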
2008
11e33f6a
MG
2009static inline struct page *
2010__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2011 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
2012 nodemask_t *nodemask, struct zone *preferred_zone,
2013 int migratetype)
11e33f6a
MG
2014{
2015 const gfp_t wait = gfp_mask & __GFP_WAIT;
2016 struct page *page = NULL;
2017 int alloc_flags;
2018 unsigned long pages_reclaimed = 0;
2019 unsigned long did_some_progress;
77f1fe6b 2020 bool sync_migration = false;
1da177e4 2021
72807a74
MG
2022 /*
2023 * In the slowpath, we sanity check order to avoid ever trying to
2024 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2025 * be using allocators in order of preference for an area that is
2026 * too large.
2027 */
1fc28b70
MG
2028 if (order >= MAX_ORDER) {
2029 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 2030 return NULL;
1fc28b70 2031 }
1da177e4 2032
952f3b51
CL
2033 /*
2034 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2035 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2036 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2037 * using a larger set of nodes after it has established that the
2038 * allowed per node queues are empty and that nodes are
2039 * over allocated.
2040 */
2041 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2042 goto nopage;
2043
cc4a6851 2044restart:
32dba98e
AA
2045 if (!(gfp_mask & __GFP_NO_KSWAPD))
2046 wake_all_kswapd(order, zonelist, high_zoneidx,
99504748 2047 zone_idx(preferred_zone));
1da177e4 2048
9bf2229f 2049 /*
7fb1d9fc
RS
2050 * OK, we're below the kswapd watermark and have kicked background
2051 * reclaim. Now things get more complex, so set up alloc_flags according
2052 * to how we want to proceed.
9bf2229f 2053 */
341ce06f 2054 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 2055
f33261d7
DR
2056 /*
2057 * Find the true preferred zone if the allocation is unconstrained by
2058 * cpusets.
2059 */
2060 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2061 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2062 &preferred_zone);
2063
341ce06f 2064 /* This is the last chance, in general, before the goto nopage. */
19770b32 2065 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
341ce06f
PZ
2066 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2067 preferred_zone, migratetype);
7fb1d9fc
RS
2068 if (page)
2069 goto got_pg;
1da177e4 2070
b43a57bb 2071rebalance:
11e33f6a 2072 /* Allocate without watermarks if the context allows */
341ce06f
PZ
2073 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2074 page = __alloc_pages_high_priority(gfp_mask, order,
2075 zonelist, high_zoneidx, nodemask,
2076 preferred_zone, migratetype);
2077 if (page)
2078 goto got_pg;
1da177e4
LT
2079 }
2080
2081 /* Atomic allocations - we can't balance anything */
2082 if (!wait)
2083 goto nopage;
2084
341ce06f 2085 /* Avoid recursion of direct reclaim */
c06b1fca 2086 if (current->flags & PF_MEMALLOC)
341ce06f
PZ
2087 goto nopage;
2088
6583bb64
DR
2089 /* Avoid allocations with no watermarks from looping endlessly */
2090 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2091 goto nopage;
2092
77f1fe6b
MG
2093 /*
2094 * Try direct compaction. The first pass is asynchronous. Subsequent
2095 * attempts after direct reclaim are synchronous
2096 */
56de7263
MG
2097 page = __alloc_pages_direct_compact(gfp_mask, order,
2098 zonelist, high_zoneidx,
2099 nodemask,
2100 alloc_flags, preferred_zone,
77f1fe6b
MG
2101 migratetype, &did_some_progress,
2102 sync_migration);
56de7263
MG
2103 if (page)
2104 goto got_pg;
11bc82d6 2105 sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
56de7263 2106
11e33f6a
MG
2107 /* Try direct reclaim and then allocating */
2108 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2109 zonelist, high_zoneidx,
2110 nodemask,
5117f45d 2111 alloc_flags, preferred_zone,
3dd28266 2112 migratetype, &did_some_progress);
11e33f6a
MG
2113 if (page)
2114 goto got_pg;
1da177e4 2115
e33c3b5e 2116 /*
11e33f6a
MG
2117 * If we failed to make any progress reclaiming, then we are
2118 * running out of options and have to consider going OOM
e33c3b5e 2119 */
11e33f6a
MG
2120 if (!did_some_progress) {
2121 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
7f33d49a
RW
2122 if (oom_killer_disabled)
2123 goto nopage;
11e33f6a
MG
2124 page = __alloc_pages_may_oom(gfp_mask, order,
2125 zonelist, high_zoneidx,
3dd28266
MG
2126 nodemask, preferred_zone,
2127 migratetype);
11e33f6a
MG
2128 if (page)
2129 goto got_pg;
1da177e4 2130
03668b3c
DR
2131 if (!(gfp_mask & __GFP_NOFAIL)) {
2132 /*
2133 * The oom killer is not called for high-order
2134 * allocations that may fail, so if no progress
2135 * is being made, there are no other options and
2136 * retrying is unlikely to help.
2137 */
2138 if (order > PAGE_ALLOC_COSTLY_ORDER)
2139 goto nopage;
2140 /*
2141 * The oom killer is not called for lowmem
2142 * allocations to prevent needlessly killing
2143 * innocent tasks.
2144 */
2145 if (high_zoneidx < ZONE_NORMAL)
2146 goto nopage;
2147 }
e2c55dc8 2148
ff0ceb9d
DR
2149 goto restart;
2150 }
1da177e4
LT
2151 }
2152
11e33f6a 2153 /* Check if we should retry the allocation */
a41f24ea 2154 pages_reclaimed += did_some_progress;
11e33f6a
MG
2155 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2156 /* Wait for some write requests to complete then retry */
0e093d99 2157 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1da177e4 2158 goto rebalance;
3e7d3449
MG
2159 } else {
2160 /*
2161 * High-order allocations do not necessarily loop after
2162 * direct reclaim and reclaim/compaction depends on compaction
2163 * being called after reclaim so call directly if necessary
2164 */
2165 page = __alloc_pages_direct_compact(gfp_mask, order,
2166 zonelist, high_zoneidx,
2167 nodemask,
2168 alloc_flags, preferred_zone,
77f1fe6b
MG
2169 migratetype, &did_some_progress,
2170 sync_migration);
3e7d3449
MG
2171 if (page)
2172 goto got_pg;
1da177e4
LT
2173 }
2174
2175nopage:
2176 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
cbf978bf
DR
2177 unsigned int filter = SHOW_MEM_FILTER_NODES;
2178
2179 /*
2180 * This documents exceptions given to allocations in certain
2181 * contexts that are allowed to allocate outside current's set
2182 * of allowed nodes.
2183 */
2184 if (!(gfp_mask & __GFP_NOMEMALLOC))
2185 if (test_thread_flag(TIF_MEMDIE) ||
2186 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2187 filter &= ~SHOW_MEM_FILTER_NODES;
2188 if (in_interrupt() || !wait)
2189 filter &= ~SHOW_MEM_FILTER_NODES;
2190
2191 pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
c06b1fca 2192 current->comm, order, gfp_mask);
1da177e4 2193 dump_stack();
29423e77 2194 if (!should_suppress_show_mem())
cbf978bf 2195 __show_mem(filter);
1da177e4 2196 }
b1eeab67 2197 return page;
1da177e4 2198got_pg:
b1eeab67
VN
2199 if (kmemcheck_enabled)
2200 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1da177e4 2201 return page;
11e33f6a 2202
1da177e4 2203}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page;
	int migratetype = allocflags_to_migratetype(gfp_mask);

	gfp_mask &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_WAIT);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node
	 */
	if (unlikely(!zonelist->_zonerefs->zone))
		return NULL;

	get_mems_allowed();
	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx,
				nodemask ? : &cpuset_current_mems_allowed,
				&preferred_zone);
	if (!preferred_zone) {
		put_mems_allowed();
		return NULL;
	}

	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
			preferred_zone, migratetype);
	if (unlikely(!page))
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
	put_mems_allowed();

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
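
/*
 * Illustrative usage (not part of the original file): most callers reach
 * __alloc_pages_nodemask() through the alloc_pages()/alloc_page() wrappers,
 * roughly as follows.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// 4 contiguous pages
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */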

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0) {
		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
		free_hot_cold_page(pvec->pages[i], pvec->cold);
	}
}

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);
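
/*
 * Illustrative usage (not part of the original file) of the helpers above:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	// 2 pages
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *	if (!buf || !zeroed)
 *		goto err;
 *	...
 *	free_pages(buf, 1);
 *	free_page(zeroed);
 */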

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}

	return (void *)addr;
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
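
/*
 * Illustrative usage (not part of the original file): a caller needing,
 * say, 20KB of physically-contiguous memory avoids rounding up to a full
 * order-3 (32KB with 4KB pages) allocation:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */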
2369
1da177e4
LT
2370static unsigned int nr_free_zone_pages(int offset)
2371{
dd1a239f 2372 struct zoneref *z;
54a6eb5c
MG
2373 struct zone *zone;
2374
e310fd43 2375 /* Just pick one node, since fallback list is circular */
1da177e4
LT
2376 unsigned int sum = 0;
2377
0e88460d 2378 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 2379
54a6eb5c 2380 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43 2381 unsigned long size = zone->present_pages;
41858966 2382 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
2383 if (size > high)
2384 sum += size - high;
1da177e4
LT
2385 }
2386
2387 return sum;
2388}
2389
2390/*
2391 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2392 */
2393unsigned int nr_free_buffer_pages(void)
2394{
af4ca457 2395 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 2396}
c2f1a551 2397EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
2398
2399/*
2400 * Amount of free RAM allocatable within all zones
2401 */
2402unsigned int nr_free_pagecache_pages(void)
2403{
2a1e274a 2404 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 2405}
08e0f6a9
CL
2406
2407static inline void show_node(struct zone *zone)
1da177e4 2408{
08e0f6a9 2409 if (NUMA_BUILD)
25ba77c1 2410 printk("Node %d ", zone_to_nid(zone));
1da177e4 2411}
1da177e4 2412
1da177e4
LT
2413void si_meminfo(struct sysinfo *val)
2414{
2415 val->totalram = totalram_pages;
2416 val->sharedram = 0;
d23ad423 2417 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 2418 val->bufferram = nr_blockdev_pages();
1da177e4
LT
2419 val->totalhigh = totalhigh_pages;
2420 val->freehigh = nr_free_highpages();
1da177e4
LT
2421 val->mem_unit = PAGE_SIZE;
2422}
2423
2424EXPORT_SYMBOL(si_meminfo);
2425
2426#ifdef CONFIG_NUMA
2427void si_meminfo_node(struct sysinfo *val, int nid)
2428{
2429 pg_data_t *pgdat = NODE_DATA(nid);
2430
2431 val->totalram = pgdat->node_present_pages;
d23ad423 2432 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 2433#ifdef CONFIG_HIGHMEM
1da177e4 2434 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
2435 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2436 NR_FREE_PAGES);
98d2b0eb
CL
2437#else
2438 val->totalhigh = 0;
2439 val->freehigh = 0;
2440#endif
1da177e4
LT
2441 val->mem_unit = PAGE_SIZE;
2442}
2443#endif
2444
ddd588b5
DR
2445/*
2446 * Determine whether the zone's node should be displayed or not, depending on
2447 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
2448 */
2449static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2450{
2451 bool ret = false;
2452
2453 if (!(flags & SHOW_MEM_FILTER_NODES))
2454 goto out;
2455
2456 get_mems_allowed();
2457 ret = !node_isset(zone->zone_pgdat->node_id,
2458 cpuset_current_mems_allowed);
2459 put_mems_allowed();
2460out:
2461 return ret;
2462}
2463
1da177e4
LT
2464#define K(x) ((x) << (PAGE_SHIFT-10))
2465
2466/*
2467 * Show free area list (used inside shift_scroll-lock stuff)
2468 * We also calculate the percentage fragmentation. We do this by counting the
2469 * memory on each free list with the exception of the first item on the list.
ddd588b5
DR
2470 * Suppresses nodes that are not allowed by current's cpuset if
2471 * SHOW_MEM_FILTER_NODES is passed.
1da177e4 2472 */
ddd588b5 2473void __show_free_areas(unsigned int filter)
1da177e4 2474{
c7241913 2475 int cpu;
1da177e4
LT
2476 struct zone *zone;
2477
ee99c71c 2478 for_each_populated_zone(zone) {
ddd588b5
DR
2479 if (skip_free_areas_zone(filter, zone))
2480 continue;
c7241913
JS
2481 show_node(zone);
2482 printk("%s per-cpu:\n", zone->name);
1da177e4 2483
6b482c67 2484 for_each_online_cpu(cpu) {
1da177e4
LT
2485 struct per_cpu_pageset *pageset;
2486
99dcc3e5 2487 pageset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2488
3dfa5721
CL
2489 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2490 cpu, pageset->pcp.high,
2491 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
2492 }
2493 }
2494
a731286d
KM
2495 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2496 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
7b854121 2497 " unevictable:%lu"
b76146ed 2498 " dirty:%lu writeback:%lu unstable:%lu\n"
3701b033 2499 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4b02108a 2500 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
4f98a2fe 2501 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 2502 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
2503 global_page_state(NR_ISOLATED_ANON),
2504 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 2505 global_page_state(NR_INACTIVE_FILE),
a731286d 2506 global_page_state(NR_ISOLATED_FILE),
7b854121 2507 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 2508 global_page_state(NR_FILE_DIRTY),
ce866b34 2509 global_page_state(NR_WRITEBACK),
fd39fc85 2510 global_page_state(NR_UNSTABLE_NFS),
d23ad423 2511 global_page_state(NR_FREE_PAGES),
3701b033
KM
2512 global_page_state(NR_SLAB_RECLAIMABLE),
2513 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 2514 global_page_state(NR_FILE_MAPPED),
4b02108a 2515 global_page_state(NR_SHMEM),
a25700a5
AM
2516 global_page_state(NR_PAGETABLE),
2517 global_page_state(NR_BOUNCE));
1da177e4 2518
ee99c71c 2519 for_each_populated_zone(zone) {
1da177e4
LT
2520 int i;
2521
ddd588b5
DR
2522 if (skip_free_areas_zone(filter, zone))
2523 continue;
1da177e4
LT
2524 show_node(zone);
2525 printk("%s"
2526 " free:%lukB"
2527 " min:%lukB"
2528 " low:%lukB"
2529 " high:%lukB"
4f98a2fe
RR
2530 " active_anon:%lukB"
2531 " inactive_anon:%lukB"
2532 " active_file:%lukB"
2533 " inactive_file:%lukB"
7b854121 2534 " unevictable:%lukB"
a731286d
KM
2535 " isolated(anon):%lukB"
2536 " isolated(file):%lukB"
1da177e4 2537 " present:%lukB"
4a0aa73f
KM
2538 " mlocked:%lukB"
2539 " dirty:%lukB"
2540 " writeback:%lukB"
2541 " mapped:%lukB"
4b02108a 2542 " shmem:%lukB"
4a0aa73f
KM
2543 " slab_reclaimable:%lukB"
2544 " slab_unreclaimable:%lukB"
c6a7f572 2545 " kernel_stack:%lukB"
4a0aa73f
KM
2546 " pagetables:%lukB"
2547 " unstable:%lukB"
2548 " bounce:%lukB"
2549 " writeback_tmp:%lukB"
1da177e4
LT
2550 " pages_scanned:%lu"
2551 " all_unreclaimable? %s"
2552 "\n",
2553 zone->name,
88f5acf8 2554 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
2555 K(min_wmark_pages(zone)),
2556 K(low_wmark_pages(zone)),
2557 K(high_wmark_pages(zone)),
4f98a2fe
RR
2558 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2559 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2560 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2561 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 2562 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
2563 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2564 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 2565 K(zone->present_pages),
4a0aa73f
KM
2566 K(zone_page_state(zone, NR_MLOCK)),
2567 K(zone_page_state(zone, NR_FILE_DIRTY)),
2568 K(zone_page_state(zone, NR_WRITEBACK)),
2569 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 2570 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
2571 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2572 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
2573 zone_page_state(zone, NR_KERNEL_STACK) *
2574 THREAD_SIZE / 1024,
4a0aa73f
KM
2575 K(zone_page_state(zone, NR_PAGETABLE)),
2576 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2577 K(zone_page_state(zone, NR_BOUNCE)),
2578 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
1da177e4 2579 zone->pages_scanned,
93e4a89a 2580 (zone->all_unreclaimable ? "yes" : "no")
1da177e4
LT
2581 );
2582 printk("lowmem_reserve[]:");
2583 for (i = 0; i < MAX_NR_ZONES; i++)
2584 printk(" %lu", zone->lowmem_reserve[i]);
2585 printk("\n");
2586 }
2587
ee99c71c 2588 for_each_populated_zone(zone) {
8f9de51a 2589 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4 2590
ddd588b5
DR
2591 if (skip_free_areas_zone(filter, zone))
2592 continue;
1da177e4
LT
2593 show_node(zone);
2594 printk("%s: ", zone->name);
1da177e4
LT
2595
2596 spin_lock_irqsave(&zone->lock, flags);
2597 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
2598 nr[order] = zone->free_area[order].nr_free;
2599 total += nr[order] << order;
1da177e4
LT
2600 }
2601 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
2602 for (order = 0; order < MAX_ORDER; order++)
2603 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
2604 printk("= %lukB\n", K(total));
2605 }
2606
e6f3602d
LW
2607 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2608
1da177e4
LT
2609 show_swap_cache_info();
2610}
2611
ddd588b5
DR
2612void show_free_areas(void)
2613{
2614 __show_free_areas(0);
2615}
2616
19770b32
MG
2617static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2618{
2619 zoneref->zone = zone;
2620 zoneref->zone_idx = zone_idx(zone);
2621}
2622
1da177e4
LT
2623/*
2624 * Builds allocation fallback zone lists.
1a93205b
CL
2625 *
2626 * Add all populated zones of a node to the zonelist.
1da177e4 2627 */
f0c0b2b8
KH
2628static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2629 int nr_zones, enum zone_type zone_type)
1da177e4 2630{
1a93205b
CL
2631 struct zone *zone;
2632
98d2b0eb 2633 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 2634 zone_type++;
02a68a5e
CL
2635
2636 do {
2f6726e5 2637 zone_type--;
070f8032 2638 zone = pgdat->node_zones + zone_type;
1a93205b 2639 if (populated_zone(zone)) {
dd1a239f
MG
2640 zoneref_set_zone(zone,
2641 &zonelist->_zonerefs[nr_zones++]);
070f8032 2642 check_highest_zone(zone_type);
1da177e4 2643 }
02a68a5e 2644
2f6726e5 2645 } while (zone_type);
070f8032 2646 return nr_zones;
1da177e4
LT
2647}
2648
f0c0b2b8
KH
2649
2650/*
2651 * zonelist_order:
2652 * 0 = automatic detection of better ordering.
2653 * 1 = order by ([node] distance, -zonetype)
2654 * 2 = order by (-zonetype, [node] distance)
2655 *
2656 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2657 * the same zonelist. So only NUMA can configure this param.
2658 */
2659#define ZONELIST_ORDER_DEFAULT 0
2660#define ZONELIST_ORDER_NODE 1
2661#define ZONELIST_ORDER_ZONE 2
2662
2663/* zonelist order in the kernel.
2664 * set_zonelist_order() will set this to NODE or ZONE.
2665 */
2666static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2667static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2668
2669
1da177e4 2670#ifdef CONFIG_NUMA
f0c0b2b8
KH
2671/* The value user specified ....changed by config */
2672static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2673/* string for sysctl */
2674#define NUMA_ZONELIST_ORDER_LEN 16
2675char numa_zonelist_order[16] = "default";
2676
/*
 * Interface for configuring zonelist ordering.
 * command line option "numa_zonelist_order"
 *	= "[dD]efault"	- default, automatic configuration.
 *	= "[nN]ode"	- order by node locality, then by zone within node
 *	= "[zZ]one"	- order by zone, then by locality within zone
 */
2684
2685static int __parse_numa_zonelist_order(char *s)
2686{
2687 if (*s == 'd' || *s == 'D') {
2688 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2689 } else if (*s == 'n' || *s == 'N') {
2690 user_zonelist_order = ZONELIST_ORDER_NODE;
2691 } else if (*s == 'z' || *s == 'Z') {
2692 user_zonelist_order = ZONELIST_ORDER_ZONE;
2693 } else {
2694 printk(KERN_WARNING
2695 "Ignoring invalid numa_zonelist_order value: "
2696 "%s\n", s);
2697 return -EINVAL;
2698 }
2699 return 0;
2700}
2701
2702static __init int setup_numa_zonelist_order(char *s)
2703{
ecb256f8
VL
2704 int ret;
2705
2706 if (!s)
2707 return 0;
2708
2709 ret = __parse_numa_zonelist_order(s);
2710 if (ret == 0)
2711 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2712
2713 return ret;
f0c0b2b8
KH
2714}
2715early_param("numa_zonelist_order", setup_numa_zonelist_order);
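
/*
 * Illustrative usage (not part of the original file): the ordering can be
 * forced at boot, e.g.
 *
 *	numa_zonelist_order=zone
 *
 * or changed at runtime through /proc/sys/vm/numa_zonelist_order, which is
 * serviced by numa_zonelist_order_handler() below.
 */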
2716
2717/*
2718 * sysctl handler for numa_zonelist_order
2719 */
2720int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 2721 void __user *buffer, size_t *length,
f0c0b2b8
KH
2722 loff_t *ppos)
2723{
2724 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2725 int ret;
443c6f14 2726 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 2727
443c6f14 2728 mutex_lock(&zl_order_mutex);
f0c0b2b8 2729 if (write)
443c6f14 2730 strcpy(saved_string, (char*)table->data);
8d65af78 2731 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 2732 if (ret)
443c6f14 2733 goto out;
f0c0b2b8
KH
2734 if (write) {
2735 int oldval = user_zonelist_order;
2736 if (__parse_numa_zonelist_order((char*)table->data)) {
2737 /*
2738 * bogus value. restore saved string
2739 */
2740 strncpy((char*)table->data, saved_string,
2741 NUMA_ZONELIST_ORDER_LEN);
2742 user_zonelist_order = oldval;
4eaf3f64
HL
2743 } else if (oldval != user_zonelist_order) {
2744 mutex_lock(&zonelists_mutex);
1f522509 2745 build_all_zonelists(NULL);
4eaf3f64
HL
2746 mutex_unlock(&zonelists_mutex);
2747 }
f0c0b2b8 2748 }
443c6f14
AK
2749out:
2750 mutex_unlock(&zl_order_mutex);
2751 return ret;
f0c0b2b8
KH
2752}
2753
2754
62bc62a8 2755#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
2756static int node_load[MAX_NUMNODES];
2757
1da177e4 2758/**
4dc3b16b 2759 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2760 * @node: node whose fallback list we're appending
2761 * @used_node_mask: nodemask_t of already used nodes
2762 *
2763 * We use a number of factors to determine which is the next node that should
2764 * appear on a given node's fallback list. The node should not have appeared
2765 * already in @node's fallback list, and it should be the next closest node
2766 * according to the distance array (which contains arbitrary distance values
2767 * from each node to each node in the system), and should also prefer nodes
2768 * with no CPUs, since presumably they'll have very little allocation pressure
2769 * on them otherwise.
2770 * It returns -1 if no node is found.
2771 */
f0c0b2b8 2772static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2773{
4cf808eb 2774 int n, val;
1da177e4
LT
2775 int min_val = INT_MAX;
2776 int best_node = -1;
a70f7302 2777 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 2778
4cf808eb
LT
2779 /* Use the local node if we haven't already */
2780 if (!node_isset(node, *used_node_mask)) {
2781 node_set(node, *used_node_mask);
2782 return node;
2783 }
1da177e4 2784
37b07e41 2785 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2786
2787 /* Don't want a node to appear more than once */
2788 if (node_isset(n, *used_node_mask))
2789 continue;
2790
1da177e4
LT
2791 /* Use the distance array to find the distance */
2792 val = node_distance(node, n);
2793
4cf808eb
LT
2794 /* Penalize nodes under us ("prefer the next node") */
2795 val += (n < node);
2796
1da177e4 2797 /* Give preference to headless and unused nodes */
a70f7302
RR
2798 tmp = cpumask_of_node(n);
2799 if (!cpumask_empty(tmp))
1da177e4
LT
2800 val += PENALTY_FOR_NODE_WITH_CPUS;
2801
2802 /* Slight preference for less loaded node */
2803 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2804 val += node_load[n];
2805
2806 if (val < min_val) {
2807 min_val = val;
2808 best_node = n;
2809 }
2810 }
2811
2812 if (best_node >= 0)
2813 node_set(best_node, *used_node_mask);
2814
2815 return best_node;
2816}
2817
f0c0b2b8
KH
2818
2819/*
2820 * Build zonelists ordered by node and zones within node.
2821 * This results in maximum locality--normal zone overflows into local
2822 * DMA zone, if any--but risks exhausting DMA zone.
2823 */
2824static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2825{
f0c0b2b8 2826 int j;
1da177e4 2827 struct zonelist *zonelist;
f0c0b2b8 2828
54a6eb5c 2829 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2830 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2831 ;
2832 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2833 MAX_NR_ZONES - 1);
dd1a239f
MG
2834 zonelist->_zonerefs[j].zone = NULL;
2835 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2836}
2837
523b9458
CL
2838/*
2839 * Build gfp_thisnode zonelists
2840 */
2841static void build_thisnode_zonelists(pg_data_t *pgdat)
2842{
523b9458
CL
2843 int j;
2844 struct zonelist *zonelist;
2845
54a6eb5c
MG
2846 zonelist = &pgdat->node_zonelists[1];
2847 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2848 zonelist->_zonerefs[j].zone = NULL;
2849 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2850}
2851
f0c0b2b8
KH
2852/*
2853 * Build zonelists ordered by zone and nodes within zones.
2854 * This results in conserving DMA zone[s] until all Normal memory is
2855 * exhausted, but results in overflowing to remote node while memory
2856 * may still exist in local DMA zone.
2857 */
2858static int node_order[MAX_NUMNODES];
2859
2860static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2861{
f0c0b2b8
KH
2862 int pos, j, node;
2863 int zone_type; /* needs to be signed */
2864 struct zone *z;
2865 struct zonelist *zonelist;
2866
54a6eb5c
MG
2867 zonelist = &pgdat->node_zonelists[0];
2868 pos = 0;
2869 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2870 for (j = 0; j < nr_nodes; j++) {
2871 node = node_order[j];
2872 z = &NODE_DATA(node)->node_zones[zone_type];
2873 if (populated_zone(z)) {
dd1a239f
MG
2874 zoneref_set_zone(z,
2875 &zonelist->_zonerefs[pos++]);
54a6eb5c 2876 check_highest_zone(zone_type);
f0c0b2b8
KH
2877 }
2878 }
f0c0b2b8 2879 }
dd1a239f
MG
2880 zonelist->_zonerefs[pos].zone = NULL;
2881 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2882}
2883
2884static int default_zonelist_order(void)
2885{
2886 int nid, zone_type;
2887 unsigned long low_kmem_size,total_size;
2888 struct zone *z;
2889 int average_size;
2890 /*
88393161 2891 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
f0c0b2b8
KH
2892 * If they are really small and used heavily, the system can fall
2893 * into OOM very easily.
 * This function detects ZONE_DMA/DMA32 size and configures zone order.
f0c0b2b8
KH
2895 */
2896 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2897 low_kmem_size = 0;
2898 total_size = 0;
2899 for_each_online_node(nid) {
2900 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2901 z = &NODE_DATA(nid)->node_zones[zone_type];
2902 if (populated_zone(z)) {
2903 if (zone_type < ZONE_NORMAL)
2904 low_kmem_size += z->present_pages;
2905 total_size += z->present_pages;
e325c90f
DR
2906 } else if (zone_type == ZONE_NORMAL) {
2907 /*
2908 * If any node has only lowmem, then node order
2909 * is preferred to allow kernel allocations
2910 * locally; otherwise, they can easily infringe
2911 * on other nodes when there is an abundance of
2912 * lowmem available to allocate from.
2913 */
2914 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
2915 }
2916 }
2917 }
2918 if (!low_kmem_size || /* there are no DMA area. */
2919 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2920 return ZONELIST_ORDER_NODE;
2921 /*
2922 * look into each node's config.
2923 * If there is a node whose DMA/DMA32 memory is very big area on
2924 * local memory, NODE_ORDER may be suitable.
2925 */
37b07e41
LS
2926 average_size = total_size /
2927 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2928 for_each_online_node(nid) {
2929 low_kmem_size = 0;
2930 total_size = 0;
2931 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2932 z = &NODE_DATA(nid)->node_zones[zone_type];
2933 if (populated_zone(z)) {
2934 if (zone_type < ZONE_NORMAL)
2935 low_kmem_size += z->present_pages;
2936 total_size += z->present_pages;
2937 }
2938 }
2939 if (low_kmem_size &&
2940 total_size > average_size && /* ignore small node */
2941 low_kmem_size > total_size * 70/100)
2942 return ZONELIST_ORDER_NODE;
2943 }
2944 return ZONELIST_ORDER_ZONE;
2945}
2946
2947static void set_zonelist_order(void)
2948{
2949 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2950 current_zonelist_order = default_zonelist_order();
2951 else
2952 current_zonelist_order = user_zonelist_order;
2953}
2954
2955static void build_zonelists(pg_data_t *pgdat)
2956{
2957 int j, node, load;
2958 enum zone_type i;
1da177e4 2959 nodemask_t used_mask;
f0c0b2b8
KH
2960 int local_node, prev_node;
2961 struct zonelist *zonelist;
2962 int order = current_zonelist_order;
1da177e4
LT
2963
2964 /* initialize zonelists */
523b9458 2965 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2966 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2967 zonelist->_zonerefs[0].zone = NULL;
2968 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2969 }
2970
2971 /* NUMA-aware ordering of nodes */
2972 local_node = pgdat->node_id;
62bc62a8 2973 load = nr_online_nodes;
1da177e4
LT
2974 prev_node = local_node;
2975 nodes_clear(used_mask);
f0c0b2b8 2976
f0c0b2b8
KH
2977 memset(node_order, 0, sizeof(node_order));
2978 j = 0;
2979
1da177e4 2980 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2981 int distance = node_distance(local_node, node);
2982
2983 /*
2984 * If another node is sufficiently far away then it is better
2985 * to reclaim pages in a zone before going off node.
2986 */
2987 if (distance > RECLAIM_DISTANCE)
2988 zone_reclaim_mode = 1;
2989
1da177e4
LT
2990 /*
2991 * We don't want to pressure a particular node.
2992 * So adding penalty to the first node in same
2993 * distance group to make it round-robin.
2994 */
9eeff239 2995 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2996 node_load[node] = load;
2997
1da177e4
LT
2998 prev_node = node;
2999 load--;
f0c0b2b8
KH
3000 if (order == ZONELIST_ORDER_NODE)
3001 build_zonelists_in_node_order(pgdat, node);
3002 else
3003 node_order[j++] = node; /* remember order */
3004 }
1da177e4 3005
f0c0b2b8
KH
3006 if (order == ZONELIST_ORDER_ZONE) {
3007 /* calculate node order -- i.e., DMA last! */
3008 build_zonelists_in_zone_order(pgdat, j);
1da177e4 3009 }
523b9458
CL
3010
3011 build_thisnode_zonelists(pgdat);
1da177e4
LT
3012}
3013
9276b1bc 3014/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 3015static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3016{
54a6eb5c
MG
3017 struct zonelist *zonelist;
3018 struct zonelist_cache *zlc;
dd1a239f 3019 struct zoneref *z;
9276b1bc 3020
54a6eb5c
MG
3021 zonelist = &pgdat->node_zonelists[0];
3022 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3023 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
3024 for (z = zonelist->_zonerefs; z->zone; z++)
3025 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
3026}
3027
7aac7898
LS
3028#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3029/*
3030 * Return node id of node used for "local" allocations.
3031 * I.e., first node id of first zone in arg node's generic zonelist.
3032 * Used for initializing percpu 'numa_mem', which is used primarily
3033 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3034 */
3035int local_memory_node(int node)
3036{
3037 struct zone *zone;
3038
3039 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3040 gfp_zone(GFP_KERNEL),
3041 NULL,
3042 &zone);
3043 return zone->node;
3044}
3045#endif
f0c0b2b8 3046
1da177e4
LT
3047#else /* CONFIG_NUMA */
3048
f0c0b2b8
KH
3049static void set_zonelist_order(void)
3050{
3051 current_zonelist_order = ZONELIST_ORDER_ZONE;
3052}
3053
3054static void build_zonelists(pg_data_t *pgdat)
1da177e4 3055{
19655d34 3056 int node, local_node;
54a6eb5c
MG
3057 enum zone_type j;
3058 struct zonelist *zonelist;
1da177e4
LT
3059
3060 local_node = pgdat->node_id;
1da177e4 3061
54a6eb5c
MG
3062 zonelist = &pgdat->node_zonelists[0];
3063 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 3064
54a6eb5c
MG
3065 /*
3066 * Now we build the zonelist so that it contains the zones
3067 * of all the other nodes.
3068 * We don't want to pressure a particular node, so when
3069 * building the zones for node N, we make sure that the
3070 * zones coming right after the local ones are those from
3071 * node N+1 (modulo N)
3072 */
3073 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3074 if (!node_online(node))
3075 continue;
3076 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3077 MAX_NR_ZONES - 1);
1da177e4 3078 }
54a6eb5c
MG
3079 for (node = 0; node < local_node; node++) {
3080 if (!node_online(node))
3081 continue;
3082 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3083 MAX_NR_ZONES - 1);
3084 }
3085
dd1a239f
MG
3086 zonelist->_zonerefs[j].zone = NULL;
3087 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
3088}
3089
9276b1bc 3090/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 3091static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3092{
54a6eb5c 3093 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
3094}
3095
1da177e4
LT
3096#endif /* CONFIG_NUMA */
3097
99dcc3e5
CL
3098/*
3099 * Boot pageset table. One per cpu which is going to be used for all
3100 * zones and all nodes. The parameters will be set in such a way
3101 * that an item put on a list will immediately be handed over to
3102 * the buddy list. This is safe since pageset manipulation is done
3103 * with interrupts disabled.
3104 *
3105 * The boot_pagesets must be kept even after bootup is complete for
3106 * unused processors and/or zones. They do play a role for bootstrapping
3107 * hotplugged processors.
3108 *
3109 * zoneinfo_show() and maybe other functions do
3110 * not check if the processor is online before following the pageset pointer.
3111 * Other parts of the kernel may not check if the zone is available.
3112 */
3113static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3114static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 3115static void setup_zone_pageset(struct zone *zone);
99dcc3e5 3116
4eaf3f64
HL
3117/*
3118 * Global mutex to protect against size modification of zonelists
3119 * as well as to serialize pageset setup for the new populated zone.
3120 */
3121DEFINE_MUTEX(zonelists_mutex);
3122
/* return value is int just to satisfy stop_machine() */
1f522509 3124static __init_refok int __build_all_zonelists(void *data)
1da177e4 3125{
6811378e 3126 int nid;
99dcc3e5 3127 int cpu;
9276b1bc 3128
7f9cfb31
BL
3129#ifdef CONFIG_NUMA
3130 memset(node_load, 0, sizeof(node_load));
3131#endif
9276b1bc 3132 for_each_online_node(nid) {
7ea1530a
CL
3133 pg_data_t *pgdat = NODE_DATA(nid);
3134
3135 build_zonelists(pgdat);
3136 build_zonelist_cache(pgdat);
9276b1bc 3137 }
99dcc3e5
CL
3138
3139 /*
3140 * Initialize the boot_pagesets that are going to be used
3141 * for bootstrapping processors. The real pagesets for
3142 * each zone will be allocated later when the per cpu
3143 * allocator is available.
3144 *
3145 * boot_pagesets are used also for bootstrapping offline
3146 * cpus if the system is already booted because the pagesets
3147 * are needed to initialize allocators on a specific cpu too.
3148 * F.e. the percpu allocator needs the page allocator which
3149 * needs the percpu allocator in order to allocate its pagesets
3150 * (a chicken-egg dilemma).
3151 */
7aac7898 3152 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3153 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3154
7aac7898
LS
3155#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3156 /*
3157 * We now know the "local memory node" for each node--
3158 * i.e., the node of the first zone in the generic zonelist.
3159 * Set up numa_mem percpu variable for on-line cpus. During
3160 * boot, only the boot cpu should be on-line; we'll init the
3161 * secondary cpus' numa_mem as they come on-line. During
3162 * node/memory hotplug, we'll fixup all on-line cpus.
3163 */
3164 if (cpu_online(cpu))
3165 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3166#endif
3167 }
3168
6811378e
YG
3169 return 0;
3170}
3171
4eaf3f64
HL
3172/*
3173 * Called with zonelists_mutex held always
3174 * unless system_state == SYSTEM_BOOTING.
3175 */
1f522509 3176void build_all_zonelists(void *data)
6811378e 3177{
f0c0b2b8
KH
3178 set_zonelist_order();
3179
6811378e 3180 if (system_state == SYSTEM_BOOTING) {
423b41d7 3181 __build_all_zonelists(NULL);
68ad8df4 3182 mminit_verify_zonelist();
6811378e
YG
3183 cpuset_init_current_mems_allowed();
3184 } else {
183ff22b 3185 /* we have to stop all cpus to guarantee there is no user
6811378e 3186 of zonelist */
e9959f0f
KH
3187#ifdef CONFIG_MEMORY_HOTPLUG
3188 if (data)
3189 setup_zone_pageset((struct zone *)data);
3190#endif
3191 stop_machine(__build_all_zonelists, NULL, NULL);
6811378e
YG
3192 /* cpuset refresh routine should be here */
3193 }
bd1e22b8 3194 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3195 /*
3196 * Disable grouping by mobility if the number of pages in the
3197 * system is too low to allow the mechanism to work. It would be
3198 * more accurate, but expensive to check per-zone. This check is
3199 * made on memory-hotadd so a system can start with mobility
3200 * disabled and enable it later
3201 */
d9c23400 3202 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3203 page_group_by_mobility_disabled = 1;
3204 else
3205 page_group_by_mobility_disabled = 0;
3206
3207 printk("Built %i zonelists in %s order, mobility grouping %s. "
3208 "Total pages: %ld\n",
62bc62a8 3209 nr_online_nodes,
f0c0b2b8 3210 zonelist_order_name[current_zonelist_order],
9ef9acb0 3211 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3212 vm_total_pages);
3213#ifdef CONFIG_NUMA
3214 printk("Policy zone: %s\n", zone_names[policy_zone]);
3215#endif
1da177e4
LT
3216}
3217
3218/*
3219 * Helper functions to size the waitqueue hash table.
3220 * Essentially these want to choose hash table sizes sufficiently
3221 * large so that collisions trying to wait on pages are rare.
3222 * But in fact, the number of active page waitqueues on typical
3223 * systems is ridiculously low, less than 200. So this is even
3224 * conservative, even though it seems large.
3225 *
3226 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3227 * waitqueues, i.e. the size of the waitq table given the number of pages.
3228 */
3229#define PAGES_PER_WAITQUEUE 256
3230
cca448fe 3231#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3232static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3233{
3234 unsigned long size = 1;
3235
3236 pages /= PAGES_PER_WAITQUEUE;
3237
3238 while (size < pages)
3239 size <<= 1;
3240
3241 /*
3242 * Once we have dozens or even hundreds of threads sleeping
3243 * on IO we've got bigger problems than wait queue collision.
3244 * Limit the size of the wait table to a reasonable size.
3245 */
3246 size = min(size, 4096UL);
3247
3248 return max(size, 4UL);
3249}
cca448fe
YG
3250#else
3251/*
3252 * A zone's size might be changed by hot-add, so it is not possible to determine
3253 * a suitable size for its wait_table. So we use the maximum size now.
3254 *
3255 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3256 *
3257 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3258 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3259 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3260 *
3261 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3262 * or more by the traditional way. (See above). It equals:
3263 *
3264 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3265 * ia64(16K page size) : = ( 8G + 4M)byte.
3266 * powerpc (64K page size) : = (32G +16M)byte.
3267 */
3268static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3269{
3270 return 4096UL;
3271}
3272#endif
1da177e4
LT
3273
3274/*
3275 * This is an integer logarithm so that shifts can be used later
3276 * to extract the more random high bits from the multiplicative
3277 * hash function before the remainder is taken.
3278 */
3279static inline unsigned long wait_table_bits(unsigned long size)
3280{
3281 return ffz(~size);
3282}
3283
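/*
 * Illustrative user-space sketch (not part of the kernel source): it
 * repeats the arithmetic of wait_table_hash_nr_entries() and
 * wait_table_bits() above for a hypothetical zone size, assuming the
 * !CONFIG_MEMORY_HOTPLUG variant and 4K pages.
 */
#include <stdio.h>

static unsigned long sketch_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= 256;				/* PAGES_PER_WAITQUEUE */
	while (size < pages)
		size <<= 1;			/* round up to a power of two */
	if (size > 4096UL)
		size = 4096UL;			/* cap the table size */
	return size < 4UL ? 4UL : size;		/* at least 4 entries */
}

int main(void)
{
	unsigned long pages = 1048576UL;	/* assumed: a 4GB zone of 4K pages */
	unsigned long entries = sketch_hash_nr_entries(pages);

	/* wait_table_bits() is the base-2 log of the power-of-two size */
	printf("%lu pages -> %lu entries, %d bits\n",
	       pages, entries, __builtin_ctzl(entries));
	return 0;
}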
3284#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3285
56fd56b8 3286/*
d9c23400 3287 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3288 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3289 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3290 * higher will lead to a bigger reserve which will get freed as contiguous
3291 * blocks as reclaim kicks in
3292 */
3293static void setup_zone_migrate_reserve(struct zone *zone)
3294{
3295 unsigned long start_pfn, pfn, end_pfn;
3296 struct page *page;
78986a67
MG
3297 unsigned long block_migratetype;
3298 int reserve;
56fd56b8
MG
3299
3300 /* Get the start pfn, end pfn and the number of blocks to reserve */
3301 start_pfn = zone->zone_start_pfn;
3302 end_pfn = start_pfn + zone->spanned_pages;
41858966 3303 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3304 pageblock_order;
56fd56b8 3305
78986a67
MG
3306 /*
3307 * Reserve blocks are generally in place to help high-order atomic
3308 * allocations that are short-lived. A min_free_kbytes value that
3309 * would result in more than 2 reserve blocks for atomic allocations
3310 * is assumed to be in place to help anti-fragmentation for the
3311 * future allocation of hugepages at runtime.
3312 */
3313 reserve = min(2, reserve);
3314
d9c23400 3315 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3316 if (!pfn_valid(pfn))
3317 continue;
3318 page = pfn_to_page(pfn);
3319
344c790e
AL
3320 /* Watch out for overlapping nodes */
3321 if (page_to_nid(page) != zone_to_nid(zone))
3322 continue;
3323
56fd56b8
MG
 3324 /* Blocks with reserved pages will never be freed, skip them. */
3325 if (PageReserved(page))
3326 continue;
3327
3328 block_migratetype = get_pageblock_migratetype(page);
3329
3330 /* If this block is reserved, account for it */
3331 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3332 reserve--;
3333 continue;
3334 }
3335
3336 /* Suitable for reserving if this block is movable */
3337 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3338 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3339 move_freepages_block(zone, page, MIGRATE_RESERVE);
3340 reserve--;
3341 continue;
3342 }
3343
3344 /*
3345 * If the reserve is met and this is a previous reserved block,
3346 * take it back
3347 */
3348 if (block_migratetype == MIGRATE_RESERVE) {
3349 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3350 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3351 }
3352 }
3353}
ac0e5b7a 3354
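/*
 * Illustrative sketch (not part of the kernel source): the number of
 * MIGRATE_RESERVE pageblocks computed by setup_zone_migrate_reserve()
 * above, for an assumed watermark and an assumed pageblock size of
 * 512 pages (pageblock_order == 9).
 */
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long min_wmark = 5000;		/* hypothetical min_wmark_pages(zone) */
	unsigned long pageblock_nr_pages = 512;	/* hypothetical pageblock size */
	unsigned int pageblock_order = 9;
	unsigned long reserve;

	reserve = ROUNDUP(min_wmark, pageblock_nr_pages) >> pageblock_order;
	if (reserve > 2)			/* at most 2 blocks kept for atomics */
		reserve = 2;
	printf("reserve = %lu pageblock(s)\n", reserve);
	return 0;
}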
1da177e4
LT
3355/*
3356 * Initially all pages are reserved - free ones are freed
3357 * up by free_all_bootmem() once the early boot process is
3358 * done. Non-atomic initialization, single-pass.
3359 */
c09b4240 3360void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3361 unsigned long start_pfn, enum memmap_context context)
1da177e4 3362{
1da177e4 3363 struct page *page;
29751f69
AW
3364 unsigned long end_pfn = start_pfn + size;
3365 unsigned long pfn;
86051ca5 3366 struct zone *z;
1da177e4 3367
22b31eec
HD
3368 if (highest_memmap_pfn < end_pfn - 1)
3369 highest_memmap_pfn = end_pfn - 1;
3370
86051ca5 3371 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3372 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3373 /*
3374 * There can be holes in boot-time mem_map[]s
3375 * handed to this function. They do not
3376 * exist on hotplugged memory.
3377 */
3378 if (context == MEMMAP_EARLY) {
3379 if (!early_pfn_valid(pfn))
3380 continue;
3381 if (!early_pfn_in_nid(pfn, nid))
3382 continue;
3383 }
d41dee36
AW
3384 page = pfn_to_page(pfn);
3385 set_page_links(page, zone, nid, pfn);
708614e6 3386 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3387 init_page_count(page);
1da177e4
LT
3388 reset_page_mapcount(page);
3389 SetPageReserved(page);
b2a0ac88
MG
3390 /*
3391 * Mark the block movable so that blocks are reserved for
3392 * movable at startup. This will force kernel allocations
3393 * to reserve their blocks rather than leaking throughout
3394 * the address space during boot when many long-lived
56fd56b8
MG
3395 * kernel allocations are made. Later some blocks near
3396 * the start are marked MIGRATE_RESERVE by
3397 * setup_zone_migrate_reserve()
86051ca5
KH
3398 *
 3399 * The bitmap is created for the zone's valid pfn range, but the memmap
 3400 * can be created for invalid pages (for alignment), so
 3401 * check here that we do not call set_pageblock_migratetype() on a
 3402 * pfn outside the zone.
b2a0ac88 3403 */
86051ca5
KH
3404 if ((z->zone_start_pfn <= pfn)
3405 && (pfn < z->zone_start_pfn + z->spanned_pages)
3406 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 3407 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 3408
1da177e4
LT
3409 INIT_LIST_HEAD(&page->lru);
3410#ifdef WANT_PAGE_VIRTUAL
3411 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3412 if (!is_highmem_idx(zone))
3212c6be 3413 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 3414#endif
1da177e4
LT
3415 }
3416}
3417
1e548deb 3418static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 3419{
b2a0ac88
MG
3420 int order, t;
3421 for_each_migratetype_order(order, t) {
3422 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
3423 zone->free_area[order].nr_free = 0;
3424 }
3425}
3426
3427#ifndef __HAVE_ARCH_MEMMAP_INIT
3428#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 3429 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
3430#endif
3431
1d6f4e60 3432static int zone_batchsize(struct zone *zone)
e7c8d5c9 3433{
3a6be87f 3434#ifdef CONFIG_MMU
e7c8d5c9
CL
3435 int batch;
3436
3437 /*
3438 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 3439 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
3440 *
3441 * OK, so we don't know how big the cache is. So guess.
3442 */
3443 batch = zone->present_pages / 1024;
ba56e91c
SR
3444 if (batch * PAGE_SIZE > 512 * 1024)
3445 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
3446 batch /= 4; /* We effectively *= 4 below */
3447 if (batch < 1)
3448 batch = 1;
3449
3450 /*
0ceaacc9
NP
3451 * Clamp the batch to a 2^n - 1 value. Having a power
3452 * of 2 value was found to be more likely to have
3453 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 3454 *
0ceaacc9
NP
3455 * For example if 2 tasks are alternately allocating
3456 * batches of pages, one task can end up with a lot
3457 * of pages of one half of the possible page colors
3458 * and the other with pages of the other colors.
e7c8d5c9 3459 */
9155203a 3460 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 3461
e7c8d5c9 3462 return batch;
3a6be87f
DH
3463
3464#else
3465 /* The deferral and batching of frees should be suppressed under NOMMU
3466 * conditions.
3467 *
3468 * The problem is that NOMMU needs to be able to allocate large chunks
3469 * of contiguous memory as there's no hardware page translation to
3470 * assemble apparent contiguous memory from discontiguous pages.
3471 *
3472 * Queueing large contiguous runs of pages for batching, however,
3473 * causes the pages to actually be freed in smaller chunks. As there
3474 * can be a significant delay between the individual batches being
3475 * recycled, this leads to the once large chunks of space being
3476 * fragmented and becoming unavailable for high-order allocations.
3477 */
3478 return 0;
3479#endif
e7c8d5c9
CL
3480}
3481
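/*
 * Illustrative sketch (not part of the kernel source): zone_batchsize()
 * above for a hypothetical 1GB zone of 4K pages (262144 present pages)
 * on an MMU kernel; the batch < 1 clamp is omitted because it cannot
 * trigger for this input.
 */
#include <stdio.h>

int main(void)
{
	unsigned long present_pages = 262144;	/* assumed zone size */
	unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned long batch;

	batch = present_pages / 1024;		/* ~1/1000th of the zone ... */
	if (batch * page_size > 512 * 1024)	/* ... but no more than 512KB */
		batch = (512 * 1024) / page_size;
	batch /= 4;				/* pcp->high ends up at 6 * batch */

	/* clamp to a 2^n - 1 value, as in rounddown_pow_of_two(batch + batch/2) - 1 */
	batch += batch / 2;
	while (batch & (batch - 1))
		batch &= batch - 1;
	batch -= 1;

	printf("batch = %lu, pcp high = %lu\n", batch, 6 * batch);
	return 0;
}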
b69a7288 3482static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
3483{
3484 struct per_cpu_pages *pcp;
5f8dcc21 3485 int migratetype;
2caaad41 3486
1c6fe946
MD
3487 memset(p, 0, sizeof(*p));
3488
3dfa5721 3489 pcp = &p->pcp;
2caaad41 3490 pcp->count = 0;
2caaad41
CL
3491 pcp->high = 6 * batch;
3492 pcp->batch = max(1UL, 1 * batch);
5f8dcc21
MG
3493 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3494 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
3495}
3496
8ad4b1fb
RS
3497/*
3498 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3499 * to the value high for the pageset p.
3500 */
3501
3502static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3503 unsigned long high)
3504{
3505 struct per_cpu_pages *pcp;
3506
3dfa5721 3507 pcp = &p->pcp;
8ad4b1fb
RS
3508 pcp->high = high;
3509 pcp->batch = max(1UL, high/4);
3510 if ((high/4) > (PAGE_SHIFT * 8))
3511 pcp->batch = PAGE_SHIFT * 8;
3512}
3513
319774e2
WF
3514static __meminit void setup_zone_pageset(struct zone *zone)
3515{
3516 int cpu;
3517
3518 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3519
3520 for_each_possible_cpu(cpu) {
3521 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3522
3523 setup_pageset(pcp, zone_batchsize(zone));
3524
3525 if (percpu_pagelist_fraction)
3526 setup_pagelist_highmark(pcp,
3527 (zone->present_pages /
3528 percpu_pagelist_fraction));
3529 }
3530}
3531
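/*
 * Illustrative sketch (not part of the kernel source): the per-cpu high
 * and batch values that setup_pagelist_highmark() above would produce
 * when vm.percpu_pagelist_fraction is set, for assumed numbers (a
 * 262144-page zone, a fraction of 8, PAGE_SHIFT of 12).
 */
#include <stdio.h>

int main(void)
{
	unsigned long present_pages = 262144;	/* assumed zone->present_pages */
	unsigned long fraction = 8;		/* assumed sysctl value */
	unsigned long page_shift = 12;		/* assumed PAGE_SHIFT */
	unsigned long high, batch;

	high = present_pages / fraction;	/* each cpu may cache 1/8th of the zone */
	batch = high / 4 < 1 ? 1 : high / 4;
	if (high / 4 > page_shift * 8)		/* same cap as the kernel code */
		batch = page_shift * 8;
	printf("high = %lu, batch = %lu\n", high, batch);
	return 0;
}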
2caaad41 3532/*
99dcc3e5
CL
3533 * Allocate per cpu pagesets and initialize them.
3534 * Before this call only boot pagesets were available.
e7c8d5c9 3535 */
99dcc3e5 3536void __init setup_per_cpu_pageset(void)
e7c8d5c9 3537{
99dcc3e5 3538 struct zone *zone;
e7c8d5c9 3539
319774e2
WF
3540 for_each_populated_zone(zone)
3541 setup_zone_pageset(zone);
e7c8d5c9
CL
3542}
3543
577a32f6 3544static noinline __init_refok
cca448fe 3545int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
3546{
3547 int i;
3548 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 3549 size_t alloc_size;
ed8ece2e
DH
3550
3551 /*
3552 * The per-page waitqueue mechanism uses hashed waitqueues
3553 * per zone.
3554 */
02b694de
YG
3555 zone->wait_table_hash_nr_entries =
3556 wait_table_hash_nr_entries(zone_size_pages);
3557 zone->wait_table_bits =
3558 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
3559 alloc_size = zone->wait_table_hash_nr_entries
3560 * sizeof(wait_queue_head_t);
3561
cd94b9db 3562 if (!slab_is_available()) {
cca448fe
YG
3563 zone->wait_table = (wait_queue_head_t *)
3564 alloc_bootmem_node(pgdat, alloc_size);
3565 } else {
3566 /*
3567 * This case means that a zone whose size was 0 gets new memory
3568 * via memory hot-add.
3569 * But it may be the case that a new node was hot-added. In
3570 * this case vmalloc() will not be able to use this new node's
3571 * memory - this wait_table must be initialized to use this new
3572 * node itself as well.
3573 * To use this new node's memory, further consideration will be
3574 * necessary.
3575 */
8691f3a7 3576 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
3577 }
3578 if (!zone->wait_table)
3579 return -ENOMEM;
ed8ece2e 3580
02b694de 3581 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 3582 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
3583
3584 return 0;
ed8ece2e
DH
3585}
3586
112067f0
SL
3587static int __zone_pcp_update(void *data)
3588{
3589 struct zone *zone = data;
3590 int cpu;
3591 unsigned long batch = zone_batchsize(zone), flags;
3592
2d30a1f6 3593 for_each_possible_cpu(cpu) {
112067f0
SL
3594 struct per_cpu_pageset *pset;
3595 struct per_cpu_pages *pcp;
3596
99dcc3e5 3597 pset = per_cpu_ptr(zone->pageset, cpu);
112067f0
SL
3598 pcp = &pset->pcp;
3599
3600 local_irq_save(flags);
5f8dcc21 3601 free_pcppages_bulk(zone, pcp->count, pcp);
112067f0
SL
3602 setup_pageset(pset, batch);
3603 local_irq_restore(flags);
3604 }
3605 return 0;
3606}
3607
3608void zone_pcp_update(struct zone *zone)
3609{
3610 stop_machine(__zone_pcp_update, zone, NULL);
3611}
3612
c09b4240 3613static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 3614{
99dcc3e5
CL
3615 /*
3616 * per cpu subsystem is not up at this point. The following code
3617 * relies on the ability of the linker to provide the
3618 * offset of a (static) per cpu variable into the per cpu area.
3619 */
3620 zone->pageset = &boot_pageset;
ed8ece2e 3621
f5335c0f 3622 if (zone->present_pages)
99dcc3e5
CL
3623 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3624 zone->name, zone->present_pages,
3625 zone_batchsize(zone));
ed8ece2e
DH
3626}
3627
718127cc
YG
3628__meminit int init_currently_empty_zone(struct zone *zone,
3629 unsigned long zone_start_pfn,
a2f3aa02
DH
3630 unsigned long size,
3631 enum memmap_context context)
ed8ece2e
DH
3632{
3633 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
3634 int ret;
3635 ret = zone_wait_table_init(zone, size);
3636 if (ret)
3637 return ret;
ed8ece2e
DH
3638 pgdat->nr_zones = zone_idx(zone) + 1;
3639
ed8ece2e
DH
3640 zone->zone_start_pfn = zone_start_pfn;
3641
708614e6
MG
3642 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3643 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3644 pgdat->node_id,
3645 (unsigned long)zone_idx(zone),
3646 zone_start_pfn, (zone_start_pfn + size));
3647
1e548deb 3648 zone_init_free_lists(zone);
718127cc
YG
3649
3650 return 0;
ed8ece2e
DH
3651}
3652
c713216d
MG
3653#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3654/*
3655 * Basic iterator support. Return the first range of PFNs for a node
3656 * Note: nid == MAX_NUMNODES returns first region regardless of node
3657 */
a3142c8e 3658static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
3659{
3660 int i;
3661
3662 for (i = 0; i < nr_nodemap_entries; i++)
3663 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3664 return i;
3665
3666 return -1;
3667}
3668
3669/*
3670 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 3671 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 3672 */
a3142c8e 3673static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
3674{
3675 for (index = index + 1; index < nr_nodemap_entries; index++)
3676 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3677 return index;
3678
3679 return -1;
3680}
3681
3682#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3683/*
3684 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3685 * Architectures may implement their own version but if add_active_range()
3686 * was used and there are no special requirements, this is a convenient
3687 * alternative
3688 */
f2dbcfa7 3689int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d
MG
3690{
3691 int i;
3692
3693 for (i = 0; i < nr_nodemap_entries; i++) {
3694 unsigned long start_pfn = early_node_map[i].start_pfn;
3695 unsigned long end_pfn = early_node_map[i].end_pfn;
3696
3697 if (start_pfn <= pfn && pfn < end_pfn)
3698 return early_node_map[i].nid;
3699 }
cc2559bc
KH
3700 /* This is a memory hole */
3701 return -1;
c713216d
MG
3702}
3703#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3704
f2dbcfa7
KH
3705int __meminit early_pfn_to_nid(unsigned long pfn)
3706{
cc2559bc
KH
3707 int nid;
3708
3709 nid = __early_pfn_to_nid(pfn);
3710 if (nid >= 0)
3711 return nid;
3712 /* just returns 0 */
3713 return 0;
f2dbcfa7
KH
3714}
3715
cc2559bc
KH
3716#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3717bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3718{
3719 int nid;
3720
3721 nid = __early_pfn_to_nid(pfn);
3722 if (nid >= 0 && nid != node)
3723 return false;
3724 return true;
3725}
3726#endif
f2dbcfa7 3727
c713216d
MG
3728/* Basic iterator support to walk early_node_map[] */
3729#define for_each_active_range_index_in_nid(i, nid) \
3730 for (i = first_active_region_index_in_nid(nid); i != -1; \
3731 i = next_active_region_index_in_nid(i, nid))
3732
3733/**
3734 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
3735 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3736 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
3737 *
3738 * If an architecture guarantees that all ranges registered with
3739 * add_active_ranges() contain no holes and may be freed, this
 3740 * function may be used instead of calling free_bootmem() manually.
3741 */
3742void __init free_bootmem_with_active_regions(int nid,
3743 unsigned long max_low_pfn)
3744{
3745 int i;
3746
3747 for_each_active_range_index_in_nid(i, nid) {
3748 unsigned long size_pages = 0;
3749 unsigned long end_pfn = early_node_map[i].end_pfn;
3750
3751 if (early_node_map[i].start_pfn >= max_low_pfn)
3752 continue;
3753
3754 if (end_pfn > max_low_pfn)
3755 end_pfn = max_low_pfn;
3756
3757 size_pages = end_pfn - early_node_map[i].start_pfn;
3758 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3759 PFN_PHYS(early_node_map[i].start_pfn),
3760 size_pages << PAGE_SHIFT);
3761 }
3762}
3763
edbe7d23 3764#ifdef CONFIG_HAVE_MEMBLOCK
cc289894
YL
3765/*
3766 * Basic iterator support. Return the last range of PFNs for a node
3767 * Note: nid == MAX_NUMNODES returns last region regardless of node
3768 */
3769static int __meminit last_active_region_index_in_nid(int nid)
3770{
3771 int i;
3772
3773 for (i = nr_nodemap_entries - 1; i >= 0; i--)
3774 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3775 return i;
3776
3777 return -1;
3778}
3779
3780/*
3781 * Basic iterator support. Return the previous active range of PFNs for a node
 3782 * Note: nid == MAX_NUMNODES returns previous region regardless of node
3783 */
3784static int __meminit previous_active_region_index_in_nid(int index, int nid)
3785{
3786 for (index = index - 1; index >= 0; index--)
3787 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3788 return index;
3789
3790 return -1;
3791}
3792
3793#define for_each_active_range_index_in_nid_reverse(i, nid) \
3794 for (i = last_active_region_index_in_nid(nid); i != -1; \
3795 i = previous_active_region_index_in_nid(i, nid))
3796
edbe7d23
YL
3797u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3798 u64 goal, u64 limit)
3799{
3800 int i;
3801
3802 /* Need to go over early_node_map to find out good range for node */
1a4a678b 3803 for_each_active_range_index_in_nid_reverse(i, nid) {
edbe7d23
YL
3804 u64 addr;
3805 u64 ei_start, ei_last;
3806 u64 final_start, final_end;
3807
3808 ei_last = early_node_map[i].end_pfn;
3809 ei_last <<= PAGE_SHIFT;
3810 ei_start = early_node_map[i].start_pfn;
3811 ei_start <<= PAGE_SHIFT;
3812
3813 final_start = max(ei_start, goal);
3814 final_end = min(ei_last, limit);
3815
3816 if (final_start >= final_end)
3817 continue;
3818
3819 addr = memblock_find_in_range(final_start, final_end, size, align);
3820
3821 if (addr == MEMBLOCK_ERROR)
3822 continue;
3823
3824 return addr;
3825 }
3826
3827 return MEMBLOCK_ERROR;
3828}
3829#endif
3830
08677214
YL
3831int __init add_from_early_node_map(struct range *range, int az,
3832 int nr_range, int nid)
3833{
3834 int i;
3835 u64 start, end;
3836
3837 /* need to go over early_node_map to find out good range for node */
3838 for_each_active_range_index_in_nid(i, nid) {
3839 start = early_node_map[i].start_pfn;
3840 end = early_node_map[i].end_pfn;
3841 nr_range = add_range(range, az, nr_range, start, end);
3842 }
3843 return nr_range;
3844}
3845
b5bc6c0e
YL
3846void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3847{
3848 int i;
d52d53b8 3849 int ret;
b5bc6c0e 3850
d52d53b8
YL
3851 for_each_active_range_index_in_nid(i, nid) {
3852 ret = work_fn(early_node_map[i].start_pfn,
3853 early_node_map[i].end_pfn, data);
3854 if (ret)
3855 break;
3856 }
b5bc6c0e 3857}
c713216d
MG
3858/**
3859 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 3860 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
3861 *
3862 * If an architecture guarantees that all ranges registered with
3863 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 3864 * function may be used instead of calling memory_present() manually.
c713216d
MG
3865 */
3866void __init sparse_memory_present_with_active_regions(int nid)
3867{
3868 int i;
3869
3870 for_each_active_range_index_in_nid(i, nid)
3871 memory_present(early_node_map[i].nid,
3872 early_node_map[i].start_pfn,
3873 early_node_map[i].end_pfn);
3874}
3875
3876/**
3877 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
3878 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3879 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3880 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
3881 *
3882 * It returns the start and end page frame of a node based on information
3883 * provided by an arch calling add_active_range(). If called for a node
3884 * with no available memory, a warning is printed and the start and end
88ca3b94 3885 * PFNs will be 0.
c713216d 3886 */
a3142c8e 3887void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
3888 unsigned long *start_pfn, unsigned long *end_pfn)
3889{
3890 int i;
3891 *start_pfn = -1UL;
3892 *end_pfn = 0;
3893
3894 for_each_active_range_index_in_nid(i, nid) {
3895 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3896 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3897 }
3898
633c0666 3899 if (*start_pfn == -1UL)
c713216d 3900 *start_pfn = 0;
c713216d
MG
3901}
3902
2a1e274a
MG
3903/*
3904 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 3905 * assumption is made that zones within a node are ordered in monotonically
3906 * increasing memory addresses so that the "highest" populated zone is used
3907 */
b69a7288 3908static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
3909{
3910 int zone_index;
3911 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3912 if (zone_index == ZONE_MOVABLE)
3913 continue;
3914
3915 if (arch_zone_highest_possible_pfn[zone_index] >
3916 arch_zone_lowest_possible_pfn[zone_index])
3917 break;
3918 }
3919
3920 VM_BUG_ON(zone_index == -1);
3921 movable_zone = zone_index;
3922}
3923
3924/*
3925 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3926 * because it is sized independent of architecture. Unlike the other zones,
3927 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3928 * in each node depending on the size of each node and how evenly kernelcore
3929 * is distributed. This helper function adjusts the zone ranges
3930 * provided by the architecture for a given node by using the end of the
3931 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3932 * zones within a node are in order of monotonically increasing memory addresses
3933 */
b69a7288 3934static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
3935 unsigned long zone_type,
3936 unsigned long node_start_pfn,
3937 unsigned long node_end_pfn,
3938 unsigned long *zone_start_pfn,
3939 unsigned long *zone_end_pfn)
3940{
3941 /* Only adjust if ZONE_MOVABLE is on this node */
3942 if (zone_movable_pfn[nid]) {
3943 /* Size ZONE_MOVABLE */
3944 if (zone_type == ZONE_MOVABLE) {
3945 *zone_start_pfn = zone_movable_pfn[nid];
3946 *zone_end_pfn = min(node_end_pfn,
3947 arch_zone_highest_possible_pfn[movable_zone]);
3948
3949 /* Adjust for ZONE_MOVABLE starting within this range */
3950 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3951 *zone_end_pfn > zone_movable_pfn[nid]) {
3952 *zone_end_pfn = zone_movable_pfn[nid];
3953
3954 /* Check if this whole range is within ZONE_MOVABLE */
3955 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3956 *zone_start_pfn = *zone_end_pfn;
3957 }
3958}
3959
c713216d
MG
3960/*
3961 * Return the number of pages a zone spans in a node, including holes
3962 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3963 */
6ea6e688 3964static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3965 unsigned long zone_type,
3966 unsigned long *ignored)
3967{
3968 unsigned long node_start_pfn, node_end_pfn;
3969 unsigned long zone_start_pfn, zone_end_pfn;
3970
3971 /* Get the start and end of the node and zone */
3972 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3973 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3974 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
3975 adjust_zone_range_for_zone_movable(nid, zone_type,
3976 node_start_pfn, node_end_pfn,
3977 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
3978
3979 /* Check that this node has pages within the zone's required range */
3980 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3981 return 0;
3982
3983 /* Move the zone boundaries inside the node if necessary */
3984 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3985 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3986
3987 /* Return the spanned pages */
3988 return zone_end_pfn - zone_start_pfn;
3989}
3990
3991/*
3992 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3993 * then all holes in the requested range will be accounted for.
c713216d 3994 */
32996250 3995unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
3996 unsigned long range_start_pfn,
3997 unsigned long range_end_pfn)
3998{
3999 int i = 0;
4000 unsigned long prev_end_pfn = 0, hole_pages = 0;
4001 unsigned long start_pfn;
4002
4003 /* Find the end_pfn of the first active range of pfns in the node */
4004 i = first_active_region_index_in_nid(nid);
4005 if (i == -1)
4006 return 0;
4007
b5445f95
MG
4008 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4009
9c7cd687
MG
4010 /* Account for ranges before physical memory on this node */
4011 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 4012 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
4013
4014 /* Find all holes for the zone within the node */
4015 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
4016
4017 /* No need to continue if prev_end_pfn is outside the zone */
4018 if (prev_end_pfn >= range_end_pfn)
4019 break;
4020
4021 /* Make sure the end of the zone is not within the hole */
4022 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4023 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
4024
 4025 /* Update the hole size count and move on */
4026 if (start_pfn > range_start_pfn) {
4027 BUG_ON(prev_end_pfn > start_pfn);
4028 hole_pages += start_pfn - prev_end_pfn;
4029 }
4030 prev_end_pfn = early_node_map[i].end_pfn;
4031 }
4032
9c7cd687
MG
4033 /* Account for ranges past physical memory on this node */
4034 if (range_end_pfn > prev_end_pfn)
0c6cb974 4035 hole_pages += range_end_pfn -
9c7cd687
MG
4036 max(range_start_pfn, prev_end_pfn);
4037
c713216d
MG
4038 return hole_pages;
4039}
4040
4041/**
4042 * absent_pages_in_range - Return number of page frames in holes within a range
4043 * @start_pfn: The start PFN to start searching for holes
4044 * @end_pfn: The end PFN to stop searching for holes
4045 *
 88ca3b94 4046 * It returns the number of page frames in memory holes within a range.
c713216d
MG
4047 */
4048unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4049 unsigned long end_pfn)
4050{
4051 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4052}
4053
4054/* Return the number of page frames in holes in a zone on a node */
6ea6e688 4055static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4056 unsigned long zone_type,
4057 unsigned long *ignored)
4058{
9c7cd687
MG
4059 unsigned long node_start_pfn, node_end_pfn;
4060 unsigned long zone_start_pfn, zone_end_pfn;
4061
4062 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4063 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
4064 node_start_pfn);
4065 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
4066 node_end_pfn);
4067
2a1e274a
MG
4068 adjust_zone_range_for_zone_movable(nid, zone_type,
4069 node_start_pfn, node_end_pfn,
4070 &zone_start_pfn, &zone_end_pfn);
9c7cd687 4071 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 4072}
0e0b864e 4073
c713216d 4074#else
6ea6e688 4075static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4076 unsigned long zone_type,
4077 unsigned long *zones_size)
4078{
4079 return zones_size[zone_type];
4080}
4081
6ea6e688 4082static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4083 unsigned long zone_type,
4084 unsigned long *zholes_size)
4085{
4086 if (!zholes_size)
4087 return 0;
4088
4089 return zholes_size[zone_type];
4090}
0e0b864e 4091
c713216d
MG
4092#endif
4093
a3142c8e 4094static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
4095 unsigned long *zones_size, unsigned long *zholes_size)
4096{
4097 unsigned long realtotalpages, totalpages = 0;
4098 enum zone_type i;
4099
4100 for (i = 0; i < MAX_NR_ZONES; i++)
4101 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4102 zones_size);
4103 pgdat->node_spanned_pages = totalpages;
4104
4105 realtotalpages = totalpages;
4106 for (i = 0; i < MAX_NR_ZONES; i++)
4107 realtotalpages -=
4108 zone_absent_pages_in_node(pgdat->node_id, i,
4109 zholes_size);
4110 pgdat->node_present_pages = realtotalpages;
4111 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4112 realtotalpages);
4113}
4114
835c134e
MG
4115#ifndef CONFIG_SPARSEMEM
4116/*
4117 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
 4118 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
4119 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
4120 * round what is now in bits to nearest long in bits, then return it in
4121 * bytes.
4122 */
4123static unsigned long __init usemap_size(unsigned long zonesize)
4124{
4125 unsigned long usemapsize;
4126
d9c23400
MG
4127 usemapsize = roundup(zonesize, pageblock_nr_pages);
4128 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
4129 usemapsize *= NR_PAGEBLOCK_BITS;
4130 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4131
4132 return usemapsize / 8;
4133}
4134
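/*
 * Illustrative sketch (not part of the kernel source): the usemap sizing
 * arithmetic of usemap_size() above, for an assumed 262144-page zone, a
 * pageblock of 512 pages (pageblock_order == 9) and 4 bits of flags per
 * pageblock (NR_PAGEBLOCK_BITS).
 */
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long zonesize = 262144;	/* assumed spanned pages */
	unsigned long pageblock_nr_pages = 512;
	unsigned int pageblock_order = 9;
	unsigned long bits_per_block = 4;	/* assumed NR_PAGEBLOCK_BITS */
	unsigned long usemapsize;

	usemapsize = ROUNDUP(zonesize, pageblock_nr_pages);
	usemapsize >>= pageblock_order;		/* number of pageblocks */
	usemapsize *= bits_per_block;		/* bits of pageblock flags */
	usemapsize = ROUNDUP(usemapsize, 8 * sizeof(unsigned long));
	printf("usemap = %lu bytes\n", usemapsize / 8);
	return 0;
}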
4135static void __init setup_usemap(struct pglist_data *pgdat,
4136 struct zone *zone, unsigned long zonesize)
4137{
4138 unsigned long usemapsize = usemap_size(zonesize);
4139 zone->pageblock_flags = NULL;
58a01a45 4140 if (usemapsize)
835c134e 4141 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
835c134e
MG
4142}
4143#else
fa9f90be 4144static inline void setup_usemap(struct pglist_data *pgdat,
835c134e
MG
4145 struct zone *zone, unsigned long zonesize) {}
4146#endif /* CONFIG_SPARSEMEM */
4147
d9c23400 4148#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c
MG
4149
4150/* Return a sensible default order for the pageblock size. */
4151static inline int pageblock_default_order(void)
4152{
4153 if (HPAGE_SHIFT > PAGE_SHIFT)
4154 return HUGETLB_PAGE_ORDER;
4155
4156 return MAX_ORDER-1;
4157}
4158
d9c23400
MG
4159/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4160static inline void __init set_pageblock_order(unsigned int order)
4161{
4162 /* Check that pageblock_nr_pages has not already been setup */
4163 if (pageblock_order)
4164 return;
4165
4166 /*
4167 * Assume the largest contiguous order of interest is a huge page.
4168 * This value may be variable depending on boot parameters on IA64
4169 */
4170 pageblock_order = order;
4171}
4172#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4173
ba72cb8c
MG
4174/*
4175 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4176 * and pageblock_default_order() are unused as pageblock_order is set
4177 * at compile-time. See include/linux/pageblock-flags.h for the values of
4178 * pageblock_order based on the kernel config
4179 */
4180static inline int pageblock_default_order(unsigned int order)
4181{
4182 return MAX_ORDER-1;
4183}
d9c23400
MG
4184#define set_pageblock_order(x) do {} while (0)
4185
4186#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4187
1da177e4
LT
4188/*
4189 * Set up the zone data structures:
4190 * - mark all pages reserved
4191 * - mark all memory queues empty
4192 * - clear the memory bitmaps
4193 */
b5a0e011 4194static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
4195 unsigned long *zones_size, unsigned long *zholes_size)
4196{
2f1b6248 4197 enum zone_type j;
ed8ece2e 4198 int nid = pgdat->node_id;
1da177e4 4199 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 4200 int ret;
1da177e4 4201
208d54e5 4202 pgdat_resize_init(pgdat);
1da177e4
LT
4203 pgdat->nr_zones = 0;
4204 init_waitqueue_head(&pgdat->kswapd_wait);
4205 pgdat->kswapd_max_order = 0;
52d4b9ac 4206 pgdat_page_cgroup_init(pgdat);
1da177e4
LT
4207
4208 for (j = 0; j < MAX_NR_ZONES; j++) {
4209 struct zone *zone = pgdat->node_zones + j;
0e0b864e 4210 unsigned long size, realsize, memmap_pages;
b69408e8 4211 enum lru_list l;
1da177e4 4212
c713216d
MG
4213 size = zone_spanned_pages_in_node(nid, j, zones_size);
4214 realsize = size - zone_absent_pages_in_node(nid, j,
4215 zholes_size);
1da177e4 4216
0e0b864e
MG
4217 /*
4218 * Adjust realsize so that it accounts for how much memory
4219 * is used by this zone for memmap. This affects the watermark
4220 * and per-cpu initialisations
4221 */
f7232154
JW
4222 memmap_pages =
4223 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
0e0b864e
MG
4224 if (realsize >= memmap_pages) {
4225 realsize -= memmap_pages;
5594c8c8
YL
4226 if (memmap_pages)
4227 printk(KERN_DEBUG
4228 " %s zone: %lu pages used for memmap\n",
4229 zone_names[j], memmap_pages);
0e0b864e
MG
4230 } else
4231 printk(KERN_WARNING
4232 " %s zone: %lu pages exceeds realsize %lu\n",
4233 zone_names[j], memmap_pages, realsize);
4234
6267276f
CL
4235 /* Account for reserved pages */
4236 if (j == 0 && realsize > dma_reserve) {
0e0b864e 4237 realsize -= dma_reserve;
d903ef9f 4238 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4239 zone_names[0], dma_reserve);
0e0b864e
MG
4240 }
4241
98d2b0eb 4242 if (!is_highmem_idx(j))
1da177e4
LT
4243 nr_kernel_pages += realsize;
4244 nr_all_pages += realsize;
4245
4246 zone->spanned_pages = size;
4247 zone->present_pages = realsize;
9614634f 4248#ifdef CONFIG_NUMA
d5f541ed 4249 zone->node = nid;
8417bba4 4250 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 4251 / 100;
0ff38490 4252 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 4253#endif
1da177e4
LT
4254 zone->name = zone_names[j];
4255 spin_lock_init(&zone->lock);
4256 spin_lock_init(&zone->lru_lock);
bdc8cb98 4257 zone_seqlock_init(zone);
1da177e4 4258 zone->zone_pgdat = pgdat;
1da177e4 4259
ed8ece2e 4260 zone_pcp_init(zone);
b69408e8
CL
4261 for_each_lru(l) {
4262 INIT_LIST_HEAD(&zone->lru[l].list);
f8629631 4263 zone->reclaim_stat.nr_saved_scan[l] = 0;
b69408e8 4264 }
6e901571
KM
4265 zone->reclaim_stat.recent_rotated[0] = 0;
4266 zone->reclaim_stat.recent_rotated[1] = 0;
4267 zone->reclaim_stat.recent_scanned[0] = 0;
4268 zone->reclaim_stat.recent_scanned[1] = 0;
2244b95a 4269 zap_zone_vm_stats(zone);
e815af95 4270 zone->flags = 0;
1da177e4
LT
4271 if (!size)
4272 continue;
4273
ba72cb8c 4274 set_pageblock_order(pageblock_default_order());
835c134e 4275 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
4276 ret = init_currently_empty_zone(zone, zone_start_pfn,
4277 size, MEMMAP_EARLY);
718127cc 4278 BUG_ON(ret);
76cdd58e 4279 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 4280 zone_start_pfn += size;
1da177e4
LT
4281 }
4282}
4283
577a32f6 4284static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 4285{
1da177e4
LT
4286 /* Skip empty nodes */
4287 if (!pgdat->node_spanned_pages)
4288 return;
4289
d41dee36 4290#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
4291 /* ia64 gets its own node_mem_map, before this, without bootmem */
4292 if (!pgdat->node_mem_map) {
e984bb43 4293 unsigned long size, start, end;
d41dee36
AW
4294 struct page *map;
4295
e984bb43
BP
4296 /*
4297 * The zone's endpoints aren't required to be MAX_ORDER
4298 * aligned but the node_mem_map endpoints must be in order
4299 * for the buddy allocator to function correctly.
4300 */
4301 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4302 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4303 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4304 size = (end - start) * sizeof(struct page);
6f167ec7
DH
4305 map = alloc_remap(pgdat->node_id, size);
4306 if (!map)
4307 map = alloc_bootmem_node(pgdat, size);
e984bb43 4308 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 4309 }
12d810c1 4310#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
4311 /*
4312 * With no DISCONTIG, the global mem_map is just set as node 0's
4313 */
c713216d 4314 if (pgdat == NODE_DATA(0)) {
1da177e4 4315 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
4316#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4317 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 4318 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
4319#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4320 }
1da177e4 4321#endif
d41dee36 4322#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
4323}
4324
9109fb7b
JW
4325void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4326 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 4327{
9109fb7b
JW
4328 pg_data_t *pgdat = NODE_DATA(nid);
4329
1da177e4
LT
4330 pgdat->node_id = nid;
4331 pgdat->node_start_pfn = node_start_pfn;
c713216d 4332 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
4333
4334 alloc_node_mem_map(pgdat);
e8c27ac9
YL
4335#ifdef CONFIG_FLAT_NODE_MEM_MAP
4336 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4337 nid, (unsigned long)pgdat,
4338 (unsigned long)pgdat->node_mem_map);
4339#endif
1da177e4
LT
4340
4341 free_area_init_core(pgdat, zones_size, zholes_size);
4342}
4343
c713216d 4344#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
4345
4346#if MAX_NUMNODES > 1
4347/*
4348 * Figure out the number of possible node ids.
4349 */
4350static void __init setup_nr_node_ids(void)
4351{
4352 unsigned int node;
4353 unsigned int highest = 0;
4354
4355 for_each_node_mask(node, node_possible_map)
4356 highest = node;
4357 nr_node_ids = highest + 1;
4358}
4359#else
4360static inline void setup_nr_node_ids(void)
4361{
4362}
4363#endif
4364
c713216d
MG
4365/**
4366 * add_active_range - Register a range of PFNs backed by physical memory
4367 * @nid: The node ID the range resides on
4368 * @start_pfn: The start PFN of the available physical memory
4369 * @end_pfn: The end PFN of the available physical memory
4370 *
4371 * These ranges are stored in an early_node_map[] and later used by
4372 * free_area_init_nodes() to calculate zone sizes and holes. If the
4373 * range spans a memory hole, it is up to the architecture to ensure
4374 * the memory is not freed by the bootmem allocator. If possible
4375 * the range being registered will be merged with existing ranges.
4376 */
4377void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4378 unsigned long end_pfn)
4379{
4380 int i;
4381
6b74ab97
MG
4382 mminit_dprintk(MMINIT_TRACE, "memory_register",
4383 "Entering add_active_range(%d, %#lx, %#lx) "
4384 "%d entries of %d used\n",
4385 nid, start_pfn, end_pfn,
4386 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
c713216d 4387
2dbb51c4
MG
4388 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4389
c713216d
MG
4390 /* Merge with existing active regions if possible */
4391 for (i = 0; i < nr_nodemap_entries; i++) {
4392 if (early_node_map[i].nid != nid)
4393 continue;
4394
4395 /* Skip if an existing region covers this new one */
4396 if (start_pfn >= early_node_map[i].start_pfn &&
4397 end_pfn <= early_node_map[i].end_pfn)
4398 return;
4399
4400 /* Merge forward if suitable */
4401 if (start_pfn <= early_node_map[i].end_pfn &&
4402 end_pfn > early_node_map[i].end_pfn) {
4403 early_node_map[i].end_pfn = end_pfn;
4404 return;
4405 }
4406
4407 /* Merge backward if suitable */
d2dbe08d 4408 if (start_pfn < early_node_map[i].start_pfn &&
c713216d
MG
4409 end_pfn >= early_node_map[i].start_pfn) {
4410 early_node_map[i].start_pfn = start_pfn;
4411 return;
4412 }
4413 }
4414
4415 /* Check that early_node_map is large enough */
4416 if (i >= MAX_ACTIVE_REGIONS) {
4417 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4418 MAX_ACTIVE_REGIONS);
4419 return;
4420 }
4421
4422 early_node_map[i].nid = nid;
4423 early_node_map[i].start_pfn = start_pfn;
4424 early_node_map[i].end_pfn = end_pfn;
4425 nr_nodemap_entries = i + 1;
4426}
4427
4428/**
cc1050ba 4429 * remove_active_range - Shrink an existing registered range of PFNs
c713216d 4430 * @nid: The node id the range is on that should be shrunk
cc1050ba
YL
 4431 * @start_pfn: The new start PFN of the range
 4432 * @end_pfn: The new end PFN of the range
c713216d
MG
4433 *
 4434 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
cc1a9d86
YL
 4435 * The map is kept near the end of the physical page range that has already been
4436 * registered. This function allows an arch to shrink an existing registered
4437 * range.
c713216d 4438 */
cc1050ba
YL
4439void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4440 unsigned long end_pfn)
c713216d 4441{
cc1a9d86
YL
4442 int i, j;
4443 int removed = 0;
c713216d 4444
cc1050ba
YL
4445 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4446 nid, start_pfn, end_pfn);
4447
c713216d 4448 /* Find the old active region end and shrink */
cc1a9d86 4449 for_each_active_range_index_in_nid(i, nid) {
cc1050ba
YL
4450 if (early_node_map[i].start_pfn >= start_pfn &&
4451 early_node_map[i].end_pfn <= end_pfn) {
cc1a9d86 4452 /* clear it */
cc1050ba 4453 early_node_map[i].start_pfn = 0;
cc1a9d86
YL
4454 early_node_map[i].end_pfn = 0;
4455 removed = 1;
4456 continue;
4457 }
cc1050ba
YL
4458 if (early_node_map[i].start_pfn < start_pfn &&
4459 early_node_map[i].end_pfn > start_pfn) {
4460 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4461 early_node_map[i].end_pfn = start_pfn;
4462 if (temp_end_pfn > end_pfn)
4463 add_active_range(nid, end_pfn, temp_end_pfn);
4464 continue;
4465 }
4466 if (early_node_map[i].start_pfn >= start_pfn &&
4467 early_node_map[i].end_pfn > end_pfn &&
4468 early_node_map[i].start_pfn < end_pfn) {
4469 early_node_map[i].start_pfn = end_pfn;
cc1a9d86 4470 continue;
c713216d 4471 }
cc1a9d86
YL
4472 }
4473
4474 if (!removed)
4475 return;
4476
4477 /* remove the blank ones */
4478 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4479 if (early_node_map[i].nid != nid)
4480 continue;
4481 if (early_node_map[i].end_pfn)
4482 continue;
4483 /* we found it, get rid of it */
4484 for (j = i; j < nr_nodemap_entries - 1; j++)
4485 memcpy(&early_node_map[j], &early_node_map[j+1],
4486 sizeof(early_node_map[j]));
4487 j = nr_nodemap_entries - 1;
4488 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4489 nr_nodemap_entries--;
4490 }
c713216d
MG
4491}
4492
4493/**
4494 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 4495 *
c713216d
MG
4496 * During discovery, it may be found that a table like SRAT is invalid
4497 * and an alternative discovery method must be used. This function removes
4498 * all currently registered regions.
4499 */
88ca3b94 4500void __init remove_all_active_ranges(void)
c713216d
MG
4501{
4502 memset(early_node_map, 0, sizeof(early_node_map));
4503 nr_nodemap_entries = 0;
4504}
4505
4506/* Compare two active node_active_regions */
4507static int __init cmp_node_active_region(const void *a, const void *b)
4508{
4509 struct node_active_region *arange = (struct node_active_region *)a;
4510 struct node_active_region *brange = (struct node_active_region *)b;
4511
4512 /* Done this way to avoid overflows */
4513 if (arange->start_pfn > brange->start_pfn)
4514 return 1;
4515 if (arange->start_pfn < brange->start_pfn)
4516 return -1;
4517
4518 return 0;
4519}
4520
4521/* sort the node_map by start_pfn */
32996250 4522void __init sort_node_map(void)
c713216d
MG
4523{
4524 sort(early_node_map, (size_t)nr_nodemap_entries,
4525 sizeof(struct node_active_region),
4526 cmp_node_active_region, NULL);
4527}
4528
a6af2bc3 4529/* Find the lowest pfn for a node */
b69a7288 4530static unsigned long __init find_min_pfn_for_node(int nid)
c713216d
MG
4531{
4532 int i;
a6af2bc3 4533 unsigned long min_pfn = ULONG_MAX;
1abbfb41 4534
c713216d
MG
4535 /* Assuming a sorted map, the first range found has the starting pfn */
4536 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 4537 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 4538
a6af2bc3
MG
4539 if (min_pfn == ULONG_MAX) {
4540 printk(KERN_WARNING
2bc0d261 4541 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
4542 return 0;
4543 }
4544
4545 return min_pfn;
c713216d
MG
4546}
4547
4548/**
4549 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4550 *
4551 * It returns the minimum PFN based on information provided via
88ca3b94 4552 * add_active_range().
c713216d
MG
4553 */
4554unsigned long __init find_min_pfn_with_active_regions(void)
4555{
4556 return find_min_pfn_for_node(MAX_NUMNODES);
4557}
4558
37b07e41
LS
4559/*
4560 * early_calculate_totalpages()
4561 * Sum pages in active regions for movable zone.
4562 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4563 */
484f51f8 4564static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
4565{
4566 int i;
4567 unsigned long totalpages = 0;
4568
37b07e41
LS
4569 for (i = 0; i < nr_nodemap_entries; i++) {
4570 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 4571 early_node_map[i].start_pfn;
37b07e41
LS
4572 totalpages += pages;
4573 if (pages)
4574 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4575 }
4576 return totalpages;
7e63efef
MG
4577}
4578
2a1e274a
MG
4579/*
4580 * Find the PFN the Movable zone begins in each node. Kernel memory
4581 * is spread evenly between nodes as long as the nodes have enough
4582 * memory. When they don't, some nodes will have more kernelcore than
4583 * others
4584 */
b69a7288 4585static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
2a1e274a
MG
4586{
4587 int i, nid;
4588 unsigned long usable_startpfn;
4589 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd
YL
4590 /* save the state before borrow the nodemask */
4591 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
37b07e41
LS
4592 unsigned long totalpages = early_calculate_totalpages();
4593 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 4594
7e63efef
MG
4595 /*
 4596 * If movablecore was specified, calculate the corresponding size of
 4597 * kernelcore so that memory usable for
4598 * any allocation type is evenly spread. If both kernelcore
4599 * and movablecore are specified, then the value of kernelcore
4600 * will be used for required_kernelcore if it's greater than
4601 * what movablecore would have allowed.
4602 */
4603 if (required_movablecore) {
7e63efef
MG
4604 unsigned long corepages;
4605
4606 /*
4607 * Round-up so that ZONE_MOVABLE is at least as large as what
4608 * was requested by the user
4609 */
4610 required_movablecore =
4611 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4612 corepages = totalpages - required_movablecore;
4613
4614 required_kernelcore = max(required_kernelcore, corepages);
4615 }
4616
2a1e274a
MG
4617 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4618 if (!required_kernelcore)
66918dcd 4619 goto out;
2a1e274a
MG
4620
4621 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4622 find_usable_zone_for_movable();
4623 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4624
4625restart:
4626 /* Spread kernelcore memory as evenly as possible throughout nodes */
4627 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 4628 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
4629 /*
4630 * Recalculate kernelcore_node if the division per node
4631 * now exceeds what is necessary to satisfy the requested
4632 * amount of memory for the kernel
4633 */
4634 if (required_kernelcore < kernelcore_node)
4635 kernelcore_node = required_kernelcore / usable_nodes;
4636
4637 /*
4638 * As the map is walked, we track how much memory is usable
4639 * by the kernel using kernelcore_remaining. When it is
4640 * 0, the rest of the node is usable by ZONE_MOVABLE
4641 */
4642 kernelcore_remaining = kernelcore_node;
4643
4644 /* Go through each range of PFNs within this node */
4645 for_each_active_range_index_in_nid(i, nid) {
4646 unsigned long start_pfn, end_pfn;
4647 unsigned long size_pages;
4648
4649 start_pfn = max(early_node_map[i].start_pfn,
4650 zone_movable_pfn[nid]);
4651 end_pfn = early_node_map[i].end_pfn;
4652 if (start_pfn >= end_pfn)
4653 continue;
4654
4655 /* Account for what is only usable for kernelcore */
4656 if (start_pfn < usable_startpfn) {
4657 unsigned long kernel_pages;
4658 kernel_pages = min(end_pfn, usable_startpfn)
4659 - start_pfn;
4660
4661 kernelcore_remaining -= min(kernel_pages,
4662 kernelcore_remaining);
4663 required_kernelcore -= min(kernel_pages,
4664 required_kernelcore);
4665
4666 /* Continue if range is now fully accounted */
4667 if (end_pfn <= usable_startpfn) {
4668
4669 /*
4670 * Push zone_movable_pfn to the end so
4671 * that if we have to rebalance
4672 * kernelcore across nodes, we will
4673 * not double account here
4674 */
4675 zone_movable_pfn[nid] = end_pfn;
4676 continue;
4677 }
4678 start_pfn = usable_startpfn;
4679 }
4680
4681 /*
4682 * The usable PFN range for ZONE_MOVABLE is from
4683 * start_pfn->end_pfn. Calculate size_pages as the
4684 * number of pages used as kernelcore
4685 */
4686 size_pages = end_pfn - start_pfn;
4687 if (size_pages > kernelcore_remaining)
4688 size_pages = kernelcore_remaining;
4689 zone_movable_pfn[nid] = start_pfn + size_pages;
4690
4691 /*
4692 * Some kernelcore has been met, update counts and
4693 * break if the kernelcore for this node has been
 4694 * satisfied
4695 */
4696 required_kernelcore -= min(required_kernelcore,
4697 size_pages);
4698 kernelcore_remaining -= size_pages;
4699 if (!kernelcore_remaining)
4700 break;
4701 }
4702 }
4703
4704 /*
4705 * If there is still required_kernelcore, we do another pass with one
4706 * less node in the count. This will push zone_movable_pfn[nid] further
4707 * along on the nodes that still have memory until kernelcore is
 4708 * satisfied
4709 */
4710 usable_nodes--;
4711 if (usable_nodes && required_kernelcore > usable_nodes)
4712 goto restart;
4713
4714 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4715 for (nid = 0; nid < MAX_NUMNODES; nid++)
4716 zone_movable_pfn[nid] =
4717 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd
YL
4718
4719out:
4720 /* restore the node_state */
4721 node_states[N_HIGH_MEMORY] = saved_node_state;
2a1e274a
MG
4722}
4723
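/*
 * Illustrative sketch (not part of the kernel source): the even split of
 * kernelcore across nodes that find_zone_movable_pfns_for_nodes() above
 * performs, for two hypothetical equally-sized nodes and a kernelcore of
 * 524288 pages (2GB of 4K pages); holes and per-range walking are ignored.
 */
#include <stdio.h>

int main(void)
{
	unsigned long node_pages[2] = { 1048576, 1048576 };	/* assumed: 4GB per node */
	unsigned long required_kernelcore = 524288;		/* assumed: kernelcore=2G */
	int usable_nodes = 2;
	unsigned long kernelcore_node = required_kernelcore / usable_nodes;

	for (int nid = 0; nid < 2; nid++) {
		/* the first kernelcore_node pages stay kernel-usable, the rest become ZONE_MOVABLE */
		printf("node %d: ZONE_MOVABLE begins %lu pages in, %lu movable pages\n",
		       nid, kernelcore_node, node_pages[nid] - kernelcore_node);
	}
	return 0;
}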
37b07e41
LS
4724/* Any regular memory on that node ? */
4725static void check_for_regular_memory(pg_data_t *pgdat)
4726{
4727#ifdef CONFIG_HIGHMEM
4728 enum zone_type zone_type;
4729
4730 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4731 struct zone *zone = &pgdat->node_zones[zone_type];
4732 if (zone->present_pages)
4733 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4734 }
4735#endif
4736}
4737
c713216d
MG
4738/**
4739 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 4740 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
4741 *
4742 * This will call free_area_init_node() for each active node in the system.
4743 * Using the page ranges provided by add_active_range(), the size of each
4744 * zone in each node and their holes is calculated. If the maximum PFN
4745 * between two adjacent zones match, it is assumed that the zone is empty.
4746 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4747 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4748 * starts where the previous one ended. For example, ZONE_DMA32 starts
4749 * at arch_max_dma_pfn.
4750 */
4751void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4752{
4753 unsigned long nid;
db99100d 4754 int i;
c713216d 4755
a6af2bc3
MG
4756 /* Sort early_node_map as initialisation assumes it is sorted */
4757 sort_node_map();
4758
c713216d
MG
4759 /* Record where the zone boundaries are */
4760 memset(arch_zone_lowest_possible_pfn, 0,
4761 sizeof(arch_zone_lowest_possible_pfn));
4762 memset(arch_zone_highest_possible_pfn, 0,
4763 sizeof(arch_zone_highest_possible_pfn));
4764 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4765 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4766 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
4767 if (i == ZONE_MOVABLE)
4768 continue;
c713216d
MG
4769 arch_zone_lowest_possible_pfn[i] =
4770 arch_zone_highest_possible_pfn[i-1];
4771 arch_zone_highest_possible_pfn[i] =
4772 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4773 }
2a1e274a
MG
4774 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4775 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4776
4777 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4778 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4779 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 4780
c713216d
MG
4781 /* Print out the zone ranges */
4782 printk("Zone PFN ranges:\n");
2a1e274a
MG
4783 for (i = 0; i < MAX_NR_ZONES; i++) {
4784 if (i == ZONE_MOVABLE)
4785 continue;
72f0ba02
DR
4786 printk(" %-8s ", zone_names[i]);
4787 if (arch_zone_lowest_possible_pfn[i] ==
4788 arch_zone_highest_possible_pfn[i])
4789 printk("empty\n");
4790 else
4791 printk("%0#10lx -> %0#10lx\n",
c713216d
MG
4792 arch_zone_lowest_possible_pfn[i],
4793 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
4794 }
4795
4796 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4797 printk("Movable zone start PFN for each node\n");
4798 for (i = 0; i < MAX_NUMNODES; i++) {
4799 if (zone_movable_pfn[i])
4800 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4801 }
c713216d
MG
4802
4803 /* Print out the early_node_map[] */
4804 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4805 for (i = 0; i < nr_nodemap_entries; i++)
5dab8ec1 4806 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
c713216d
MG
4807 early_node_map[i].start_pfn,
4808 early_node_map[i].end_pfn);
4809
4810 /* Initialise every node */
708614e6 4811 mminit_verify_pageflags_layout();
8ef82866 4812 setup_nr_node_ids();
c713216d
MG
4813 for_each_online_node(nid) {
4814 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 4815 free_area_init_node(nid, NULL,
c713216d 4816 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
4817
4818 /* Any memory on that node */
4819 if (pgdat->node_present_pages)
4820 node_set_state(nid, N_HIGH_MEMORY);
4821 check_for_regular_memory(pgdat);
c713216d
MG
4822 }
4823}
2a1e274a 4824
7e63efef 4825static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
4826{
4827 unsigned long long coremem;
4828 if (!p)
4829 return -EINVAL;
4830
4831 coremem = memparse(p, &p);
7e63efef 4832 *core = coremem >> PAGE_SHIFT;
2a1e274a 4833
7e63efef 4834 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
4835 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4836
4837 return 0;
4838}
ed7ed365 4839
7e63efef
MG
4840/*
4841 * kernelcore=size sets the amount of memory for use for allocations that
4842 * cannot be reclaimed or migrated.
4843 */
4844static int __init cmdline_parse_kernelcore(char *p)
4845{
4846 return cmdline_parse_core(p, &required_kernelcore);
4847}
4848
4849/*
4850 * movablecore=size sets the amount of memory for use for allocations that
4851 * can be reclaimed or migrated.
4852 */
4853static int __init cmdline_parse_movablecore(char *p)
4854{
4855 return cmdline_parse_core(p, &required_movablecore);
4856}
4857
ed7ed365 4858early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 4859early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 4860
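/*
 * Illustrative sketch (not part of the kernel source): what a boot
 * parameter such as "kernelcore=512M" turns into via cmdline_parse_core()
 * above, assuming 4K pages; memparse()-style suffix handling is reduced
 * to the single 'M' case needed for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long coremem = 512ULL << 20;	/* "512M" after memparse() */
	unsigned int page_shift = 12;			/* assumed PAGE_SHIFT */
	unsigned long required_kernelcore = coremem >> page_shift;

	/* 512M of 4K pages -> 131072 pages kept for non-movable allocations */
	printf("required_kernelcore = %lu pages\n", required_kernelcore);
	return 0;
}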
c713216d
MG
4861#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4862
0e0b864e 4863/**
88ca3b94
RD
4864 * set_dma_reserve - set the specified number of pages reserved in the first zone
4865 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
4866 *
4867 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4868 * In the DMA zone, a significant percentage may be consumed by kernel image
4869 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
4870 * function may optionally be used to account for unfreeable pages in the
4871 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4872 * smaller per-cpu batchsize.
0e0b864e
MG
4873 */
4874void __init set_dma_reserve(unsigned long new_dma_reserve)
4875{
4876 dma_reserve = new_dma_reserve;
4877}
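/*
 * Example (illustrative): an architecture that knows roughly 16MB of its DMA
 * zone is occupied by the kernel image and other unfreeable data could call
 * set_dma_reserve(16 << (20 - PAGE_SHIFT)) early in boot, i.e. reserve 4096
 * pages with 4KB pages, so that zone's watermarks and per-cpu batchsize are
 * not skewed by memory that can never be freed.
 */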
4878
1da177e4
LT
4879void __init free_area_init(unsigned long *zones_size)
4880{
9109fb7b 4881 free_area_init_node(0, zones_size,
1da177e4
LT
4882 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4883}
1da177e4 4884
1da177e4
LT
4885static int page_alloc_cpu_notify(struct notifier_block *self,
4886 unsigned long action, void *hcpu)
4887{
4888 int cpu = (unsigned long)hcpu;
1da177e4 4889
8bb78442 4890 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
4891 drain_pages(cpu);
4892
4893 /*
4894 * Spill the event counters of the dead processor
4895 * into the current processor's event counters.
4896 * This artificially elevates the count of the current
4897 * processor.
4898 */
f8891e5e 4899 vm_events_fold_cpu(cpu);
9f8f2172
CL
4900
4901 /*
4902 * Zero the differential counters of the dead processor
4903 * so that the vm statistics are consistent.
4904 *
4905 * This is only okay since the processor is dead and cannot
4906 * race with what we are doing.
4907 */
2244b95a 4908 refresh_cpu_vm_stats(cpu);
1da177e4
LT
4909 }
4910 return NOTIFY_OK;
4911}
1da177e4
LT
4912
4913void __init page_alloc_init(void)
4914{
4915 hotcpu_notifier(page_alloc_cpu_notify, 0);
4916}
4917
cb45b0e9
HA
4918/*
4919 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4920 * or min_free_kbytes changes.
4921 */
4922static void calculate_totalreserve_pages(void)
4923{
4924 struct pglist_data *pgdat;
4925 unsigned long reserve_pages = 0;
2f6726e5 4926 enum zone_type i, j;
cb45b0e9
HA
4927
4928 for_each_online_pgdat(pgdat) {
4929 for (i = 0; i < MAX_NR_ZONES; i++) {
4930 struct zone *zone = pgdat->node_zones + i;
4931 unsigned long max = 0;
4932
4933 /* Find valid and maximum lowmem_reserve in the zone */
4934 for (j = i; j < MAX_NR_ZONES; j++) {
4935 if (zone->lowmem_reserve[j] > max)
4936 max = zone->lowmem_reserve[j];
4937 }
4938
41858966
MG
4939 /* we treat the high watermark as reserved pages. */
4940 max += high_wmark_pages(zone);
cb45b0e9
HA
4941
4942 if (max > zone->present_pages)
4943 max = zone->present_pages;
4944 reserve_pages += max;
4945 }
4946 }
4947 totalreserve_pages = reserve_pages;
4948}
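/*
 * Example (illustrative): a DMA zone with 4096 present pages, a high
 * watermark of 32 pages and a largest lowmem_reserve[] entry of 1387 pages
 * contributes min(32 + 1387, 4096) = 1419 pages to totalreserve_pages.
 */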
4949
1da177e4
LT
4950/*
4951 * setup_per_zone_lowmem_reserve - called whenever
4952 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4953 * has a correct pages reserved value, so an adequate number of
4954 * pages are left in the zone after a successful __alloc_pages().
4955 */
4956static void setup_per_zone_lowmem_reserve(void)
4957{
4958 struct pglist_data *pgdat;
2f6726e5 4959 enum zone_type j, idx;
1da177e4 4960
ec936fc5 4961 for_each_online_pgdat(pgdat) {
1da177e4
LT
4962 for (j = 0; j < MAX_NR_ZONES; j++) {
4963 struct zone *zone = pgdat->node_zones + j;
4964 unsigned long present_pages = zone->present_pages;
4965
4966 zone->lowmem_reserve[j] = 0;
4967
2f6726e5
CL
4968 idx = j;
4969 while (idx) {
1da177e4
LT
4970 struct zone *lower_zone;
4971
2f6726e5
CL
4972 idx--;
4973
1da177e4
LT
4974 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4975 sysctl_lowmem_reserve_ratio[idx] = 1;
4976
4977 lower_zone = pgdat->node_zones + idx;
4978 lower_zone->lowmem_reserve[j] = present_pages /
4979 sysctl_lowmem_reserve_ratio[idx];
4980 present_pages += lower_zone->present_pages;
4981 }
4982 }
4983 }
cb45b0e9
HA
4984
4985 /* update totalreserve_pages */
4986 calculate_totalreserve_pages();
1da177e4
LT
4987}
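/*
 * Worked example (illustrative, assuming sysctl_lowmem_reserve_ratio values
 * of 256 for ZONE_DMA and 32 for ZONE_NORMAL): with 224000 NORMAL pages and
 * 131072 HIGHMEM pages, the loop above sets
 *   DMA->lowmem_reserve[NORMAL]     = 224000 / 256            = 875
 *   NORMAL->lowmem_reserve[HIGHMEM] = 131072 / 32             = 4096
 *   DMA->lowmem_reserve[HIGHMEM]    = (131072 + 224000) / 256 = 1387
 * so allocations that could have been satisfied from a higher zone cannot
 * drain the lower zones below these amounts.
 */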
4988
88ca3b94 4989/**
bc75d33f 4990 * setup_per_zone_wmarks - called when min_free_kbytes changes
bce7394a 4991 * or when memory is hot-{added|removed}
88ca3b94 4992 *
bc75d33f
MK
4993 * Ensures that the watermark[min,low,high] values for each zone are set
4994 * correctly with respect to min_free_kbytes.
1da177e4 4995 */
bc75d33f 4996void setup_per_zone_wmarks(void)
1da177e4
LT
4997{
4998 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4999 unsigned long lowmem_pages = 0;
5000 struct zone *zone;
5001 unsigned long flags;
5002
5003 /* Calculate total number of !ZONE_HIGHMEM pages */
5004 for_each_zone(zone) {
5005 if (!is_highmem(zone))
5006 lowmem_pages += zone->present_pages;
5007 }
5008
5009 for_each_zone(zone) {
ac924c60
AM
5010 u64 tmp;
5011
1125b4e3 5012 spin_lock_irqsave(&zone->lock, flags);
ac924c60
AM
5013 tmp = (u64)pages_min * zone->present_pages;
5014 do_div(tmp, lowmem_pages);
1da177e4
LT
5015 if (is_highmem(zone)) {
5016 /*
669ed175
NP
5017 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5018 * need highmem pages, so cap pages_min to a small
5019 * value here.
5020 *
41858966 5021 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
669ed175
NP
5022 * deltas control async page reclaim, and so should
5023 * not be capped for highmem.
1da177e4
LT
5024 */
5025 int min_pages;
5026
5027 min_pages = zone->present_pages / 1024;
5028 if (min_pages < SWAP_CLUSTER_MAX)
5029 min_pages = SWAP_CLUSTER_MAX;
5030 if (min_pages > 128)
5031 min_pages = 128;
41858966 5032 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 5033 } else {
669ed175
NP
5034 /*
5035 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
5036 * proportionate to the zone's size.
5037 */
41858966 5038 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
5039 }
5040
41858966
MG
5041 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
5042 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
56fd56b8 5043 setup_zone_migrate_reserve(zone);
1125b4e3 5044 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 5045 }
cb45b0e9
HA
5046
5047 /* update totalreserve_pages */
5048 calculate_totalreserve_pages();
1da177e4
LT
5049}
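/*
 * Worked example (illustrative, assuming 4KB pages and a single lowmem
 * zone): with min_free_kbytes = 4096, pages_min = 4096 >> (12 - 10) = 1024
 * pages, and since that zone holds all of lowmem, tmp == pages_min, giving
 *   WMARK_MIN  = 1024
 *   WMARK_LOW  = 1024 + (1024 >> 2) = 1280
 *   WMARK_HIGH = 1024 + (1024 >> 1) = 1536
 * A zone holding only part of lowmem gets a proportionally smaller share.
 */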
5050
55a4462a 5051/*
556adecb
RR
5052 * The inactive anon list should be small enough that the VM never has to
5053 * do too much work, but large enough that each inactive page has a chance
5054 * to be referenced again before it is swapped out.
5055 *
5056 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5057 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5058 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5059 * the anonymous pages are kept on the inactive list.
5060 *
5061 * total target max
5062 * memory ratio inactive anon
5063 * -------------------------------------
5064 * 10MB 1 5MB
5065 * 100MB 1 50MB
5066 * 1GB 3 250MB
5067 * 10GB 10 0.9GB
5068 * 100GB 31 3GB
5069 * 1TB 101 10GB
5070 * 10TB 320 32GB
5071 */
96cb4df5 5072void calculate_zone_inactive_ratio(struct zone *zone)
556adecb 5073{
96cb4df5 5074 unsigned int gb, ratio;
556adecb 5075
96cb4df5
MK
5076 /* Zone size in gigabytes */
5077 gb = zone->present_pages >> (30 - PAGE_SHIFT);
5078 if (gb)
556adecb 5079 ratio = int_sqrt(10 * gb);
96cb4df5
MK
5080 else
5081 ratio = 1;
556adecb 5082
96cb4df5
MK
5083 zone->inactive_ratio = ratio;
5084}
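/*
 * Example (illustrative check against the table above): a 10GB zone gives
 * gb = 10 and ratio = int_sqrt(100) = 10; a 1TB zone gives gb = 1024 and
 * ratio = int_sqrt(10240) = 101, i.e. roughly 1/102 of the anonymous pages
 * are kept on the inactive list.
 */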
556adecb 5085
96cb4df5
MK
5086static void __init setup_per_zone_inactive_ratio(void)
5087{
5088 struct zone *zone;
5089
5090 for_each_zone(zone)
5091 calculate_zone_inactive_ratio(zone);
556adecb
RR
5092}
5093
1da177e4
LT
5094/*
5095 * Initialise min_free_kbytes.
5096 *
5097 * For small machines we want it small (128k min). For large machines
5098 * we want it large (64MB max). But it is not linear, because network
5099 * bandwidth does not increase linearly with machine size. We use
5100 *
5101 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5102 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5103 *
5104 * which yields
5105 *
5106 * 16MB: 512k
5107 * 32MB: 724k
5108 * 64MB: 1024k
5109 * 128MB: 1448k
5110 * 256MB: 2048k
5111 * 512MB: 2896k
5112 * 1024MB: 4096k
5113 * 2048MB: 5792k
5114 * 4096MB: 8192k
5115 * 8192MB: 11584k
5116 * 16384MB: 16384k
5117 */
bc75d33f 5118static int __init init_per_zone_wmark_min(void)
1da177e4
LT
5119{
5120 unsigned long lowmem_kbytes;
5121
5122 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5123
5124 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5125 if (min_free_kbytes < 128)
5126 min_free_kbytes = 128;
5127 if (min_free_kbytes > 65536)
5128 min_free_kbytes = 65536;
bc75d33f 5129 setup_per_zone_wmarks();
1da177e4 5130 setup_per_zone_lowmem_reserve();
556adecb 5131 setup_per_zone_inactive_ratio();
1da177e4
LT
5132 return 0;
5133}
bc75d33f 5134module_init(init_per_zone_wmark_min)
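/*
 * Example (illustrative): with 128MB of lowmem, lowmem_kbytes = 131072 and
 * int_sqrt(131072 * 16) = int_sqrt(2097152) = 1448, matching the 1448k row
 * of the table above; with 16MB, int_sqrt(16384 * 16) = 512, i.e. 512k.
 * The result is then clamped to the [128, 65536] range.
 */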
1da177e4
LT
5135
5136/*
5137 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5138 * that we can call two helper functions whenever min_free_kbytes
5139 * changes.
5140 */
5141int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
8d65af78 5142 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5143{
8d65af78 5144 proc_dointvec(table, write, buffer, length, ppos);
3b1d92c5 5145 if (write)
bc75d33f 5146 setup_per_zone_wmarks();
1da177e4
LT
5147 return 0;
5148}
5149
9614634f
CL
5150#ifdef CONFIG_NUMA
5151int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5152 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
5153{
5154 struct zone *zone;
5155 int rc;
5156
8d65af78 5157 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
5158 if (rc)
5159 return rc;
5160
5161 for_each_zone(zone)
8417bba4 5162 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
5163 sysctl_min_unmapped_ratio) / 100;
5164 return 0;
5165}
0ff38490
CL
5166
5167int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5168 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
5169{
5170 struct zone *zone;
5171 int rc;
5172
8d65af78 5173 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
5174 if (rc)
5175 return rc;
5176
5177 for_each_zone(zone)
5178 zone->min_slab_pages = (zone->present_pages *
5179 sysctl_min_slab_ratio) / 100;
5180 return 0;
5181}
9614634f
CL
5182#endif
5183
1da177e4
LT
5184/*
5185 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5186 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5187 * whenever sysctl_lowmem_reserve_ratio changes.
5188 *
5189 * The reserve ratio obviously has absolutely no relation with the
41858966 5190 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
5191 * as a function of the boot-time zone sizes.
5192 */
5193int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5194 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5195{
8d65af78 5196 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
5197 setup_per_zone_lowmem_reserve();
5198 return 0;
5199}
5200
8ad4b1fb
RS
5201/*
5202 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5203 * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist
5204 * can have before it gets flushed back to the buddy allocator.
5205 */
5206
5207int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
8d65af78 5208 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
5209{
5210 struct zone *zone;
5211 unsigned int cpu;
5212 int ret;
5213
8d65af78 5214 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8ad4b1fb
RS
5215 if (!write || (ret == -EINVAL))
5216 return ret;
364df0eb 5217 for_each_populated_zone(zone) {
99dcc3e5 5218 for_each_possible_cpu(cpu) {
8ad4b1fb
RS
5219 unsigned long high;
5220 high = zone->present_pages / percpu_pagelist_fraction;
99dcc3e5
CL
5221 setup_pagelist_highmark(
5222 per_cpu_ptr(zone->pageset, cpu), high);
8ad4b1fb
RS
5223 }
5224 }
5225 return 0;
5226}
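/*
 * Example (illustrative, assuming 4KB pages): writing 100 to the
 * percpu_pagelist_fraction sysctl on a zone with 262144 present pages (1GB)
 * sets each per-cpu pcp->high to 262144 / 100 = 2621 pages, roughly 10MB,
 * via setup_pagelist_highmark().
 */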
5227
f034b5d4 5228int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
5229
5230#ifdef CONFIG_NUMA
5231static int __init set_hashdist(char *str)
5232{
5233 if (!str)
5234 return 0;
5235 hashdist = simple_strtoul(str, &str, 0);
5236 return 1;
5237}
5238__setup("hashdist=", set_hashdist);
5239#endif
5240
5241/*
5242 * allocate a large system hash table from bootmem
5243 * - it is assumed that the hash table must contain an exact power-of-2
5244 * quantity of entries
5245 * - limit is the number of hash buckets, not the total allocation size
5246 */
5247void *__init alloc_large_system_hash(const char *tablename,
5248 unsigned long bucketsize,
5249 unsigned long numentries,
5250 int scale,
5251 int flags,
5252 unsigned int *_hash_shift,
5253 unsigned int *_hash_mask,
5254 unsigned long limit)
5255{
5256 unsigned long long max = limit;
5257 unsigned long log2qty, size;
5258 void *table = NULL;
5259
5260 /* allow the kernel cmdline to have a say */
5261 if (!numentries) {
5262 /* round applicable memory size up to nearest megabyte */
04903664 5263 numentries = nr_kernel_pages;
1da177e4
LT
5264 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5265 numentries >>= 20 - PAGE_SHIFT;
5266 numentries <<= 20 - PAGE_SHIFT;
5267
5268 /* limit to 1 bucket per 2^scale bytes of low memory */
5269 if (scale > PAGE_SHIFT)
5270 numentries >>= (scale - PAGE_SHIFT);
5271 else
5272 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
5273
5274 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
5275 if (unlikely(flags & HASH_SMALL)) {
5276 /* Makes no sense without HASH_EARLY */
5277 WARN_ON(!(flags & HASH_EARLY));
5278 if (!(numentries >> *_hash_shift)) {
5279 numentries = 1UL << *_hash_shift;
5280 BUG_ON(!numentries);
5281 }
5282 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 5283 numentries = PAGE_SIZE / bucketsize;
1da177e4 5284 }
6e692ed3 5285 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
5286
5287 /* limit allocation size to 1/16 total memory by default */
5288 if (max == 0) {
5289 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5290 do_div(max, bucketsize);
5291 }
5292
5293 if (numentries > max)
5294 numentries = max;
5295
f0d1b0b3 5296 log2qty = ilog2(numentries);
1da177e4
LT
5297
5298 do {
5299 size = bucketsize << log2qty;
5300 if (flags & HASH_EARLY)
74768ed8 5301 table = alloc_bootmem_nopanic(size);
1da177e4
LT
5302 else if (hashdist)
5303 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5304 else {
1037b83b
ED
5305 /*
5306 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
5307 * some pages at the end of the hash table, which
5308 * alloc_pages_exact() does automatically.
1037b83b 5309 */
264ef8a9 5310 if (get_order(size) < MAX_ORDER) {
a1dd268c 5311 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
5312 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5313 }
1da177e4
LT
5314 }
5315 } while (!table && size > PAGE_SIZE && --log2qty);
5316
5317 if (!table)
5318 panic("Failed to allocate %s hash table\n", tablename);
5319
f241e660 5320 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
1da177e4 5321 tablename,
f241e660 5322 (1UL << log2qty),
f0d1b0b3 5323 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
5324 size);
5325
5326 if (_hash_shift)
5327 *_hash_shift = log2qty;
5328 if (_hash_mask)
5329 *_hash_mask = (1 << log2qty) - 1;
5330
5331 return table;
5332}
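/*
 * Example (illustrative, assuming 4KB pages, roughly 1GB of kernel pages
 * (nr_kernel_pages = 262144) and 8-byte buckets): a caller passing
 * numentries == 0 and scale == 14 ends up with
 * numentries = 262144 >> (14 - 12) = 65536 entries, so the table size is
 * 8 << ilog2(65536) = 512KB, *_hash_shift is set to 16 and *_hash_mask
 * to 65535.
 */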
a117e66e 5333
835c134e
MG
5334/* Return a pointer to the bitmap storing bits affecting a block of pages */
5335static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5336 unsigned long pfn)
5337{
5338#ifdef CONFIG_SPARSEMEM
5339 return __pfn_to_section(pfn)->pageblock_flags;
5340#else
5341 return zone->pageblock_flags;
5342#endif /* CONFIG_SPARSEMEM */
5343}
5344
5345static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5346{
5347#ifdef CONFIG_SPARSEMEM
5348 pfn &= (PAGES_PER_SECTION-1);
d9c23400 5349 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5350#else
5351 pfn = pfn - zone->zone_start_pfn;
d9c23400 5352 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5353#endif /* CONFIG_SPARSEMEM */
5354}
5355
5356/**
d9c23400 5357 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
5358 * @page: The page within the block of interest
5359 * @start_bitidx: The first bit of interest to retrieve
5360 * @end_bitidx: The last bit of interest
5361 * returns pageblock_bits flags
5362 */
5363unsigned long get_pageblock_flags_group(struct page *page,
5364 int start_bitidx, int end_bitidx)
5365{
5366 struct zone *zone;
5367 unsigned long *bitmap;
5368 unsigned long pfn, bitidx;
5369 unsigned long flags = 0;
5370 unsigned long value = 1;
5371
5372 zone = page_zone(page);
5373 pfn = page_to_pfn(page);
5374 bitmap = get_pageblock_bitmap(zone, pfn);
5375 bitidx = pfn_to_bitidx(zone, pfn);
5376
5377 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5378 if (test_bit(bitidx + start_bitidx, bitmap))
5379 flags |= value;
6220ec78 5380
835c134e
MG
5381 return flags;
5382}
5383
5384/**
d9c23400 5385 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
5386 * @page: The page within the block of interest
5387 * @start_bitidx: The first bit of interest
5388 * @end_bitidx: The last bit of interest
5389 * @flags: The flags to set
5390 */
5391void set_pageblock_flags_group(struct page *page, unsigned long flags,
5392 int start_bitidx, int end_bitidx)
5393{
5394 struct zone *zone;
5395 unsigned long *bitmap;
5396 unsigned long pfn, bitidx;
5397 unsigned long value = 1;
5398
5399 zone = page_zone(page);
5400 pfn = page_to_pfn(page);
5401 bitmap = get_pageblock_bitmap(zone, pfn);
5402 bitidx = pfn_to_bitidx(zone, pfn);
86051ca5
KH
5403 VM_BUG_ON(pfn < zone->zone_start_pfn);
5404 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
835c134e
MG
5405
5406 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5407 if (flags & value)
5408 __set_bit(bitidx + start_bitidx, bitmap);
5409 else
5410 __clear_bit(bitidx + start_bitidx, bitmap);
5411}
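/*
 * Usage note (illustrative): the pageblock migratetype accessors are thin
 * wrappers around these helpers, e.g. get_pageblock_migratetype() expands
 * to get_pageblock_flags_group(page, PB_migrate, PB_migrate_end), reading
 * the migratetype bits stored for the pageblock that contains the page.
 */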
a5d76b54
KH
5412
5413/*
5414 * This is designed as a helper function; please see page_isolation.c as well.
5415 * Set/clear a pageblock's type to MIGRATE_ISOLATE.
5416 * The page allocator never allocates memory from an ISOLATE block.
5417 */
5418
49ac8255
KH
5419static int
5420__count_immobile_pages(struct zone *zone, struct page *page, int count)
5421{
5422 unsigned long pfn, iter, found;
5423 /*
5424 * To avoid noisy data, lru_add_drain_all() should be called first.
5425 * If this is ZONE_MOVABLE, the zone never contains immobile pages.
5426 */
5427 if (zone_idx(zone) == ZONE_MOVABLE)
5428 return true;
5429
5430 if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
5431 return true;
5432
5433 pfn = page_to_pfn(page);
5434 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5435 unsigned long check = pfn + iter;
5436
29723fcc 5437 if (!pfn_valid_within(check))
49ac8255 5438 continue;
29723fcc 5439
49ac8255
KH
5440 page = pfn_to_page(check);
5441 if (!page_count(page)) {
5442 if (PageBuddy(page))
5443 iter += (1 << page_order(page)) - 1;
5444 continue;
5445 }
5446 if (!PageLRU(page))
5447 found++;
5448 /*
5449 * If there are RECLAIMABLE pages, we need to check them.
5450 * But for now, memory offline itself doesn't call shrink_slab(),
5451 * and that still needs to be fixed.
5452 */
5453 /*
5454 * If the page is not RAM, page_count() should be 0, and we
5455 * don't need any further checks: this is a _used_, non-movable page.
5456 *
5457 * The problematic thing here is PG_reserved pages. PG_reserved
5458 * is set on both memory-hole pages and _used_ kernel
5459 * pages at boot.
5460 */
5461 if (found > count)
5462 return false;
5463 }
5464 return true;
5465}
5466
5467bool is_pageblock_removable_nolock(struct page *page)
5468{
5469 struct zone *zone = page_zone(page);
5470 return __count_immobile_pages(zone, page, 0);
5471}
5472
a5d76b54
KH
5473int set_migratetype_isolate(struct page *page)
5474{
5475 struct zone *zone;
49ac8255 5476 unsigned long flags, pfn;
925cc71e
RJ
5477 struct memory_isolate_notify arg;
5478 int notifier_ret;
a5d76b54 5479 int ret = -EBUSY;
8e7e40d9 5480 int zone_idx;
a5d76b54
KH
5481
5482 zone = page_zone(page);
8e7e40d9 5483 zone_idx = zone_idx(zone);
925cc71e 5484
a5d76b54 5485 spin_lock_irqsave(&zone->lock, flags);
925cc71e
RJ
5486
5487 pfn = page_to_pfn(page);
5488 arg.start_pfn = pfn;
5489 arg.nr_pages = pageblock_nr_pages;
5490 arg.pages_found = 0;
5491
a5d76b54 5492 /*
925cc71e
RJ
5493 * It may be possible to isolate a pageblock even if the
5494 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5495 * notifier chain is used by balloon drivers to return the
5496 * number of pages in a range that are held by the balloon
5497 * driver to shrink memory. If all the pages are accounted for
5498 * by balloons, are free, or on the LRU, isolation can continue.
5499 * Later, for example, when memory hotplug notifier runs, these
5500 * pages reported as "can be isolated" should be isolated (freed)
5501 * by the balloon driver through the memory notifier chain.
a5d76b54 5502 */
925cc71e
RJ
5503 notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5504 notifier_ret = notifier_to_errno(notifier_ret);
4b20477f 5505 if (notifier_ret)
a5d76b54 5506 goto out;
49ac8255
KH
5507 /*
5508 * FIXME: For now, memory hotplug doesn't call shrink_slab() by itself.
5509 * We just check MOVABLE pages.
5510 */
5511 if (__count_immobile_pages(zone, page, arg.pages_found))
925cc71e
RJ
5512 ret = 0;
5513
49ac8255
KH
5514 /*
5515 * Immobile means "not-on-LRU" pages. If the immobile count is larger than
5516 * the removable-by-driver pages reported by the notifier, we'll fail.
5517 */
5518
a5d76b54 5519out:
925cc71e
RJ
5520 if (!ret) {
5521 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5522 move_freepages_block(zone, page, MIGRATE_ISOLATE);
5523 }
5524
a5d76b54
KH
5525 spin_unlock_irqrestore(&zone->lock, flags);
5526 if (!ret)
9f8f2172 5527 drain_all_pages();
a5d76b54
KH
5528 return ret;
5529}
5530
5531void unset_migratetype_isolate(struct page *page)
5532{
5533 struct zone *zone;
5534 unsigned long flags;
5535 zone = page_zone(page);
5536 spin_lock_irqsave(&zone->lock, flags);
5537 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5538 goto out;
5539 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5540 move_freepages_block(zone, page, MIGRATE_MOVABLE);
5541out:
5542 spin_unlock_irqrestore(&zone->lock, flags);
5543}
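/*
 * Usage note (illustrative): callers such as start_isolate_page_range() in
 * page_isolation.c mark a whole range by calling set_migratetype_isolate()
 * on one page per pageblock, migrate or free the contents, and later either
 * offline the range or roll back with unset_migratetype_isolate() via
 * undo_isolate_page_range().
 */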
0c0e6195
KH
5544
5545#ifdef CONFIG_MEMORY_HOTREMOVE
5546/*
5547 * All pages in the range must be isolated before calling this.
5548 */
5549void
5550__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5551{
5552 struct page *page;
5553 struct zone *zone;
5554 int order, i;
5555 unsigned long pfn;
5556 unsigned long flags;
5557 /* find the first valid pfn */
5558 for (pfn = start_pfn; pfn < end_pfn; pfn++)
5559 if (pfn_valid(pfn))
5560 break;
5561 if (pfn == end_pfn)
5562 return;
5563 zone = page_zone(pfn_to_page(pfn));
5564 spin_lock_irqsave(&zone->lock, flags);
5565 pfn = start_pfn;
5566 while (pfn < end_pfn) {
5567 if (!pfn_valid(pfn)) {
5568 pfn++;
5569 continue;
5570 }
5571 page = pfn_to_page(pfn);
5572 BUG_ON(page_count(page));
5573 BUG_ON(!PageBuddy(page));
5574 order = page_order(page);
5575#ifdef CONFIG_DEBUG_VM
5576 printk(KERN_INFO "remove from free list %lx %d %lx\n",
5577 pfn, 1 << order, end_pfn);
5578#endif
5579 list_del(&page->lru);
5580 rmv_page_order(page);
5581 zone->free_area[order].nr_free--;
5582 __mod_zone_page_state(zone, NR_FREE_PAGES,
5583 - (1UL << order));
5584 for (i = 0; i < (1 << order); i++)
5585 SetPageReserved((page+i));
5586 pfn += (1 << order);
5587 }
5588 spin_unlock_irqrestore(&zone->lock, flags);
5589}
5590#endif
8d22ba1b
WF
5591
5592#ifdef CONFIG_MEMORY_FAILURE
5593bool is_free_buddy_page(struct page *page)
5594{
5595 struct zone *zone = page_zone(page);
5596 unsigned long pfn = page_to_pfn(page);
5597 unsigned long flags;
5598 int order;
5599
5600 spin_lock_irqsave(&zone->lock, flags);
5601 for (order = 0; order < MAX_ORDER; order++) {
5602 struct page *page_head = page - (pfn & ((1 << order) - 1));
5603
5604 if (PageBuddy(page_head) && page_order(page_head) >= order)
5605 break;
5606 }
5607 spin_unlock_irqrestore(&zone->lock, flags);
5608
5609 return order < MAX_ORDER;
5610}
5611#endif
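/*
 * Example (illustrative): for each order, is_free_buddy_page() masks off the
 * low "order" bits of the pfn to find the candidate buddy head of that size;
 * the page is reported free if any such head is PageBuddy() with
 * page_order() >= order, i.e. the page lies inside a free buddy block.
 */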
718a3821
WF
5612
5613static struct trace_print_flags pageflag_names[] = {
5614 {1UL << PG_locked, "locked" },
5615 {1UL << PG_error, "error" },
5616 {1UL << PG_referenced, "referenced" },
5617 {1UL << PG_uptodate, "uptodate" },
5618 {1UL << PG_dirty, "dirty" },
5619 {1UL << PG_lru, "lru" },
5620 {1UL << PG_active, "active" },
5621 {1UL << PG_slab, "slab" },
5622 {1UL << PG_owner_priv_1, "owner_priv_1" },
5623 {1UL << PG_arch_1, "arch_1" },
5624 {1UL << PG_reserved, "reserved" },
5625 {1UL << PG_private, "private" },
5626 {1UL << PG_private_2, "private_2" },
5627 {1UL << PG_writeback, "writeback" },
5628#ifdef CONFIG_PAGEFLAGS_EXTENDED
5629 {1UL << PG_head, "head" },
5630 {1UL << PG_tail, "tail" },
5631#else
5632 {1UL << PG_compound, "compound" },
5633#endif
5634 {1UL << PG_swapcache, "swapcache" },
5635 {1UL << PG_mappedtodisk, "mappedtodisk" },
5636 {1UL << PG_reclaim, "reclaim" },
718a3821
WF
5637 {1UL << PG_swapbacked, "swapbacked" },
5638 {1UL << PG_unevictable, "unevictable" },
5639#ifdef CONFIG_MMU
5640 {1UL << PG_mlocked, "mlocked" },
5641#endif
5642#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5643 {1UL << PG_uncached, "uncached" },
5644#endif
5645#ifdef CONFIG_MEMORY_FAILURE
5646 {1UL << PG_hwpoison, "hwpoison" },
5647#endif
5648 {-1UL, NULL },
5649};
5650
5651static void dump_page_flags(unsigned long flags)
5652{
5653 const char *delim = "";
5654 unsigned long mask;
5655 int i;
5656
5657 printk(KERN_ALERT "page flags: %#lx(", flags);
5658
5659 /* remove zone id */
5660 flags &= (1UL << NR_PAGEFLAGS) - 1;
5661
5662 for (i = 0; pageflag_names[i].name && flags; i++) {
5663
5664 mask = pageflag_names[i].mask;
5665 if ((flags & mask) != mask)
5666 continue;
5667
5668 flags &= ~mask;
5669 printk("%s%s", delim, pageflag_names[i].name);
5670 delim = "|";
5671 }
5672
5673 /* check for left over flags */
5674 if (flags)
5675 printk("%s%#lx", delim, flags);
5676
5677 printk(")\n");
5678}
5679
5680void dump_page(struct page *page)
5681{
5682 printk(KERN_ALERT
5683 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
4e9f64c4 5684 page, atomic_read(&page->_count), page_mapcount(page),
718a3821
WF
5685 page->mapping, page->index);
5686 dump_page_flags(page->flags);
5687}
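/*
 * Usage note (illustrative): dump_page(page) prints the raw refcount,
 * mapcount, mapping and index of a page, then dump_page_flags() walks
 * pageflag_names[] and prints every set flag by name, e.g.
 * "locked|uptodate|lru", with any leftover unknown bits shown in hex.
 */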