1 From b8ab9f82025adea77864115da73e70026fa4f540 Mon Sep 17 00:00:00 2001
2 From: Yinghai Lu <yinghai@kernel.org>
3 Date: Tue, 20 Jul 2010 13:24:31 -0700
4 Subject: x86,nobootmem: make alloc_bootmem_node fall back to other node when 32bit numa is used
6 From: Yinghai Lu <yinghai@kernel.org>
8 commit b8ab9f82025adea77864115da73e70026fa4f540 upstream.
10 Borislav Petkov reported his 32bit numa system has problem:
12 [ 0.000000] Reserving total of 4c00 pages for numa KVA remap
13 [ 0.000000] kva_start_pfn ~ 32800 max_low_pfn ~ 375fe
14 [ 0.000000] max_pfn = 238000
15 [ 0.000000] 8202MB HIGHMEM available.
16 [ 0.000000] 885MB LOWMEM available.
17 [ 0.000000] mapped low ram: 0 - 375fe000
18 [ 0.000000] low ram: 0 - 375fe000
19 [ 0.000000] alloc (nid=8 100000 - 7ee00000) (1000000 - ffffffff) 1000 1000 => 34e7000
20 [ 0.000000] alloc (nid=8 100000 - 7ee00000) (1000000 - ffffffff) 200 40 => 34c9d80
21 [ 0.000000] alloc (nid=0 100000 - 7ee00000) (1000000 - ffffffffffffffff) 180 40 => 34e6140
22 [ 0.000000] alloc (nid=1 80000000 - c7e60000) (1000000 - ffffffffffffffff) 240 40 => 80000000
23 [ 0.000000] BUG: unable to handle kernel paging request at 40000000
24 [ 0.000000] IP: [<c2c8cff1>] __alloc_memory_core_early+0x147/0x1d6
25 [ 0.000000] *pdpt = 0000000000000000 *pde = f000ff53f000ff00
27 [ 0.000000] Call Trace:
28 [ 0.000000] [<c2c8b4f8>] ? __alloc_bootmem_node+0x216/0x22f
29 [ 0.000000] [<c2c90c9b>] ? sparse_early_usemaps_alloc_node+0x5a/0x10b
30 [ 0.000000] [<c2c9149e>] ? sparse_init+0x1dc/0x499
31 [ 0.000000] [<c2c79118>] ? paging_init+0x168/0x1df
32 [ 0.000000] [<c2c780ff>] ? native_pagetable_setup_start+0xef/0x1bb
34 It looks like the allocation lands at too high an address for bootmem.
36 Try to cap the limit with get_max_mapped().
38 Reported-by: Borislav Petkov <borislav.petkov@amd.com>
39 Tested-by: Conny Seidel <conny.seidel@amd.com>
40 Signed-off-by: Yinghai Lu <yinghai@kernel.org>
41 Cc: Ingo Molnar <mingo@elte.hu>
42 Cc: "H. Peter Anvin" <hpa@zytor.com>
43 Cc: Thomas Gleixner <tglx@linutronix.de>
44 Cc: Johannes Weiner <hannes@cmpxchg.org>
45 Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
46 Cc: Mel Gorman <mel@csn.ul.ie>
47 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
48 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
49 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
52 mm/bootmem.c | 24 ++++++++++++++++++++----
53 mm/page_alloc.c | 3 +++
54 2 files changed, 23 insertions(+), 4 deletions(-)
58 @@ -833,15 +833,24 @@ static void * __init ___alloc_bootmem_no
59 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
60 unsigned long align, unsigned long goal)
64 if (WARN_ON_ONCE(slab_is_available()))
65 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
67 #ifdef CONFIG_NO_BOOTMEM
68 - return __alloc_memory_core_early(pgdat->node_id, size, align,
69 + ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
74 + ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
77 - return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
78 + ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
84 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
85 @@ -977,14 +986,21 @@ void * __init __alloc_bootmem_low(unsign
86 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
87 unsigned long align, unsigned long goal)
91 if (WARN_ON_ONCE(slab_is_available()))
92 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
94 #ifdef CONFIG_NO_BOOTMEM
95 - return __alloc_memory_core_early(pgdat->node_id, size, align,
96 + ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
97 + goal, ARCH_LOW_ADDRESS_LIMIT);
100 + ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
101 goal, ARCH_LOW_ADDRESS_LIMIT);
103 - return ___alloc_bootmem_node(pgdat->bdata, size, align,
104 + ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
105 goal, ARCH_LOW_ADDRESS_LIMIT);
109 --- a/mm/page_alloc.c
110 +++ b/mm/page_alloc.c
111 @@ -3415,6 +3415,9 @@ void * __init __alloc_memory_core_early(
115 + if (limit > get_max_mapped())
116 + limit = get_max_mapped();
118 /* need to go over early_node_map to find out good range for node */
119 for_each_active_range_index_in_nid(i, nid) {