Subject: fix booting with memoryless nodes
From: haveblue@us.ibm.com
References: 443280 - LTC49675

I've reproduced this on 2.6.27.7. I'm pretty sure it is caused by this
commit:

http://git.kernel.org/gitweb.cgi?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=8f64e1f2d1e09267ac926e15090fd505c1c0cbcb

The problem is that Jon took a loop which was (in pseudocode):

	for_each_node(nid)
		NODE_DATA(nid) = careful_alloc(nid);
		setup_bootmem(nid);
		reserve_node_bootmem(nid);

and broke it up into:

	for_each_node(nid)
		NODE_DATA(nid) = careful_alloc(nid);
		setup_bootmem(nid);
	for_each_node(nid)
		reserve_node_bootmem(nid);

The issue comes in when 'careful_alloc()' is called on a node with
no memory. It falls back to using bootmem from a previously-initialized
node. But, bootmem has not yet been reserved when Jon's patch is
applied. It gives back bogus memory (0xc000000000000000) and pukes
later in boot.
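
To make the ordering hazard concrete, here's a minimal sketch of that
fallback (illustration only, assuming a simplified allocator; this is
not the real careful_allocation() from arch/powerpc/mm/numa.c, and
both helpers are hypothetical stand-ins):

	/*
	 * Illustration only: try_alloc_on_node() and
	 * alloc_from_any_initialized_node() are hypothetical.
	 */
	extern void *try_alloc_on_node(int nid, unsigned long size);
	extern void *alloc_from_any_initialized_node(unsigned long size);

	static void *careful_alloc_sketch(int nid, unsigned long size)
	{
		/* Prefer node-local memory... */
		void *ret = try_alloc_on_node(nid, size);

		/*
		 * ...but a memoryless node has none, so fall back to
		 * bootmem on a node initialized earlier.  If that node's
		 * reserved regions have not been marked yet, this can
		 * hand back memory that is already in use.
		 */
		if (!ret)
			ret = alloc_from_any_initialized_node(size);
		return ret;
	}

The fix below makes sure each node's reserved regions are marked
before the next node's structures are allocated.
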
The following patch collapses the loop back together. It also breaks
the mark_reserved_regions_for_nid() code out into a function and adds
some comments. I think a huge part of introducing this bug was that
the for loop was too long and hard to read.

The actual bug fix here is the:

+		if (end_pfn <= node->node_start_pfn ||
+		    start_pfn >= node_end_pfn)
+			continue;

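That check is the usual half-open interval overlap test: a reserved
range touches the node only if it ends after the node starts and
starts before the node ends. Checking the nid of just the start and
end pfns would not be enough, since a reserved area can span the
entire node. A standalone sketch of the same test (the function name
is mine, not from the patch):

	#include <stdbool.h>

	/* True if [start_pfn, end_pfn) overlaps [node_start, node_end). */
	static bool pfn_ranges_overlap(unsigned long start_pfn,
				       unsigned long end_pfn,
				       unsigned long node_start,
				       unsigned long node_end)
	{
		return end_pfn > node_start && start_pfn < node_end;
	}
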
Signed-off-by: Olaf Hering <olh@suse.de>
---
 arch/powerpc/mm/numa.c | 130 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 82 insertions(+), 48 deletions(-)

--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -19,6 +19,7 @@
 #include <linux/notifier.h>
 #include <linux/lmb.h>
 #include <linux/of.h>
+#include <linux/pfn.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/system.h>
@@ -867,10 +868,75 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 	.priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+	struct pglist_data *node = NODE_DATA(nid);
+	int i;
+
+	dbg("mark_reserved_regions_for_nid(%d) NODE_DATA: %p\n", nid, node);
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long physbase = lmb.reserved.region[i].base;
+		unsigned long size = lmb.reserved.region[i].size;
+		unsigned long start_pfn = physbase >> PAGE_SHIFT;
+		unsigned long end_pfn = PFN_UP(physbase + size);
+		struct node_active_region node_ar;
+		unsigned long node_end_pfn = node->node_start_pfn +
+					     node->node_spanned_pages;
+
+		/*
+		 * Check to make sure that this lmb.reserved area is
+		 * within the bounds of the node that we care about.
+		 * Checking the nid of the start and end points is not
+		 * sufficient because the reserved area could span the
+		 * entire node.
+		 */
+		if (end_pfn <= node->node_start_pfn ||
+		    start_pfn >= node_end_pfn)
+			continue;
+
+		get_node_active_region(start_pfn, &node_ar);
+		while (start_pfn < end_pfn &&
+			node_ar.start_pfn < node_ar.end_pfn) {
+			unsigned long reserve_size = size;
+			/*
+			 * if reserved region extends past active region
+			 * then trim size to active region
+			 */
+			if (end_pfn > node_ar.end_pfn)
+				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+					- (start_pfn << PAGE_SHIFT);
+			/*
+			 * Only worry about *this* node, others may not
+			 * yet have valid NODE_DATA().
+			 */
+			if (node_ar.nid == nid)
+				reserve_bootmem_node(NODE_DATA(node_ar.nid),
+						physbase, reserve_size,
+						BOOTMEM_DEFAULT);
+			/*
+			 * if reserved region is contained in the active region
+			 * then done.
+			 */
+			if (end_pfn <= node_ar.end_pfn)
+				break;
+
+			/*
+			 * reserved region extends past the active region
+			 *   get next active region that contains this
+			 *   reserved region
+			 */
+			start_pfn = node_ar.end_pfn;
+			physbase = start_pfn << PAGE_SHIFT;
+			size = size - reserve_size;
+			get_node_active_region(start_pfn, &node_ar);
+		}
+	}
+}
+
+
 void __init do_init_bootmem(void)
 {
 	int nid;
-	unsigned int i;
 
 	min_low_pfn = 0;
 	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -890,9 +956,16 @@ void __init do_init_bootmem(void)
 		unsigned long bootmem_paddr;
 		unsigned long bootmap_pages;
 
+		dbg("node %d is online\n", nid);
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-		/* Allocate the node structure node local if possible */
+		/*
+		 * Allocate the node structure node local if possible
+		 *
+		 * Be careful moving this around, as it relies on all
+		 * previous nodes' bootmem to be initialized and have
+		 * all reserved areas marked.
+		 */
 		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
@@ -924,53 +997,14 @@ void __init do_init_bootmem(void)
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
-	}
-
-	/* Mark reserved regions */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
-		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
-		struct node_active_region node_ar;
-
-		get_node_active_region(start_pfn, &node_ar);
-		while (start_pfn < end_pfn &&
-			node_ar.start_pfn < node_ar.end_pfn) {
-			unsigned long reserve_size = size;
-			/*
-			 * if reserved region extends past active region
-			 * then trim size to active region
-			 */
-			if (end_pfn > node_ar.end_pfn)
-				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- (start_pfn << PAGE_SHIFT);
-			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-				reserve_size, node_ar.nid);
-			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-					reserve_size, BOOTMEM_DEFAULT);
-			/*
-			 * if reserved region is contained in the active region
-			 * then done.
-			 */
-			if (end_pfn <= node_ar.end_pfn)
-				break;
-
-			/*
-			 * reserved region extends past the active region
-			 *   get next active region that contains this
-			 *   reserved region
-			 */
-			start_pfn = node_ar.end_pfn;
-			physbase = start_pfn << PAGE_SHIFT;
-			size = size - reserve_size;
-			get_node_active_region(start_pfn, &node_ar);
-		}
-	}
-
-
-	for_each_online_node(nid)
+		/*
+		 * Be very careful about moving this around.  Future
+		 * calls to careful_allocation() depend on this getting
+		 * done correctly.
+		 */
+		mark_reserved_regions_for_nid(nid);
 		sparse_memory_present_with_active_regions(nid);
+	}
 }
 
 void __init paging_init(void)