From: Rafael J. Wysocki <rjw@suse.de>
Subject: Hibernate: Take overlapping zones into account (rev. 2)
References: bnc#438914
Patch-mainline: 846705deb059c352cc0e5806d5964f815b8c6d98

It has been requested to make hibernation work with memory
hotplugging enabled and for this purpose the hibernation code has to
be reworked to take the possible overlapping of zones into account.
Thus, rework the hibernation memory bitmaps code to prevent
duplication of PFNs from occurring and add checks to make sure that
one page frame will not be marked as saveable many times.

Additionally, use list.h lists instead of open-coded lists to
implement the memory bitmaps.

Signed-off-by: Rafael J. Wysocki <rjw@suse.de>
---
 kernel/power/snapshot.c |  327 ++++++++++++++++++++++++------------------------
 1 file changed, 167 insertions(+), 160 deletions(-)

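For illustration only, and not part of the change applied below: the core of
create_mem_extents() is a classic interval merge over half-open
[start_pfn, end_pfn) ranges, which is what lets overlapping zones be
bitmapped exactly once. A minimal, self-contained userspace sketch of the
same idea follows; the zone values and the helper name add_range are
hypothetical, and plain C arrays stand in for <linux/list.h> lists.

	#include <stdio.h>

	struct extent { unsigned long start, end; };

	/*
	 * Insert the range [start, end) into a sorted array of disjoint
	 * extents, merging it with any extents it overlaps or touches.
	 * Returns the new number of extents.
	 */
	static int add_range(struct extent *ext, int n,
			     unsigned long start, unsigned long end)
	{
		int i = 0, j, k;

		/* skip the extents that lie entirely below the new range */
		while (i < n && ext[i].end < start)
			i++;

		/* no overlap at all: a new extent is necessary */
		if (i == n || end < ext[i].start) {
			for (j = n; j > i; j--)
				ext[j] = ext[j - 1];
			ext[i].start = start;
			ext[i].end = end;
			return n + 1;
		}

		/* merge the new range into ext[i] */
		if (start < ext[i].start)
			ext[i].start = start;
		if (end > ext[i].end)
			ext[i].end = end;

		/* more merging may be possible: swallow later extents we now reach */
		for (j = i + 1; j < n && ext[j].start <= ext[i].end; j++)
			if (ext[j].end > ext[i].end)
				ext[i].end = ext[j].end;
		k = j - (i + 1);	/* number of swallowed extents */
		for (; j < n; j++)
			ext[j - k] = ext[j];
		return n - k;
	}

	int main(void)
	{
		/* hypothetical overlapping zones, as (start_pfn, end_pfn) pairs */
		unsigned long zones[][2] = { {0, 4096}, {3072, 8192}, {16384, 20480} };
		struct extent ext[8];
		int i, n = 0;

		for (i = 0; i < 3; i++)
			n = add_range(ext, n, zones[i][0], zones[i][1]);
		for (i = 0; i < n; i++)
			printf("extent %d: [%lu, %lu)\n", i, ext[i].start, ext[i].end);
		return 0;	/* prints [0, 8192) and [16384, 20480) */
	}

The patch performs the same merge in place on a kernel list, so no array
shifting is needed; memory_bm_create() then covers each merged extent with
one bm_block per BM_BITS_PER_BLOCK page frames.
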
Index: linux-2.6.27/kernel/power/snapshot.c
===================================================================
--- linux-2.6.27.orig/kernel/power/snapshot.c
+++ linux-2.6.27/kernel/power/snapshot.c
@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/console.h>
 #include <linux/highmem.h>
+#include <linux/list.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_al
 	return ret;
 }
 
-static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
-{
-	free_list_of_pages(ca->chain, clear_page_nosave);
-	memset(ca, 0, sizeof(struct chain_allocator));
-}
-
 /**
  * Data types related to memory bitmaps.
  *
@@ -233,7 +228,7 @@ static void chain_free(struct chain_allo
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
 
 struct bm_block {
-	struct bm_block *next;		/* next element of the list */
+	struct list_head hook;		/* hook into a list of bitmap blocks */
 	unsigned long start_pfn;	/* pfn represented by the first bit */
 	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
 	unsigned long *data;		/* bitmap representing pages */
@@ -244,24 +239,15 @@ static inline unsigned long bm_block_bit
 	return bb->end_pfn - bb->start_pfn;
 }
 
-struct zone_bitmap {
-	struct zone_bitmap *next;	/* next element of the list */
-	unsigned long start_pfn;	/* minimal pfn in this zone */
-	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
-	struct bm_block *bm_blocks;	/* list of bitmap blocks */
-	struct bm_block *cur_block;	/* recently used bitmap block */
-};
-
 /* strcut bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *block;
 	int bit;
 };
 
 struct memory_bitmap {
-	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
+	struct list_head blocks;	/* list of bitmap blocks */
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
@@ -273,11 +259,7 @@ struct memory_bitmap {
 
 static void memory_bm_position_reset(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
-
-	zone_bm = bm->zone_bm_list;
-	bm->cur.zone_bm = zone_bm;
-	bm->cur.block = zone_bm->bm_blocks;
+	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
 	bm->cur.bit = 0;
 }
 
@@ -285,151 +267,184 @@ static void memory_bm_free(struct memory
 
 /**
  * create_bm_block_list - create a list of block bitmap objects
- */
-
-static inline struct bm_block *
-create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
+ * @nr_blocks - number of blocks to allocate
+ * @list - list to put the allocated blocks into
+ * @ca - chain allocator to be used for allocating memory
+ */
+static int create_bm_block_list(unsigned long pages,
+				struct list_head *list,
+				struct chain_allocator *ca)
 {
-	struct bm_block *bblist = NULL;
+	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 
 	while (nr_blocks-- > 0) {
 		struct bm_block *bb;
 
 		bb = chain_alloc(ca, sizeof(struct bm_block));
 		if (!bb)
-			return NULL;
-
-		bb->next = bblist;
-		bblist = bb;
+			return -ENOMEM;
+		list_add(&bb->hook, list);
 	}
-	return bblist;
+
+	return 0;
 }
 
+struct mem_extent {
+	struct list_head hook;
+	unsigned long start;
+	unsigned long end;
+};
+
 /**
- * create_zone_bm_list - create a list of zone bitmap objects
+ * free_mem_extents - free a list of memory extents
+ * @list - list of extents to empty
  */
+static void free_mem_extents(struct list_head *list)
+{
+	struct mem_extent *ext, *aux;
 
-static inline struct zone_bitmap *
-create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
+	list_for_each_entry_safe(ext, aux, list, hook) {
+		list_del(&ext->hook);
+		kfree(ext);
+	}
+}
+
+/**
+ * create_mem_extents - create a list of memory extents representing
+ *			contiguous ranges of PFNs
+ * @list - list to put the extents into
+ * @gfp_mask - mask to use for memory allocations
+ */
+static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 {
-	struct zone_bitmap *zbmlist = NULL;
+	struct zone *zone;
 
-	while (nr_zones-- > 0) {
-		struct zone_bitmap *zbm;
+	INIT_LIST_HEAD(list);
 
-		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
-		if (!zbm)
-			return NULL;
+	for_each_zone(zone) {
+		unsigned long zone_start, zone_end;
+		struct mem_extent *ext, *cur, *aux;
+
+		if (!populated_zone(zone))
+			continue;
+
+		zone_start = zone->zone_start_pfn;
+		zone_end = zone->zone_start_pfn + zone->spanned_pages;
+
+		list_for_each_entry(ext, list, hook)
+			if (zone_start <= ext->end)
+				break;
+
+		if (&ext->hook == list || zone_end < ext->start) {
+			/* New extent is necessary */
+			struct mem_extent *new_ext;
+
+			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
+			if (!new_ext) {
+				free_mem_extents(list);
+				return -ENOMEM;
+			}
+			new_ext->start = zone_start;
+			new_ext->end = zone_end;
+			list_add_tail(&new_ext->hook, &ext->hook);
+			continue;
+		}
 
-		zbm->next = zbmlist;
-		zbmlist = zbm;
+		/* Merge this zone's range of PFNs with the existing one */
+		if (zone_start < ext->start)
+			ext->start = zone_start;
+		if (zone_end > ext->end)
+			ext->end = zone_end;
+
+		/* More merging may be possible */
+		cur = ext;
+		list_for_each_entry_safe_continue(cur, aux, list, hook) {
+			if (zone_end < cur->start)
+				break;
+			if (zone_end < cur->end)
+				ext->end = cur->end;
+			list_del(&cur->hook);
+			kfree(cur);
+		}
 	}
-	return zbmlist;
+
+	return 0;
 }
 
 /**
  * memory_bm_create - allocate memory for a memory bitmap
  */
-
 static int
 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 {
 	struct chain_allocator ca;
-	struct zone *zone;
-	struct zone_bitmap *zone_bm;
-	struct bm_block *bb;
-	unsigned int nr;
+	struct list_head mem_extents;
+	struct mem_extent *ext;
+	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
+	INIT_LIST_HEAD(&bm->blocks);
 
-	/* Compute the number of zones */
-	nr = 0;
-	for_each_zone(zone)
-		if (populated_zone(zone))
-			nr++;
-
-	/* Allocate the list of zones bitmap objects */
-	zone_bm = create_zone_bm_list(nr, &ca);
-	bm->zone_bm_list = zone_bm;
-	if (!zone_bm) {
-		chain_free(&ca, PG_UNSAFE_CLEAR);
-		return -ENOMEM;
-	}
+	error = create_mem_extents(&mem_extents, gfp_mask);
+	if (error)
+		return error;
 
-	/* Initialize the zone bitmap objects */
-	for_each_zone(zone) {
-		unsigned long pfn;
+	list_for_each_entry(ext, &mem_extents, hook) {
+		struct bm_block *bb;
+		unsigned long pfn = ext->start;
+		unsigned long pages = ext->end - ext->start;
 
-		if (!populated_zone(zone))
-			continue;
+		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
 
-		zone_bm->start_pfn = zone->zone_start_pfn;
-		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-		/* Allocate the list of bitmap block objects */
-		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-		bb = create_bm_block_list(nr, &ca);
-		zone_bm->bm_blocks = bb;
-		zone_bm->cur_block = bb;
-		if (!bb)
-			goto Free;
+		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
+		if (error)
+			goto Error;
 
-		nr = zone->spanned_pages;
-		pfn = zone->zone_start_pfn;
-		/* Initialize the bitmap block objects */
-		while (bb) {
-			unsigned long *ptr;
-
-			ptr = get_image_page(gfp_mask, safe_needed);
-			bb->data = ptr;
-			if (!ptr)
-				goto Free;
+		list_for_each_entry_continue(bb, &bm->blocks, hook) {
+			bb->data = get_image_page(gfp_mask, safe_needed);
+			if (!bb->data) {
+				error = -ENOMEM;
+				goto Error;
+			}
 
 			bb->start_pfn = pfn;
-			if (nr >= BM_BITS_PER_BLOCK) {
+			if (pages >= BM_BITS_PER_BLOCK) {
 				pfn += BM_BITS_PER_BLOCK;
-				nr -= BM_BITS_PER_BLOCK;
+				pages -= BM_BITS_PER_BLOCK;
 			} else {
 				/* This is executed only once in the loop */
-				pfn += nr;
+				pfn += pages;
 			}
 			bb->end_pfn = pfn;
-			bb = bb->next;
 		}
-		zone_bm = zone_bm->next;
 	}
+
 	bm->p_list = ca.chain;
 	memory_bm_position_reset(bm);
-	return 0;
+ Exit:
+	free_mem_extents(&mem_extents);
+	return error;
 
- Free:
+ Error:
 	bm->p_list = ca.chain;
 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
-	return -ENOMEM;
+	goto Exit;
 }
 
 /**
  * memory_bm_free - free memory occupied by the memory bitmap @bm
  */
-
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;
 
-	/* Free the list of bit blocks for each zone_bitmap object */
-	zone_bm = bm->zone_bm_list;
-	while (zone_bm) {
-		struct bm_block *bb;
+	list_for_each_entry(bb, &bm->blocks, hook)
+		if (bb->data)
+			free_image_page(bb->data, clear_nosave_free);
 
-		bb = zone_bm->bm_blocks;
-		while (bb) {
-			if (bb->data)
-				free_image_page(bb->data, clear_nosave_free);
-			bb = bb->next;
-		}
-		zone_bm = zone_bm->next;
-	}
 	free_list_of_pages(bm->p_list, clear_nosave_free);
-	bm->zone_bm_list = NULL;
+
+	INIT_LIST_HEAD(&bm->blocks);
 }
 
 /**
@@ -437,38 +452,33 @@ static void memory_bm_free(struct memory
  * to given pfn. The cur_zone_bm member of @bm and the cur_block member
  * of @bm->cur_zone_bm are updated.
  */
-
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 				void **addr, unsigned int *bit_nr)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
 
-	/* Check if the pfn is from the current zone */
-	zone_bm = bm->cur.zone_bm;
-	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-		zone_bm = bm->zone_bm_list;
-		/* We don't assume that the zones are sorted by pfns */
-		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-			zone_bm = zone_bm->next;
-
-			if (!zone_bm)
-				return -EFAULT;
-		}
-		bm->cur.zone_bm = zone_bm;
-	}
-	/* Check if the pfn corresponds to the current bitmap block */
-	bb = zone_bm->cur_block;
+	/*
+	 * Check if the pfn corresponds to the current bitmap block and find
+	 * the block where it fits if this is not the case.
+	 */
+	bb = bm->cur.block;
 	if (pfn < bb->start_pfn)
-		bb = zone_bm->bm_blocks;
+		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn)
+				break;
+
+	if (pfn >= bb->end_pfn)
+		list_for_each_entry_continue(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
+				break;
 
-	while (pfn >= bb->end_pfn) {
-		bb = bb->next;
+	if (&bb->hook == &bm->blocks)
+		return -EFAULT;
 
-		BUG_ON(!bb);
-	}
-	zone_bm->cur_block = bb;
+	/* The block has been found */
+	bm->cur.block = bb;
 	pfn -= bb->start_pfn;
+	bm->cur.bit = pfn + 1;
 	*bit_nr = pfn;
 	*addr = bb->data;
 	return 0;
@@ -530,29 +540,21 @@ static int memory_bm_test_bit(struct mem
 
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
 	int bit;
 
+	bb = bm->cur.block;
 	do {
-		bb = bm->cur.block;
-		do {
-			bit = bm->cur.bit;
-			bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-			if (bit < bm_block_bits(bb))
-				goto Return_pfn;
-
-			bb = bb->next;
-			bm->cur.block = bb;
-			bm->cur.bit = 0;
-		} while (bb);
-		zone_bm = bm->cur.zone_bm->next;
-		if (zone_bm) {
-			bm->cur.zone_bm = zone_bm;
-			bm->cur.block = zone_bm->bm_blocks;
-			bm->cur.bit = 0;
-		}
-	} while (zone_bm);
+		bit = bm->cur.bit;
+		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
+		if (bit < bm_block_bits(bb))
+			goto Return_pfn;
+
+		bb = list_entry(bb->hook.next, struct bm_block, hook);
+		bm->cur.block = bb;
+		bm->cur.bit = 0;
+	} while (&bb->hook != &bm->blocks);
+
 	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
 
@@ -808,8 +810,7 @@ static unsigned int count_free_highmem_p
  * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
  * and it isn't a part of a free chunk of pages.
  */
-
-static struct page *saveable_highmem_page(unsigned long pfn)
+static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
 
@@ -817,6 +818,8 @@ static struct page *saveable_highmem_pag
 		return NULL;
 
 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;
 
 	BUG_ON(!PageHighMem(page));
 
@@ -846,13 +849,16 @@ unsigned int count_highmem_pages(void)
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_highmem_page(pfn))
+			if (saveable_highmem_page(zone, pfn))
 				n++;
 	}
 	return n;
 }
 #else
-static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
+{
+	return NULL;
+}
 #endif /* CONFIG_HIGHMEM */
 
 /**
@@ -863,8 +869,7 @@ static inline void *saveable_highmem_pag
  * of pages statically defined as 'unsaveable', and it isn't a part of
  * a free chunk of pages.
  */
-
-static struct page *saveable_page(unsigned long pfn)
+static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
 
@@ -872,6 +877,8 @@ static struct page *saveable_page(unsign
 		return NULL;
 
 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;
 
 	BUG_ON(PageHighMem(page));
 
@@ -903,7 +910,7 @@ unsigned int count_data_pages(void)
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if(saveable_page(pfn))
+			if (saveable_page(zone, pfn))
 				n++;
 	}
 	return n;
@@ -944,7 +951,7 @@ static inline struct page *
 page_is_saveable(struct zone *zone, unsigned long pfn)
 {
 	return is_highmem(zone) ?
-		saveable_highmem_page(pfn) : saveable_page(pfn);
+		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
 }
 
 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
@@ -975,7 +982,7 @@ static void copy_data_page(unsigned long
 	}
 }
 #else
-#define page_is_saveable(zone, pfn)	saveable_page(pfn)
+#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
 
 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 {