Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
From: jbeulich@novell.com
Patch-mainline: obsolete
References: 181869

This avoids losing precious special memory in places where any memory can be
used: when the XENMEM_exchange call fails, fall back to releasing the region
one page at a time, backing each page of the range with an ordinary kernel
page and handing the special (machine-contiguous) frame back to Xen via
XENMEM_decrease_reservation.

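In outline, the fallback walks the region one frame at a time: allocate an
ordinary kernel page, wire its machine frame into the virtual range in place
of the special frame, return the special frame to Xen, and balloon out the
replacement page. A condensed sketch of that loop (abridged from the
hypervisor.c hunk below; the locking, multicall batching, highmem handling
and error checks of the real code are stripped out here):

	for (i = 0; i < (1U << order); i++) {
		/* get an ordinary page to take over the pseudo-physical slot */
		struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);

		if (!page)
			break;	/* out of memory: keep the remaining frames */

		/* map the new page's machine frame at vstart and update the
		   p2m table, then hand the special frame (in_frame) back to
		   Xen via XENMEM_decrease_reservation -- done as one
		   multicall in the real code */

		/* the replacement page no longer has a machine frame; park
		   it in the balloon via the new helper added below */
		free_empty_pages(&page, 1);

		in_frame++;
		vstart += PAGE_SIZE;
	}
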
--- sle11-2009-08-26.orig/arch/x86/mm/hypervisor.c	2009-08-31 11:47:04.000000000 +0200
+++ sle11-2009-08-26/arch/x86/mm/hypervisor.c	2009-03-30 12:18:24.000000000 +0200
@@ -42,6 +42,7 @@
 #include <xen/interface/memory.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/highmem.h>
 #include <asm/tlbflush.h>
 #include <linux/highmem.h>
 
@@ -712,6 +713,83 @@ void xen_destroy_contiguous_region(unsig
 		BUG();
 
 	balloon_unlock(flags);
+
+	if (unlikely(!success)) {
+		/* Try hard to get the special memory back to Xen. */
+		exchange.in.extent_order = 0;
+		set_xen_guest_handle(exchange.in.extent_start, &in_frame);
+
+		for (i = 0; i < (1U<<order); i++) {
+			struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
+			unsigned long pfn;
+			mmu_update_t mmu;
+			unsigned int j = 0;
+
+			if (!page) {
+				printk(KERN_WARNING "Xen and kernel out of memory "
+				       "while trying to release an order %u "
+				       "contiguous region\n", order);
+				break;
+			}
+			pfn = page_to_pfn(page);
+
+			balloon_lock(flags);
+
+			if (!PageHighMem(page)) {
+				void *v = __va(pfn << PAGE_SHIFT);
+
+				scrub_pages(v, 1);
+				MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
+							__pte_ma(0), UVMF_INVLPG|UVMF_ALL);
+				++j;
+			}
+#ifdef CONFIG_XEN_SCRUB_PAGES
+			else {
+				scrub_pages(kmap(page), 1);
+				kunmap(page);
+				kmap_flush_unused();
+			}
+#endif
+
+			frame = pfn_to_mfn(pfn);
+			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+
+			MULTI_update_va_mapping(cr_mcl + j, vstart,
+						pfn_pte_ma(frame, PAGE_KERNEL),
+						UVMF_INVLPG|UVMF_ALL);
+			++j;
+
+			pfn = __pa(vstart) >> PAGE_SHIFT;
+			set_phys_to_machine(pfn, frame);
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+				mmu.val = pfn;
+				cr_mcl[j].op = __HYPERVISOR_mmu_update;
+				cr_mcl[j].args[0] = (unsigned long)&mmu;
+				cr_mcl[j].args[1] = 1;
+				cr_mcl[j].args[2] = 0;
+				cr_mcl[j].args[3] = DOMID_SELF;
+				++j;
+			}
+
+			cr_mcl[j].op = __HYPERVISOR_memory_op;
+			cr_mcl[j].args[0] = XENMEM_decrease_reservation;
+			cr_mcl[j].args[1] = (unsigned long)&exchange.in;
+
+			if (HYPERVISOR_multicall(cr_mcl, j + 1))
+				BUG();
+			BUG_ON(cr_mcl[j].result != 1);
+			while (j--)
+				BUG_ON(cr_mcl[j].result != 0);
+
+			balloon_unlock(flags);
+
+			free_empty_pages(&page, 1);
+
+			in_frame++;
+			vstart += PAGE_SIZE;
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
--- sle11-2009-08-26.orig/drivers/xen/balloon/balloon.c	2009-08-31 11:47:38.000000000 +0200
+++ sle11-2009-08-26/drivers/xen/balloon/balloon.c	2009-08-31 11:48:33.000000000 +0200
@@ -696,7 +696,7 @@ struct page **alloc_empty_pages_and_page
 	goto out;
 }
 
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
 {
 	unsigned long flags;
 	int i;
@@ -707,15 +707,28 @@ void free_empty_pages_and_pagevec(struct
 	balloon_lock(flags);
 	for (i = 0; i < nr_pages; i++) {
 		BUG_ON(page_count(pagevec[i]) != 1);
-		balloon_append(pagevec[i], 0);
+		balloon_append(pagevec[i], !free_vec);
 	}
+	if (!free_vec)
+		totalram_pages = bs.current_pages -= nr_pages;
 	balloon_unlock(flags);
 
-	kfree(pagevec);
+	if (free_vec)
+		kfree(pagevec);
 
 	schedule_work(&balloon_worker);
 }
 
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
+}
+
+void free_empty_pages(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
+}
+
 void balloon_release_driver_page(struct page *page)
 {
 	unsigned long flags;
--- sle11-2009-08-26.orig/include/xen/balloon.h	2009-08-31 11:47:04.000000000 +0200
+++ sle11-2009-08-26/include/xen/balloon.h	2009-03-16 16:40:33.000000000 +0100
@@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon
 struct page **alloc_empty_pages_and_pagevec(int nr_pages);
 void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
 
+/* Free an empty page range (not allocated through
+   alloc_empty_pages_and_pagevec), adding to the balloon. */
+void free_empty_pages(struct page **pagevec, int nr_pages);
+
 void balloon_release_driver_page(struct page *page);
 
 /*