Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
From: jbeulich@novell.com
Patch-mainline: obsolete
References: 181869

This avoids losing precious special memory in places where any memory can be
used.

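In outline, the fallback added below runs when the attempt to exchange the
region back to Xen in one go has failed: it returns the special
(machine-contiguous) memory one frame at a time, substituting an arbitrary
page for each frame. A condensed sketch follows, reusing the function's
existing locals (order, vstart, in_frame, frame, cr_mcl, i); it is
illustrative only, not part of the patch, and omits the highmem,
auto-translated-physmap and multicall error-handling cases the real code
covers:

	/* Illustrative sketch of the per-page fallback loop; simplified. */
	for (i = 0; i < (1U << order); i++) {
		struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
		unsigned long pfn = page_to_pfn(page);	/* replacement page */

		scrub_pages(__va(pfn << PAGE_SHIFT), 1);	/* wipe before donating */
		frame = pfn_to_mfn(pfn);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);	/* detach from the p2m */

		/* Point one page of the region's mapping and p2m entry at the
		 * replacement frame... */
		MULTI_update_va_mapping(cr_mcl, vstart,
					pfn_pte_ma(frame, PAGE_KERNEL),
					UVMF_INVLPG|UVMF_ALL);
		set_phys_to_machine(__pa(vstart) >> PAGE_SHIFT, frame);

		/* ...return the special frame (in_frame) to Xen with a
		 * single-extent XENMEM_decrease_reservation in the same
		 * multicall, and credit the donated page to the balloon. */
		free_empty_pages(&page, 1);

		in_frame++;
		vstart += PAGE_SIZE;
	}
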
Index: head-2008-11-20/arch/x86/mm/hypervisor.c
===================================================================
--- head-2008-11-20.orig/arch/x86/mm/hypervisor.c	2008-11-21 15:51:34.000000000 +0100
+++ head-2008-11-20/arch/x86/mm/hypervisor.c	2008-11-17 12:12:39.000000000 +0100
@@ -42,6 +42,7 @@
 #include <xen/interface/memory.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/highmem.h>
 #include <asm/tlbflush.h>
 #include <linux/highmem.h>
 
@@ -668,6 +669,83 @@ void xen_destroy_contiguous_region(unsig
 		BUG();
 
 	balloon_unlock(flags);
+
+	if (unlikely(!success)) {
+		/* Try hard to get the special memory back to Xen. */
+		exchange.in.extent_order = 0;
+		set_xen_guest_handle(exchange.in.extent_start, &in_frame);
+
+		for (i = 0; i < (1U<<order); i++) {
+			struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
+			unsigned long pfn;
+			mmu_update_t mmu;
+			unsigned int j = 0;
+
+			if (!page) {
+				printk(KERN_WARNING "Xen and kernel out of memory "
+				       "while trying to release an order %u "
+				       "contiguous region\n", order);
+				break;
+			}
+			pfn = page_to_pfn(page);
+
+			balloon_lock(flags);
+
+			if (!PageHighMem(page)) {
+				void *v = __va(pfn << PAGE_SHIFT);
+
+				scrub_pages(v, 1);
+				MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
+							__pte_ma(0), UVMF_INVLPG|UVMF_ALL);
+				++j;
+			}
+#ifdef CONFIG_XEN_SCRUB_PAGES
+			else {
+				scrub_pages(kmap(page), 1);
+				kunmap(page);
+				kmap_flush_unused();
+			}
+#endif
+
+			frame = pfn_to_mfn(pfn);
+			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+
+			MULTI_update_va_mapping(cr_mcl + j, vstart,
+						pfn_pte_ma(frame, PAGE_KERNEL),
+						UVMF_INVLPG|UVMF_ALL);
+			++j;
+
+			pfn = __pa(vstart) >> PAGE_SHIFT;
+			set_phys_to_machine(pfn, frame);
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+				mmu.val = pfn;
+				cr_mcl[j].op = __HYPERVISOR_mmu_update;
+				cr_mcl[j].args[0] = (unsigned long)&mmu;
+				cr_mcl[j].args[1] = 1;
+				cr_mcl[j].args[2] = 0;
+				cr_mcl[j].args[3] = DOMID_SELF;
+				++j;
+			}
+
+			cr_mcl[j].op = __HYPERVISOR_memory_op;
+			cr_mcl[j].args[0] = XENMEM_decrease_reservation;
+			cr_mcl[j].args[1] = (unsigned long)&exchange.in;
+
+			if (HYPERVISOR_multicall(cr_mcl, j + 1))
+				BUG();
+			BUG_ON(cr_mcl[j].result != 1);
+			while (j--)
+				BUG_ON(cr_mcl[j].result != 0);
+
+			balloon_unlock(flags);
+
+			free_empty_pages(&page, 1);
+
+			in_frame++;
+			vstart += PAGE_SIZE;
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
Index: head-2008-11-20/drivers/xen/balloon/balloon.c
===================================================================
--- head-2008-11-20.orig/drivers/xen/balloon/balloon.c	2008-11-21 15:56:56.000000000 +0100
+++ head-2008-11-20/drivers/xen/balloon/balloon.c	2008-11-21 16:00:18.000000000 +0100
@@ -687,7 +687,7 @@ struct page **alloc_empty_pages_and_page
 	goto out;
 }
 
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
 {
 	unsigned long flags;
 	int i;
@@ -702,11 +702,24 @@ void free_empty_pages_and_pagevec(struct
 	}
 	balloon_unlock(flags);
 
-	kfree(pagevec);
+	if (free_vec)
+		kfree(pagevec);
+	else
+		totalram_pages = bs.current_pages -= nr_pages;
 
 	schedule_work(&balloon_worker);
 }
 
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
+}
+
+void free_empty_pages(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
+}
+
 void balloon_release_driver_page(struct page *page)
 {
 	unsigned long flags;
Index: head-2008-11-20/include/xen/balloon.h
===================================================================
--- head-2008-11-20.orig/include/xen/balloon.h	2008-11-21 15:51:34.000000000 +0100
+++ head-2008-11-20/include/xen/balloon.h	2008-11-21 09:39:34.000000000 +0100
@@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon
 struct page **alloc_empty_pages_and_pagevec(int nr_pages);
 void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
 
+/* Free an empty page range (not allocated through
+   alloc_empty_pages_and_pagevec), adding to the balloon. */
+void free_empty_pages(struct page **pagevec, int nr_pages);
+
 void balloon_release_driver_page(struct page *page);
 
 /*
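
Usage note (not part of the patch): unlike free_empty_pages_and_pagevec(),
the new free_empty_pages() takes pages that came straight from the page
allocator rather than from alloc_empty_pages_and_pagevec(), so it does not
kfree() the vector; instead it credits the pages to the balloon by lowering
bs.current_pages and totalram_pages. The calling pattern, as used by the
fallback in hypervisor.c above (sketch only):

	struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);

	if (page) {
		/* ... give the page's frame to the region and return the
		 * special frame to Xen, as in the fallback loop above ... */
		free_empty_pages(&page, 1);	/* page now belongs to the balloon */
	}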