From: http://xenbits.xensource.com/linux-2.6.18-xen.hg?rev/39a8680e7a70
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1227879058 0
# Node ID 39a8680e7a70a28ce639c507fb6a9bc0aa7d8f14
# Parent d545a95fca739d0b1963b73a9eb64ea64a244e76
Subject: linux/x86: revert the effect of xen_limit_pages_to_max_mfn()
Patch-mainline: obsolete

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- head-2009-01-16.orig/arch/x86/mm/hypervisor.c	2009-01-16 10:17:45.000000000 +0100
+++ head-2009-01-16/arch/x86/mm/hypervisor.c	2008-12-01 11:25:57.000000000 +0100
@@ -374,6 +374,15 @@ void xen_destroy_contiguous_region(unsig
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
+static void undo_limit_pages(struct page *pages, unsigned int order)
+{
+	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
+	BUG_ON(order > MAX_CONTIG_ORDER);
+	xen_limit_pages_to_max_mfn(pages, order, 0);
+	ClearPageForeign(pages);
+	__free_pages(pages, order);
+}
+
 int xen_limit_pages_to_max_mfn(
 	struct page *pages, unsigned int order, unsigned int address_bits)
 {
@@ -402,16 +411,28 @@ int xen_limit_pages_to_max_mfn(
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return -ENOMEM;
 
-	bitmap_zero(limit_map, 1U << order);
+	if (address_bits) {
+		if (address_bits < PAGE_SHIFT)
+			return -EINVAL;
+		bitmap_zero(limit_map, 1U << order);
+	} else if (order) {
+		BUILD_BUG_ON(sizeof(pages->index) != sizeof(*limit_map));
+		for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
+			limit_map[i] = pages[i + 1].index;
+	} else
+		__set_bit(0, limit_map);
+
 	set_xen_guest_handle(exchange.in.extent_start, in_frames);
 	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 
 	/* 0. Scrub the pages. */
 	for (i = 0, n = 0; i < 1U<<order ; i++) {
 		page = &pages[i];
-		if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
-			continue;
-		__set_bit(i, limit_map);
+		if (address_bits) {
+			if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
+				continue;
+			__set_bit(i, limit_map);
+		}
 
 		if (!PageHighMem(page))
 			scrub_pages(page_address(page), 1);
@@ -497,7 +518,19 @@ int xen_limit_pages_to_max_mfn(
 
 	balloon_unlock(flags);
 
-	return success ? 0 : -ENOMEM;
+	if (!success)
+		return -ENOMEM;
+
+	if (address_bits) {
+		if (order) {
+			BUILD_BUG_ON(sizeof(*limit_map) != sizeof(pages->index));
+			for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
+				pages[i + 1].index = limit_map[i];
+		}
+		SetPageForeign(pages, undo_limit_pages);
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
 
--- head-2009-01-16.orig/arch/x86/mm/pgtable_32-xen.c	2009-01-16 10:17:45.000000000 +0100
+++ head-2009-01-16/arch/x86/mm/pgtable_32-xen.c	2008-12-01 11:25:57.000000000 +0100
@@ -152,6 +152,12 @@ pte_t *pte_alloc_one_kernel(struct mm_st
 	return pte;
 }
 
+static void _pte_free(struct page *page, unsigned int order)
+{
+	BUG_ON(order);
+	pte_free(page);
+}
+
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
@@ -162,7 +168,7 @@ struct page *pte_alloc_one(struct mm_str
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
 	if (pte) {
-		SetPageForeign(pte, pte_free);
+		SetPageForeign(pte, _pte_free);
 		init_page_count(pte);
 	}
 	return pte;
--- head-2009-01-16.orig/arch/x86/mm/pageattr_64-xen.c	2009-01-16 10:17:45.000000000 +0100
+++ head-2009-01-16/arch/x86/mm/pageattr_64-xen.c	2008-12-01 11:25:57.000000000 +0100
@@ -248,13 +248,19 @@ void _arch_exit_mmap(struct mm_struct *m
 	mm_unpin(mm);
 }
 
+static void _pte_free(struct page *page, unsigned int order)
+{
+	BUG_ON(order);
+	pte_free(page);
+}
+
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
 
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	if (pte) {
-		SetPageForeign(pte, pte_free);
+		SetPageForeign(pte, _pte_free);
 		init_page_count(pte);
 	}
 	return pte;
--- head-2009-01-16.orig/drivers/xen/core/gnttab.c	2009-01-16 10:17:45.000000000 +0100
+++ head-2009-01-16/drivers/xen/core/gnttab.c	2008-12-01 11:25:57.000000000 +0100
@@ -505,8 +505,9 @@ static int gnttab_map(unsigned int start
 	return 0;
 }
 
-static void gnttab_page_free(struct page *page)
+static void gnttab_page_free(struct page *page, unsigned int order)
 {
+	BUG_ON(order);
 	ClearPageForeign(page);
 	gnttab_reset_grant_page(page);
 	put_page(page);
--- head-2009-01-16.orig/drivers/xen/netback/netback.c	2009-01-16 10:17:45.000000000 +0100
+++ head-2009-01-16/drivers/xen/netback/netback.c	2008-12-01 11:25:57.000000000 +0100
@@ -55,7 +55,6 @@ struct netbk_tx_pending_inuse {
 };
 
 static void netif_idx_release(u16 pending_idx);
-static void netif_page_release(struct page *page);
 static void make_tx_response(netif_t *netif,
 			     netif_tx_request_t *txp,
 			     s8 st);
@@ -1436,8 +1435,9 @@ static void netif_idx_release(u16 pendin
 	tasklet_schedule(&net_tx_tasklet);
 }
 
-static void netif_page_release(struct page *page)
+static void netif_page_release(struct page *page, unsigned int order)
 {
+	BUG_ON(order);
 	netif_idx_release(netif_page_index(page));
 }
 
--- head-2009-01-16.orig/include/linux/page-flags.h	2009-01-16 10:01:00.000000000 +0100
+++ head-2009-01-16/include/linux/page-flags.h	2009-01-16 10:20:18.000000000 +0100
@@ -277,15 +277,15 @@ CLEARPAGEFLAG(Uptodate, uptodate)
 #define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
 #define SetPageForeign(_page, dtor) do {		\
 	set_bit(PG_foreign, &(_page)->flags);		\
-	BUG_ON((dtor) == (void (*)(struct page *))0);	\
+	BUG_ON((dtor) == (void (*)(struct page *, unsigned int))0); \
 	(_page)->index = (long)(dtor);			\
 } while (0)
 #define ClearPageForeign(page) do {			\
 	clear_bit(PG_foreign, &(page)->flags);		\
 	(page)->index = 0;				\
 } while (0)
-#define PageForeignDestructor(_page)			\
-	((void (*)(struct page *))(_page)->index)(_page)
+#define PageForeignDestructor(_page, order)		\
+	((void (*)(struct page *, unsigned int))(_page)->index)(_page, order)
 
 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
 
--- head-2009-01-16.orig/mm/page_alloc.c	2008-12-08 13:17:58.000000000 +0100
+++ head-2009-01-16/mm/page_alloc.c	2008-12-01 11:25:57.000000000 +0100
@@ -535,7 +535,7 @@ static void __free_pages_ok(struct page
 
 #ifdef CONFIG_XEN
 	if (PageForeign(page)) {
-		PageForeignDestructor(page);
+		PageForeignDestructor(page, order);
 		return;
 	}
 #endif
@@ -1003,7 +1003,7 @@ static void free_hot_cold_page(struct pa
 
 #ifdef CONFIG_XEN
 	if (PageForeign(page)) {
-		PageForeignDestructor(page);
+		PageForeignDestructor(page, 0);
 		return;
 	}
 #endif
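
Editor's note (not part of the patch): the change threads the allocation order through the PageForeign destructor so that blocks previously limited by xen_limit_pages_to_max_mfn() can be exchanged back when freed. The following is a minimal, self-contained user-space model of that dispatch pattern, assuming nothing beyond what the hunks above show; the struct layout, free_pages_model() and set_page_foreign() helpers are simplifications invented for illustration, not kernel code.

/* Compile with: cc -o foreign_model foreign_model.c */
#include <assert.h>
#include <stdio.h>

#define PG_foreign 1UL

/* Simplified stand-in for the kernel's struct page: the destructor
 * pointer is parked in ->index while the page is marked foreign,
 * mirroring the SetPageForeign()/PageForeignDestructor() macros. */
struct page {
	unsigned long flags;
	unsigned long index;
};

/* After this patch the destructor also receives the allocation order. */
typedef void (*foreign_dtor_t)(struct page *, unsigned int);

static void set_page_foreign(struct page *page, foreign_dtor_t dtor)
{
	assert(dtor != NULL);
	page->flags |= PG_foreign;
	page->index = (unsigned long)dtor;
}

static void clear_page_foreign(struct page *page)
{
	page->flags &= ~PG_foreign;
	page->index = 0;
}

static int page_foreign(const struct page *page)
{
	return (page->flags & PG_foreign) != 0;
}

/* Model destructor, analogous to undo_limit_pages(): it would exchange
 * the frames back and then let the block be freed normally. */
static void undo_limit_pages_model(struct page *page, unsigned int order)
{
	printf("undoing MFN limit for order-%u block\n", order);
	clear_page_foreign(page);
}

/* Model of the __free_pages_ok() hook: foreign pages are handed to
 * their destructor, now together with the order, instead of going
 * straight to the page allocator. */
static void free_pages_model(struct page *page, unsigned int order)
{
	if (page_foreign(page)) {
		((foreign_dtor_t)page->index)(page, order);
		return;
	}
	printf("freeing order-%u block normally\n", order);
}

int main(void)
{
	struct page p = { 0, 0 };

	set_page_foreign(&p, undo_limit_pages_model);
	free_pages_model(&p, 2);	/* dispatches to the destructor with order 2 */
	free_pages_model(&p, 2);	/* no longer foreign, freed normally */
	return 0;
}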