From: jbeulich@novell.com
Subject: consolidate pmd/pud/pgd entry handling
Patch-mainline: obsolete

--- sle11-2009-04-09.orig/arch/x86/mm/hypervisor.c	2009-03-30 12:18:24.000000000 +0200
+++ sle11-2009-04-09/arch/x86/mm/hypervisor.c	2009-03-16 16:40:37.000000000 +0100
@@ -357,31 +357,91 @@ void xen_l1_entry_update(pte_t *ptr, pte
 }
 EXPORT_SYMBOL_GPL(xen_l1_entry_update);
 
+static void do_lN_entry_update(mmu_update_t *mmu, unsigned int mmu_count,
+			       struct page *page)
+{
+	if (likely(page)) {
+		multicall_entry_t mcl[2];
+		unsigned long pfn = page_to_pfn(page);
+
+		MULTI_update_va_mapping(mcl,
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(pfn, PAGE_KERNEL_RO), 0);
+		SetPagePinned(page);
+		MULTI_mmu_update(mcl + 1, mmu, mmu_count, NULL, DOMID_SELF);
+		if (unlikely(HYPERVISOR_multicall_check(mcl, 2, NULL)))
+			BUG();
+	} else if (unlikely(HYPERVISOR_mmu_update(mmu, mmu_count,
+						  NULL, DOMID_SELF) < 0))
+		BUG();
+}
+
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
 	mmu_update_t u;
+	struct page *page = NULL;
+
+	if (likely(pmd_present(val)) && likely(!pmd_large(val))
+	    && likely(mem_map)
+	    && likely(PagePinned(virt_to_page(ptr)))) {
+		page = pmd_page(val);
+		if (unlikely(PagePinned(page)))
+			page = NULL;
+		else if (PageHighMem(page)) {
+#ifdef CONFIG_HIGHPTE
+			BUG();
+#endif
+			kmap_flush_unused();
+			page = NULL;
+		}
+	}
 	u.ptr = virt_to_machine(ptr);
 	u.val = __pmd_val(val);
-	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+	do_lN_entry_update(&u, 1, page);
 }
 
 #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
 	mmu_update_t u;
+	struct page *page = NULL;
+
+	if (likely(pud_present(val))
+#ifdef CONFIG_X86_64
+	    && likely(!pud_large(val))
+#endif
+	    && likely(mem_map)
+	    && likely(PagePinned(virt_to_page(ptr)))) {
+		page = pud_page(val);
+		if (unlikely(PagePinned(page)))
+			page = NULL;
+	}
 	u.ptr = virt_to_machine(ptr);
 	u.val = __pud_val(val);
-	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+	do_lN_entry_update(&u, 1, page);
 }
 #endif
 
 #ifdef CONFIG_X86_64
-void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
+void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val)
 {
-	mmu_update_t u;
-	u.ptr = virt_to_machine(ptr);
-	u.val = __pgd_val(val);
-	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+	mmu_update_t u[2];
+	struct page *page = NULL;
+
+	if (likely(pgd_present(val)) && likely(mem_map)
+	    && likely(PagePinned(virt_to_page(ptr)))) {
+		page = pgd_page(val);
+		if (unlikely(PagePinned(page)))
+			page = NULL;
+	}
+	u[0].ptr = virt_to_machine(ptr);
+	u[0].val = __pgd_val(val);
+	if (user) {
+		u[1].ptr = virt_to_machine(__user_pgd(ptr));
+		u[1].val = __pgd_val(val);
+		do_lN_entry_update(u, 2, page);
+	} else
+		do_lN_entry_update(u, 1, page);
 }
 #endif /* CONFIG_X86_64 */
 
--- sle11-2009-04-09.orig/arch/x86/mm/init_32-xen.c	2009-02-17 18:06:20.000000000 +0100
+++ sle11-2009-04-09/arch/x86/mm/init_32-xen.c	2009-03-16 17:39:12.000000000 +0100
@@ -728,6 +728,8 @@ static void __init zone_sizes_init(void)
 #endif
 
 	free_area_init_nodes(max_zone_pfns);
+
+	xen_init_pgd_pin();
 }
 
 void __init setup_bootmem_allocator(void)
@@ -1089,8 +1091,6 @@ void __init mem_init(void)
 	cpa_init();
 	save_pg_dir();
 	zap_low_mappings();
-
-	SetPagePinned(virt_to_page(init_mm.pgd));
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
--- sle11-2009-04-09.orig/arch/x86/mm/init_64-xen.c	2009-03-16 16:39:50.000000000 +0100
+++ sle11-2009-04-09/arch/x86/mm/init_64-xen.c	2009-03-16 16:40:37.000000000 +0100
@@ -195,7 +195,10 @@ set_pte_vaddr_pud(pud_t *pud_page, unsig
 	if (pud_none(*pud)) {
 		pmd = (pmd_t *) spp_getpage();
 		make_page_readonly(pmd, XENFEAT_writable_page_tables);
-		pud_populate(&init_mm, pud, pmd);
+		if (!after_bootmem)
+			xen_l3_entry_update(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+		else
+			pud_populate(&init_mm, pud, pmd);
 		if (pmd != pmd_offset(pud, 0)) {
 			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 			       pmd, pmd_offset(pud, 0));
@@ -478,7 +481,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
 					   XENFEAT_writable_page_tables);
 			*pmd = __pmd(pte_phys | _PAGE_TABLE);
 		} else {
-			make_page_readonly(pte, XENFEAT_writable_page_tables);
 			spin_lock(&init_mm.page_table_lock);
 			pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
 			spin_unlock(&init_mm.page_table_lock);
@@ -547,7 +549,6 @@ phys_pud_init(pud_t *pud_page, unsigned
 		else
 			*pud = __pud(pmd_phys | _PAGE_TABLE);
 	} else {
-		make_page_readonly(pmd, XENFEAT_writable_page_tables);
 		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
@@ -775,7 +776,6 @@ static unsigned long __meminit kernel_ph
 				    XENFEAT_writable_page_tables);
 		xen_l4_entry_update(pgd, __pgd(pud_phys | _PAGE_TABLE));
 	} else {
-		make_page_readonly(pud, XENFEAT_writable_page_tables);
 		spin_lock(&init_mm.page_table_lock);
 		pgd_populate(&init_mm, pgd, __va(pud_phys));
 		spin_unlock(&init_mm.page_table_lock);
@@ -1004,7 +1004,7 @@ void __init paging_init(void)
 	sparse_init();
 	free_area_init_nodes(max_zone_pfns);
 
-	SetPagePinned(virt_to_page(init_mm.pgd));
+	xen_init_pgd_pin();
 }
 #endif
 
--- sle11-2009-04-09.orig/arch/x86/mm/pgtable-xen.c	2009-03-16 16:38:16.000000000 +0100
+++ sle11-2009-04-09/arch/x86/mm/pgtable-xen.c	2009-04-09 14:54:03.000000000 +0200
@@ -42,16 +42,16 @@ pgtable_t pte_alloc_one(struct mm_struct
 void __pte_free(pgtable_t pte)
 {
 	if (!PageHighMem(pte)) {
-		unsigned long va = (unsigned long)page_address(pte);
-		unsigned int level;
-		pte_t *ptep = lookup_address(va, &level);
-
-		BUG_ON(!ptep || level != PG_LEVEL_4K || !pte_present(*ptep));
-		if (!pte_write(*ptep)
-		    && HYPERVISOR_update_va_mapping(va,
-						    mk_pte(pte, PAGE_KERNEL),
-						    0))
-			BUG();
+		if (PagePinned(pte)) {
+			unsigned long pfn = page_to_pfn(pte);
+
+			if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
+							 pfn_pte(pfn,
+								 PAGE_KERNEL),
+							 0))
+				BUG();
+			ClearPagePinned(pte);
+		}
 	} else
 #ifdef CONFIG_HIGHPTE
 		ClearPagePinned(pte);
@@ -93,14 +93,15 @@ pmd_t *pmd_alloc_one(struct mm_struct *m
 
 void __pmd_free(pgtable_t pmd)
 {
-	unsigned long va = (unsigned long)page_address(pmd);
-	unsigned int level;
-	pte_t *ptep = lookup_address(va, &level);
-
-	BUG_ON(!ptep || level != PG_LEVEL_4K || !pte_present(*ptep));
-	if (!pte_write(*ptep)
-	    && HYPERVISOR_update_va_mapping(va, mk_pte(pmd, PAGE_KERNEL), 0))
-		BUG();
+	if (PagePinned(pmd)) {
+		unsigned long pfn = page_to_pfn(pmd);
+
+		if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
+						 pfn_pte(pfn, PAGE_KERNEL),
+						 0))
+			BUG();
+		ClearPagePinned(pmd);
+	}
 
 	ClearPageForeign(pmd);
 	init_page_count(pmd);
@@ -192,21 +193,20 @@ static inline unsigned int pgd_walk_set_
 {
 	unsigned long pfn = page_to_pfn(page);
 
-	if (PageHighMem(page)) {
-		if (pgprot_val(flags) & _PAGE_RW)
-			ClearPagePinned(page);
-		else
-			SetPagePinned(page);
-	} else {
-		MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
-					(unsigned long)__va(pfn << PAGE_SHIFT),
-					pfn_pte(pfn, flags), 0);
-		if (unlikely(++seq == PIN_BATCH)) {
-			if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
-								PIN_BATCH, NULL)))
-				BUG();
-			seq = 0;
-		}
+	if (pgprot_val(flags) & _PAGE_RW)
+		ClearPagePinned(page);
+	else
+		SetPagePinned(page);
+	if (PageHighMem(page))
+		return seq;
+	MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				pfn_pte(pfn, flags), 0);
+	if (unlikely(++seq == PIN_BATCH)) {
+		if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+							PIN_BATCH, NULL)))
+			BUG();
+		seq = 0;
 	}
 
 	return seq;
@@ -253,6 +253,16 @@ static void pgd_walk(pgd_t *pgd_base, pg
 		}
 	}
 
+#ifdef CONFIG_X86_PAE
+	for (; g < PTRS_PER_PGD; g++, pgd++) {
+		BUG_ON(pgd_none(*pgd));
+		pud = pud_offset(pgd, 0);
+		BUG_ON(pud_none(*pud));
+		pmd = pmd_offset(pud, 0);
+		seq = pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq);
+	}
+#endif
+
 	mcl = per_cpu(pb_mcl, cpu);
 #ifdef CONFIG_X86_64
 	if (unlikely(seq > PIN_BATCH - 2)) {
@@ -288,6 +298,51 @@ static void pgd_walk(pgd_t *pgd_base, pg
 	put_cpu();
 }
 
+void __init xen_init_pgd_pin(void)
+{
+	pgd_t *pgd = init_mm.pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned int g, u, m;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
+	SetPagePinned(virt_to_page(pgd));
+	for (g = 0; g < PTRS_PER_PGD; g++, pgd++) {
+#ifndef CONFIG_X86_PAE
+		if (g >= pgd_index(HYPERVISOR_VIRT_START)
+		    && g <= pgd_index(HYPERVISOR_VIRT_END - 1))
+			continue;
+#endif
+		if (!pgd_present(*pgd))
+			continue;
+		pud = pud_offset(pgd, 0);
+		if (PTRS_PER_PUD > 1) /* not folded */
+			SetPagePinned(virt_to_page(pud));
+		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
+			if (!pud_present(*pud))
+				continue;
+			pmd = pmd_offset(pud, 0);
+			if (PTRS_PER_PMD > 1) /* not folded */
+				SetPagePinned(virt_to_page(pmd));
+			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
+#ifdef CONFIG_X86_PAE
+				if (g == pgd_index(HYPERVISOR_VIRT_START)
+				    && m >= pmd_index(HYPERVISOR_VIRT_START))
+					continue;
+#endif
+				if (!pmd_present(*pmd))
+					continue;
+				SetPagePinned(pmd_page(*pmd));
+			}
+		}
+	}
+#ifdef CONFIG_X86_64
+	SetPagePinned(virt_to_page(level3_user_pgt));
+#endif
+}
+
 static void __pgd_pin(pgd_t *pgd)
 {
 	pgd_walk(pgd, PAGE_KERNEL_RO);
@@ -480,21 +535,18 @@ static void pgd_dtor(void *pgd)
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
-	struct page *page = virt_to_page(pmd);
-	unsigned long pfn = page_to_pfn(page);
-
-	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
-
 	/* Note: almost everything apart from _PAGE_PRESENT is
 	   reserved at the pmd (PDPT) level. */
-	if (PagePinned(virt_to_page(mm->pgd))) {
-		BUG_ON(PageHighMem(page));
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			(unsigned long)__va(pfn << PAGE_SHIFT),
-			pfn_pte(pfn, PAGE_KERNEL_RO), 0));
-		set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
-	} else
-		*pudp = __pud(__pa(pmd) | _PAGE_PRESENT);
+	pud_t pud = __pud(__pa(pmd) | _PAGE_PRESENT);
+
+	paravirt_alloc_pmd(mm, page_to_pfn(virt_to_page(pmd)));
+
+	if (likely(!PagePinned(virt_to_page(pudp)))) {
+		*pudp = pud;
+		return;
+	}
+
+	set_pud(pudp, pud);
 
 	/*
 	 * According to Intel App note "TLBs, Paging-Structure Caches,
@@ -589,13 +641,10 @@ static void pgd_prepopulate_pmd(struct m
 	     i++, pud++, addr += PUD_SIZE) {
 		pmd_t *pmd = pmds[i];
 
-		if (i >= KERNEL_PGD_BOUNDARY) {
+		if (i >= KERNEL_PGD_BOUNDARY)
 			memcpy(pmd,
 			       (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
 			       sizeof(pmd_t) * PTRS_PER_PMD);
-			make_lowmem_page_readonly(
-				pmd, XENFEAT_writable_page_tables);
-		}
 
 		/* It is safe to poke machine addresses of pmds under the pgd_lock. */
 		pud_populate(mm, pud, pmd);
--- sle11-2009-04-09.orig/include/asm-x86/mach-xen/asm/hypervisor.h	2009-03-12 16:43:54.000000000 +0100
+++ sle11-2009-04-09/include/asm-x86/mach-xen/asm/hypervisor.h	2009-03-16 16:40:37.000000000 +0100
@@ -94,10 +94,12 @@ void xen_invlpg(unsigned long ptr);
 void xen_l1_entry_update(pte_t *ptr, pte_t val);
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
 void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
-void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
+void xen_l4_entry_update(pgd_t *ptr, int user, pgd_t val); /* x86_64 only */
 void xen_pgd_pin(unsigned long ptr);
 void xen_pgd_unpin(unsigned long ptr);
 
+void xen_init_pgd_pin(void);
+
 void xen_set_ldt(const void *ptr, unsigned int ents);
 
 #ifdef CONFIG_SMP
@@ -331,6 +333,18 @@ MULTI_update_va_mapping(
 }
 
 static inline void
+MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req,
+		 unsigned int count, unsigned int *success_count,
+		 domid_t domid)
+{
+	mcl->op = __HYPERVISOR_mmu_update;
+	mcl->args[0] = (unsigned long)req;
+	mcl->args[1] = count;
+	mcl->args[2] = (unsigned long)success_count;
+	mcl->args[3] = domid;
+}
+
+static inline void
 MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
 		     void *uop, unsigned int count)
 {
--- sle11-2009-04-09.orig/include/asm-x86/mach-xen/asm/pgalloc.h	2009-03-16 16:38:16.000000000 +0100
+++ sle11-2009-04-09/include/asm-x86/mach-xen/asm/pgalloc.h	2009-03-16 16:40:37.000000000 +0100
@@ -64,20 +64,16 @@ static inline void pmd_populate(struct m
 				struct page *pte)
 {
 	unsigned long pfn = page_to_pfn(pte);
+	pmd_t ent = __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE);
 
 	paravirt_alloc_pte(mm, pfn);
-	if (PagePinned(virt_to_page(mm->pgd))) {
-		if (!PageHighMem(pte))
-			BUG_ON(HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(pfn, PAGE_KERNEL_RO), 0));
-#ifndef CONFIG_X86_64
-		else if (!TestSetPagePinned(pte))
-			kmap_flush_unused();
+	if (PagePinned(virt_to_page(pmd))) {
+#ifndef CONFIG_HIGHPTE
+		BUG_ON(PageHighMem(pte));
 #endif
-		set_pmd(pmd, __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
+		set_pmd(pmd, ent);
 	} else
-		*pmd = __pmd(((pmdval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE);
+		*pmd = ent;
 }
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
@@ -99,39 +95,28 @@ extern void pud_populate(struct mm_struc
 #else /* !CONFIG_X86_PAE */
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
+	pud_t ent = __pud(_PAGE_TABLE | __pa(pmd));
+
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
-	if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) {
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			       (unsigned long)pmd,
-			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
-				       PAGE_KERNEL_RO), 0));
-		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
-	} else
-		*pud = __pud(_PAGE_TABLE | __pa(pmd));
+	if (PagePinned(virt_to_page(pud)))
+		set_pud(pud, ent);
+	else
+		*pud = ent;
 }
 #endif /* CONFIG_X86_PAE */
 
 #if PAGETABLE_LEVELS > 3
 #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
 
-/*
- * We need to use the batch mode here, but pgd_pupulate() won't be
- * be called frequently.
- */
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
+	pgd_t ent = __pgd(_PAGE_TABLE | __pa(pud));
+
 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
-	if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) {
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			       (unsigned long)pud,
-			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
-				       PAGE_KERNEL_RO), 0));
-		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
-		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
-	} else {
-		*(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
-		*__user_pgd(pgd) = *(pgd);
-	}
+	if (unlikely(PagePinned(virt_to_page(pgd))))
+		xen_l4_entry_update(pgd, 1, ent);
+	else
+		*__user_pgd(pgd) = *pgd = ent;
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
--- sle11-2009-04-09.orig/include/asm-x86/mach-xen/asm/pgtable-3level.h	2009-03-16 16:38:16.000000000 +0100
+++ sle11-2009-04-09/include/asm-x86/mach-xen/asm/pgtable-3level.h	2009-03-16 16:40:37.000000000 +0100
@@ -76,12 +76,15 @@ static inline void __xen_pte_clear(pte_t
 	ptep->pte_high = 0;
 }
 
-static inline void xen_pmd_clear(pmd_t *pmd)
-{
-	xen_l2_entry_update(pmd, __pmd(0));
-}
+#define xen_pmd_clear(pmd) \
+({ \
+	pmd_t *__pmdp = (pmd); \
+	PagePinned(virt_to_page(__pmdp)) \
+	? set_pmd(__pmdp, __pmd(0)) \
+	: (void)(*__pmdp = __pmd(0)); \
+})
 
-static inline void pud_clear(pud_t *pudp)
+static inline void __xen_pud_clear(pud_t *pudp)
 {
 	pgdval_t pgd;
 
@@ -102,13 +105,21 @@ static inline void pud_clear(pud_t *pudp
 	xen_tlb_flush();
 }
 
-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define xen_pud_clear(pudp) \
+({ \
+	pud_t *__pudp = (pudp); \
+	PagePinned(virt_to_page(__pudp)) \
+	? __xen_pud_clear(__pudp) \
+	: (void)(*__pudp = __pud(0)); \
+})
+
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
 
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))
 
 #ifdef CONFIG_SMP
--- sle11-2009-04-09.orig/include/asm-x86/mach-xen/asm/pgtable_64.h	2009-03-16 16:38:16.000000000 +0100
+++ sle11-2009-04-09/include/asm-x86/mach-xen/asm/pgtable_64.h	2009-03-16 16:40:37.000000000 +0100
@@ -110,33 +110,41 @@ static inline void xen_set_pmd(pmd_t *pm
 	xen_l2_entry_update(pmdp, pmd);
 }
 
-static inline void xen_pmd_clear(pmd_t *pmd)
-{
-	xen_set_pmd(pmd, xen_make_pmd(0));
-}
+#define xen_pmd_clear(pmd) \
+({ \
+	pmd_t *__pmdp = (pmd); \
+	PagePinned(virt_to_page(__pmdp)) \
+	? set_pmd(__pmdp, xen_make_pmd(0)) \
+	: (void)(*__pmdp = xen_make_pmd(0)); \
+})
 
 static inline void xen_set_pud(pud_t *pudp, pud_t pud)
 {
 	xen_l3_entry_update(pudp, pud);
 }
 
-static inline void xen_pud_clear(pud_t *pud)
-{
-	xen_set_pud(pud, xen_make_pud(0));
-}
+#define xen_pud_clear(pud) \
+({ \
+	pud_t *__pudp = (pud); \
+	PagePinned(virt_to_page(__pudp)) \
+	? set_pud(__pudp, xen_make_pud(0)) \
+	: (void)(*__pudp = xen_make_pud(0)); \
+})
 
 #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
 
 static inline void xen_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	xen_l4_entry_update(pgdp, pgd);
+	xen_l4_entry_update(pgdp, 0, pgd);
 }
 
-static inline void xen_pgd_clear(pgd_t *pgd)
-{
-	xen_set_pgd(pgd, xen_make_pgd(0));
-	xen_set_pgd(__user_pgd(pgd), xen_make_pgd(0));
-}
+#define xen_pgd_clear(pgd) \
+({ \
+	pgd_t *__pgdp = (pgd); \
+	PagePinned(virt_to_page(__pgdp)) \
+	? xen_l4_entry_update(__pgdp, 1, xen_make_pgd(0)) \
+	: (void)(*__user_pgd(__pgdp) = *__pgdp = xen_make_pgd(0)); \
+})
 
 #define pte_same(a, b) ((a).pte == (b).pte)
 