1 From: Greg Kroah-Hartman <greg@kroah.com>
2 Subject: Linux 2.6.27.19
4 Upstream 2.6.27.19 release from kernel.org
6 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
8 Automatically created from "patches.kernel.org/patch-2.6.27.18-19" by xen-port-patches.py
10 --- sle11-2009-03-16.orig/arch/x86/mm/hypervisor.c 2009-03-16 16:38:16.000000000 +0100
11 +++ sle11-2009-03-16/arch/x86/mm/hypervisor.c 2009-03-16 16:38:38.000000000 +0100
12 @@ -79,12 +79,12 @@ static void multicall_failed(const multi
16 -int xen_multicall_flush(bool ret_last) {
17 +static int _xen_multicall_flush(bool ret_last) {
18 struct lazy_mmu *lazy = &__get_cpu_var(lazy_mmu);
19 multicall_entry_t *mc = lazy->mc;
20 unsigned int count = lazy->nr_mc;
22 - if (!count || !use_lazy_mmu_mode())
27 @@ -112,6 +112,11 @@ int xen_multicall_flush(bool ret_last) {
32 +void xen_multicall_flush(bool force) {
33 + if (force || use_lazy_mmu_mode())
34 + _xen_multicall_flush(false);
36 EXPORT_SYMBOL(xen_multicall_flush);
38 int xen_multi_update_va_mapping(unsigned long va, pte_t pte,
39 @@ -130,7 +135,7 @@ int xen_multi_update_va_mapping(unsigned
42 if (unlikely(lazy->nr_mc == NR_MC))
43 - xen_multicall_flush(false);
44 + _xen_multicall_flush(false);
46 mc = lazy->mc + lazy->nr_mc++;
47 mc->op = __HYPERVISOR_update_va_mapping;
48 @@ -169,7 +174,7 @@ int xen_multi_mmu_update(mmu_update_t *s
49 merge = lazy->nr_mc && !commit
50 && mmu_may_merge(mc - 1, __HYPERVISOR_mmu_update, domid);
51 if (unlikely(lazy->nr_mc == NR_MC) && !merge) {
52 - xen_multicall_flush(false);
53 + _xen_multicall_flush(false);
55 commit = count > NR_MMU || success_count;
57 @@ -207,7 +212,7 @@ int xen_multi_mmu_update(mmu_update_t *s
61 - return commit ? xen_multicall_flush(true) : 0;
62 + return commit ? _xen_multicall_flush(true) : 0;
65 int xen_multi_mmuext_op(struct mmuext_op *src, unsigned int count,
66 @@ -291,7 +296,7 @@ int xen_multi_mmuext_op(struct mmuext_op
67 merge = lazy->nr_mc && !commit
68 && mmu_may_merge(mc - 1, __HYPERVISOR_mmuext_op, domid);
69 if (unlikely(lazy->nr_mc == NR_MC) && !merge) {
70 - xen_multicall_flush(false);
71 + _xen_multicall_flush(false);
73 commit = count > NR_MMUEXT || success_count;
75 @@ -338,7 +343,7 @@ int xen_multi_mmuext_op(struct mmuext_op
79 - return commit ? xen_multicall_flush(true) : 0;
80 + return commit ? _xen_multicall_flush(true) : 0;
83 void xen_l1_entry_update(pte_t *ptr, pte_t val)
84 --- sle11-2009-03-16.orig/arch/x86/mm/pageattr-xen.c 2009-03-16 16:38:34.000000000 +0100
85 +++ sle11-2009-03-16/arch/x86/mm/pageattr-xen.c 2009-03-16 16:38:38.000000000 +0100
86 @@ -639,6 +639,15 @@ static int __change_page_attr(struct cpa
91 + * If we're called with lazy mmu updates enabled, the
92 + * in-memory pte state may be stale. Flush pending updates to
93 + * bring them up to date.
95 + arch_flush_lazy_mmu_mode();*/
96 + if (arch_use_lazy_mmu_mode())
97 + xen_multicall_flush(true);
100 kpte = lookup_address(address, &level);
102 @@ -857,6 +866,14 @@ static int change_page_attr_set_clr(unsi
104 cpa_flush_all(cache);
107 + * If we've been called with lazy mmu updates enabled, then
108 + * make sure that everything gets flushed out before we
111 + arch_flush_lazy_mmu_mode();*/
112 + WARN_ON_ONCE(arch_use_lazy_mmu_mode() && !irq_count());
117 --- sle11-2009-03-16.orig/include/asm-x86/mach-xen/asm/hypervisor.h 2009-03-16 16:38:16.000000000 +0100
118 +++ sle11-2009-03-16/include/asm-x86/mach-xen/asm/hypervisor.h 2009-03-16 16:38:38.000000000 +0100
119 @@ -132,7 +132,7 @@ void scrub_pages(void *, unsigned int);
121 DECLARE_PER_CPU(bool, xen_lazy_mmu);
123 -int xen_multicall_flush(bool);
124 +void xen_multicall_flush(bool);
126 int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t,
127 unsigned long flags);