From fb341f572d26e0786167cd96b90cc4febed830cf Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti <mtosatti@redhat.com>
Date: Sat, 5 Dec 2009 12:34:11 -0200
Subject: KVM: MMU: remove prefault from invlpg handler

From: Marcelo Tosatti <mtosatti@redhat.com>

commit fb341f572d26e0786167cd96b90cc4febed830cf upstream.

The invlpg prefault optimization breaks Windows 2008 R2 occasionally.

The visible effect is that the invlpg handler instantiates a pte which
is, microseconds later, written with a different gfn by another vcpu.

The OS could have other mechanisms to prevent a present translation from
being used, which the hypervisor is unaware of.

While the documentation states that the cpu is at liberty to prefetch tlb
entries, it looks like this is not heeded, so remove tlb prefetch from
invlpg.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 arch/x86/kvm/paging_tmpl.h |   18 ------------------
 1 file changed, 18 deletions(-)

--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcp
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcp
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)