From 152482580a1b0accb60676063a1ac57b2d12daf6 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Tue, 5 Feb 2019 12:54:17 -0800
Subject: KVM: Call kvm_arch_memslots_updated() before updating memslots

From: Sean Christopherson <sean.j.christopherson@intel.com>

commit 152482580a1b0accb60676063a1ac57b2d12daf6 upstream.

kvm_arch_memslots_updated() is at this point in time an x86-specific
hook for handling MMIO generation wraparound. x86 stashes 19 bits of
the memslots generation number in its MMIO sptes in order to avoid
full page fault walks for repeat faults on emulated MMIO addresses.
Because only 19 bits are used, wrapping the MMIO generation number is
possible, if unlikely. kvm_arch_memslots_updated() alerts x86 that
the generation has changed so that it can invalidate all MMIO sptes in
case the effective MMIO generation has wrapped so as to avoid using a
stale spte, e.g. a (very) old spte that was created with generation==0.
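
As a minimal standalone sketch of that caching scheme (the 19-bit width
comes from the changelog above, but the helper name and layout are
invented here for illustration and are not the kernel's actual MMIO
spte code):

	#include <stdbool.h>
	#include <stdint.h>

	#define MMIO_GEN_BITS	19
	#define MMIO_GEN_MASK	((1ULL << MMIO_GEN_BITS) - 1)

	/*
	 * A cached MMIO spte remembers only the truncated generation, so
	 * two generations that differ by a multiple of 2^19 compare as
	 * equal here.  That aliasing is the wraparound case the arch hook
	 * must handle by zapping all MMIO sptes when the masked
	 * generation wraps back to 0.
	 */
	static bool mmio_spte_is_stale(uint64_t spte_gen, uint64_t memslots_gen)
	{
		return spte_gen != (memslots_gen & MMIO_GEN_MASK);
	}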

Given that the purpose of kvm_arch_memslots_updated() is to prevent
consuming stale entries, it needs to be called before the new generation
is propagated to memslots. Invalidating the MMIO sptes after updating
memslots means that there is a window where a vCPU could dereference
the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO
spte that was created with (pre-wrap) generation==0.
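
The fix below is purely an ordering change. Schematically (a simplified,
non-compilable sketch of install_new_memslots(), using the names from
the kvm_main.c hunk at the end of this patch):

	/* Before (racy): the new generation is published before stale
	 * MMIO sptes are zapped, so a vCPU can match an old spte against
	 * the post-wrap generation in the window in between.
	 */
	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
	kvm_arch_memslots_updated(kvm, slots);

	/* After (fixed): invalidate against the new generation first,
	 * then publish it in the memslots.
	 */
	gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
	kvm_arch_memslots_updated(kvm, gen);
	slots->generation = gen;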

Fixes: e59dbe09f8e6 ("KVM: Introduce kvm_arch_memslots_updated()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/mips/include/asm/kvm_host.h    | 2 +-
 arch/powerpc/include/asm/kvm_host.h | 2 +-
 arch/s390/include/asm/kvm_host.h    | 2 +-
 arch/x86/include/asm/kvm_host.h     | 2 +-
 arch/x86/kvm/mmu.c                  | 4 ++--
 arch/x86/kvm/x86.c                  | 4 ++--
 include/linux/kvm_host.h            | 2 +-
 virt/kvm/arm/mmu.c                  | 2 +-
 virt/kvm/kvm_main.c                 | 7 +++++--
 9 files changed, 15 insertions(+), 12 deletions(-)

--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_uns
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -822,7 +822,7 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *slot) {}
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1194,7 +1194,7 @@ void kvm_mmu_clear_dirty_pt_masked(struc
 				   struct kvm_memory_slot *slot,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5774,13 +5774,13 @@ static bool kvm_has_zapped_obsolete_page
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }

-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	/*
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
+	if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9108,13 +9108,13 @@ out_free:
 	return -ENOMEM;
 }

-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
-	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
+	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
 }

 int kvm_arch_prepare_memory_region(struct kvm *kvm,
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -633,7 +633,7 @@ void kvm_arch_free_memslot(struct kvm *k
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   const struct kvm_userspace_memory_region *mem,
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2154,7 +2154,7 @@ int kvm_arch_create_memslot(struct kvm *
 	return 0;
 }

-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 }

--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -873,6 +873,7 @@ static struct kvm_memslots *install_new_
 		int as_id, struct kvm_memslots *slots)
 {
 	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+	u64 gen;

 	/*
 	 * Set the low bit in the generation, which disables SPTE caching
@@ -895,9 +896,11 @@ static struct kvm_memslots *install_new_
 	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
 	 * use generations 2, 6, 10, 14, ...
 	 */
-	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+	gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;

-	kvm_arch_memslots_updated(kvm, slots);
+	kvm_arch_memslots_updated(kvm, gen);
+
+	slots->generation = gen;

 	return old_memslots;
 }