From 59202e39d1dffd879c920aa4779b15114e9436e8 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Thu, 20 Dec 2018 11:36:07 +0000
Subject: arm/arm64: KVM: Allow a VCPU to fully reset itself

[ Upstream commit 358b28f09f0ab074d781df72b8a671edb1547789 ]

The current kvm_psci_vcpu_on implementation will directly try to
manipulate the state of the VCPU to reset it. However, since this is
not done on the thread that runs the VCPU, we can end up in a strangely
corrupted state when the source and target VCPUs are running at the same
time.

Fix this by factoring out all reset logic from the PSCI implementation
and forwarding the required information along with a request to the
target VCPU.

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/include/asm/kvm_host.h   | 10 +++++++++
 arch/arm/kvm/reset.c              | 24 +++++++++++++++++++++
 arch/arm64/include/asm/kvm_host.h | 11 ++++++++++
 arch/arm64/kvm/reset.c            | 24 +++++++++++++++++++++
 virt/kvm/arm/arm.c                | 10 +++++++++
 virt/kvm/arm/psci.c               | 36 ++++++++++++++-----------------
 6 files changed, 95 insertions(+), 20 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3ad482d2f1eb..d0d0227fc70d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+	unsigned long pc;
+	unsigned long r0;
+	bool be;
+	bool reset;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
+	struct vcpu_reset_state reset_state;
+
 	/* Detect first run of a vcpu */
 	bool has_run_once;
 };
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 
 #include <kvm/arm_arch_timer.h>
 
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset CP15 registers */
 	kvm_reset_coprocs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (target_pc & 1) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset arch_timer context */
 	return kvm_timer_vcpu_reset(vcpu);
 }
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3d6d7336f871..6abe4002945f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -206,6 +207,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+	unsigned long pc;
+	unsigned long r0;
+	bool be;
+	bool reset;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 
@@ -295,6 +303,9 @@ struct kvm_vcpu_arch {
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
+	/* Additional reset state */
+	struct vcpu_reset_state reset_state;
+
 	/* True when deferrable sysregs are loaded on the physical CPU,
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 	bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 644dd0050766..18b9a522a2b3 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -31,6 +31,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 
 /*
@@ -140,6 +141,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (vcpu->arch.reset_state.reset) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset PMU */
 	kvm_pmu_vcpu_reset(vcpu);
 
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 91495045ad5a..1415e36fed3d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -624,6 +624,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 		/* Awaken to handle a signal, request we sleep again later. */
 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
 	}
+
+	/*
+	 * Make sure we will observe a potential reset request if we've
+	 * observed a change to the power state. Pairs with the smp_wmb() in
+	 * kvm_psci_vcpu_on().
+	 */
+	smp_rmb();
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -637,6 +644,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 			vcpu_req_sleep(vcpu);
 
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
 		/*
 		 * Clear IRQ_PENDING requests that were made to guarantee
 		 * that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
+	struct vcpu_reset_state *reset_state;
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL;
-	struct swait_queue_head *wq;
 	unsigned long cpu_id;
-	unsigned long context_id;
-	phys_addr_t target_pc;
 
 	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = smccc_get_arg2(source_vcpu);
-	context_id = smccc_get_arg3(source_vcpu);
+	reset_state = &vcpu->arch.reset_state;
 
-	kvm_reset_vcpu(vcpu);
-
-	/* Gracefully handle Thumb2 entry point */
-	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-		target_pc &= ~((phys_addr_t) 1);
-		vcpu_set_thumb(vcpu);
-	}
+	reset_state->pc = smccc_get_arg2(source_vcpu);
 
 	/* Propagate caller endianness */
-	if (kvm_vcpu_is_be(source_vcpu))
-		kvm_vcpu_set_be(vcpu);
+	reset_state->be = kvm_vcpu_is_be(source_vcpu);
 
-	*vcpu_pc(vcpu) = target_pc;
 	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general puspose registers are undefined upon CPU_ON.
	 */
-	smccc_set_retval(vcpu, context_id, 0, 0, 0);
-	vcpu->arch.power_off = false;
-	smp_mb(); /* Make sure the above is visible */
+	reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+	WRITE_ONCE(reset_state->reset, true);
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 
-	wq = kvm_arch_vcpu_wq(vcpu);
-	swake_up_one(wq);
+	/*
+	 * Make sure the reset request is observed if the change to
+	 * power_state is observed.
+	 */
+	smp_wmb();
+
+	vcpu->arch.power_off = false;
+	kvm_vcpu_wake_up(vcpu);
 
 	return PSCI_RET_SUCCESS;
 }
-- 
2.19.1