From aa3eb8a6138076f54a58d4a79720da5c6b93be6d Mon Sep 17 00:00:00 2001
From: Paul Mackerras <paulus@ozlabs.org>
Date: Thu, 23 May 2019 16:35:34 +1000
Subject: KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup

[ Upstream commit 0d4ee88d92884c661fcafd5576da243aa943dc24 ]

Currently the HV KVM code uses kvm->lock in conjunction with a flag,
kvm->arch.mmu_ready, to synchronize MMU setup and hold off vcpu
execution until the MMU-related data structures are ready. However,
this means that kvm->lock is being taken inside vcpu->mutex, which
is contrary to Documentation/virtual/kvm/locking.txt and results in
lockdep warnings.

To fix this, we add a new mutex, kvm->arch.mmu_setup_lock, which nests
inside the vcpu mutexes, and is taken in the places where kvm->lock
was taken that are related to MMU setup.

Additionally we take the new mutex in the vcpu creation code at the
point where we are creating a new vcore, in order to provide mutual
exclusion with kvmppc_update_lpcr() and ensure that an update to
kvm->arch.lpcr doesn't get missed, which could otherwise lead to a
stale vcore->lpcr value.

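The locking rule being restored here, per Documentation/virtual/kvm/locking.txt,
is that kvm->lock is an outer lock relative to vcpu->mutex, so code running with
a vcpu mutex held must not acquire kvm->lock. The stand-alone C sketch below is
not part of this patch; it only illustrates the resulting lock ordering under
that assumption, with vm_lock, vcpu_mutex and mmu_setup_lock as hypothetical
stand-ins for kvm->lock, vcpu->mutex and kvm->arch.mmu_setup_lock.

/*
 * Hypothetical userspace sketch of the lock ordering described above;
 * not kernel code.  vm_lock ~ kvm->lock, vcpu_mutex ~ vcpu->mutex,
 * mmu_setup_lock ~ kvm->arch.mmu_setup_lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock        = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vcpu_mutex     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmu_setup_lock = PTHREAD_MUTEX_INITIALIZER;
static int mmu_ready;

/* Documented order: the VM-wide lock is taken before any vcpu mutex. */
static void *vm_ioctl_path(void *arg)
{
	pthread_mutex_lock(&vm_lock);
	pthread_mutex_lock(&vcpu_mutex);
	/* ... operate on the vcpu ... */
	pthread_mutex_unlock(&vcpu_mutex);
	pthread_mutex_unlock(&vm_lock);
	return NULL;
}

/*
 * vcpu entry path: MMU setup serializes on a lock that nests inside the
 * vcpu mutex, so the VM-wide lock is never taken here (the old code took
 * it at this point, inverting the documented order).
 */
static void *vcpu_run_path(void *arg)
{
	pthread_mutex_lock(&vcpu_mutex);
	pthread_mutex_lock(&mmu_setup_lock);
	if (!mmu_ready) {
		/* ... build the MMU data structures ... */
		mmu_ready = 1;
	}
	pthread_mutex_unlock(&mmu_setup_lock);
	pthread_mutex_unlock(&vcpu_mutex);
	return NULL;
}

/* MMU reconfiguration (e.g. an HPT resize) needs only the inner lock. */
static void *resize_hpt_path(void *arg)
{
	pthread_mutex_lock(&mmu_setup_lock);
	mmu_ready = 0;
	/* ... reallocate the hashed page table ... */
	mmu_ready = 1;
	pthread_mutex_unlock(&mmu_setup_lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	pthread_create(&t[0], NULL, vm_ioctl_path, NULL);
	pthread_create(&t[1], NULL, vcpu_run_path, NULL);
	pthread_create(&t[2], NULL, resize_hpt_path, NULL);
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);

	/* Single global order: vm_lock -> vcpu_mutex -> mmu_setup_lock. */
	printf("mmu_ready = %d\n", mmu_ready);
	return 0;
}

Built with cc -pthread, the three paths can run concurrently because every
thread acquires locks in the single order vm_lock -> vcpu_mutex ->
mmu_setup_lock, which is exactly the property the old kvm->lock usage violated.
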
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 36 ++++++++++++++---------------
 arch/powerpc/kvm/book3s_hv.c        | 31 ++++++++++++++++++-------
 3 files changed, 42 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e6b5bb012ccb..8d3658275a34 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -317,6 +317,7 @@ struct kvm_arch {
 #endif
 	struct kvmppc_ops *kvm_ops;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
 	u64 l1_ptcr;
 	int max_nested_lpid;
 	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index be7bc070eae5..c1ced22455f9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
 	struct work_struct work;
 	u32 order;
 
-	/* These fields protected by kvm->lock */
+	/* These fields protected by kvm->arch.mmu_setup_lock */
 
 	/* Possible values and their usage:
 	 *  <0  an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
 	int error;
 
 	/* Private to the work thread, until error != -EBUSY,
-	 * then protected by kvm->lock.
+	 * then protected by kvm->arch.mmu_setup_lock.
 	 */
 	struct kvm_hpt_info hpt;
 };
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 	long err = -EBUSY;
 	struct kvm_hpt_info info;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (kvm->arch.mmu_ready) {
 		kvm->arch.mmu_ready = 0;
 		/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 	/* Ensure that each vcpu will flush its TLB on next entry. */
 	cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return err;
 }
 
@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
 		return;
 
 	if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	if (WARN_ON(resize->error != -EBUSY))
 		return;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	/* Request is still current? */
 	if (kvm->arch.resize_hpt == resize) {
 		/* We may request large allocations here:
-		 * do not sleep with kvm->lock held for a while.
+		 * do not sleep with kvm->arch.mmu_setup_lock held for a while.
 		 */
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.mmu_setup_lock);
 
 		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
 				 resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 		if (WARN_ON(err == -EBUSY))
 			err = -EINPROGRESS;
 
-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.mmu_setup_lock);
 		/* It is possible that kvm->arch.resize_hpt != resize
-		 * after we grab kvm->lock again.
+		 * after we grab kvm->arch.mmu_setup_lock again.
 		 */
 	}
 
@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	if (kvm->arch.resize_hpt != resize)
 		resize_hpt_release(kvm, resize);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 }
 
 long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 	if (shift && ((shift < 18) || (shift > 46)))
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	resize = kvm->arch.resize_hpt;
 
@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 		ret = 100; /* estimated time in ms */
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return ret;
 }
 
@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	if (shift && ((shift < 18) || (shift > 46)))
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	resize = kvm->arch.resize_hpt;
 
@@ -1625,7 +1625,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	smp_mb();
 out_no_hpt:
 	resize_hpt_release(kvm, resize);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return ret;
 }
 
@@ -1868,7 +1868,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		return -EINVAL;
 
 	/* lock out vcpus from running while we're doing this */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	mmu_ready = kvm->arch.mmu_ready;
 	if (mmu_ready) {
 		kvm->arch.mmu_ready = 0;	/* temporarily */
@@ -1876,7 +1876,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		smp_mb();
 		if (atomic_read(&kvm->arch.vcpus_running)) {
 			kvm->arch.mmu_ready = 1;
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
 			return -EBUSY;
 		}
 	}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	/* Order HPTE updates vs. mmu_ready */
 	smp_wmb();
 	kvm->arch.mmu_ready = mmu_ready;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 
 	if (err)
 		return err;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index bd68b3e59de5..9f49087c3a41 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2257,11 +2257,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 			pr_devel("KVM: collision on id %u", id);
 			vcore = NULL;
 		} else if (!vcore) {
+			/*
+			 * Take mmu_setup_lock for mutual exclusion
+			 * with kvmppc_update_lpcr().
+			 */
 			err = -ENOMEM;
 			vcore = kvmppc_vcore_create(kvm,
 					id & ~(kvm->arch.smt_mode - 1));
+			mutex_lock(&kvm->arch.mmu_setup_lock);
 			kvm->arch.vcores[core] = vcore;
 			kvm->arch.online_vcores++;
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
 		}
 	}
 	mutex_unlock(&kvm->lock);
@@ -3821,7 +3827,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
 	int r = 0;
 	struct kvm *kvm = vcpu->kvm;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (!kvm->arch.mmu_ready) {
 		if (!kvm_is_radix(kvm))
 			r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3831,7 +3837,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
 			kvm->arch.mmu_ready = 1;
 		}
 	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return r;
 }
 
@@ -4439,7 +4445,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 
 /*
  * Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
+ * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
+ * of kvm->arch.lpcr update).
  */
 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
 {
@@ -4491,7 +4498,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
 
 /*
  * Set up HPT (hashed page table) and RMA (real-mode area).
- * Must be called with kvm->lock held.
+ * Must be called with kvm->arch.mmu_setup_lock held.
  */
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
@@ -4579,7 +4586,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	goto out_srcu;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
 	if (nesting_enabled(kvm))
@@ -4596,7 +4606,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 	return 0;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
 	int err;
@@ -4701,6 +4714,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	char buf[32];
 	int ret;
 
+	mutex_init(&kvm->arch.mmu_setup_lock);
+
 	/* Allocate the guest's logical partition ID */
 
 	lpid = kvmppc_alloc_lpid();
@@ -5226,7 +5241,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
 	if (kvmhv_on_pseries() && !radix)
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (radix != kvm_is_radix(kvm)) {
 		if (kvm->arch.mmu_ready) {
 			kvm->arch.mmu_ready = 0;
@@ -5254,7 +5269,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
 		err = 0;
 
 out_unlock:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return err;
 }
 
-- 
2.20.1
