]>
Commit | Line | Data |
---|---|---|
d94d71cb | 1 | // SPDX-License-Identifier: GPL-2.0-only |
bbf45ba5 | 2 | /* |
bbf45ba5 HB |
3 | * |
4 | * Copyright IBM Corp. 2007 | |
5 | * | |
6 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | |
7 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | |
8 | */ | |
9 | ||
10 | #include <linux/errno.h> | |
11 | #include <linux/err.h> | |
12 | #include <linux/kvm_host.h> | |
bbf45ba5 | 13 | #include <linux/vmalloc.h> |
544c6761 | 14 | #include <linux/hrtimer.h> |
174cd4b1 | 15 | #include <linux/sched/signal.h> |
bbf45ba5 | 16 | #include <linux/fs.h> |
5a0e3ad6 | 17 | #include <linux/slab.h> |
eb1e4f43 | 18 | #include <linux/file.h> |
cbbc58d4 | 19 | #include <linux/module.h> |
9576730d SW |
20 | #include <linux/irqbypass.h> |
21 | #include <linux/kvm_irqfd.h> | |
bbf45ba5 | 22 | #include <asm/cputable.h> |
7c0f6ba6 | 23 | #include <linux/uaccess.h> |
bbf45ba5 | 24 | #include <asm/kvm_ppc.h> |
371fefd6 | 25 | #include <asm/cputhreads.h> |
bd2be683 | 26 | #include <asm/irqflags.h> |
58ded420 | 27 | #include <asm/iommu.h> |
6f63e81b | 28 | #include <asm/switch_to.h> |
5af50993 | 29 | #include <asm/xive.h> |
3214d01f PM |
30 | #ifdef CONFIG_PPC_PSERIES |
31 | #include <asm/hvcall.h> | |
32 | #include <asm/plpar_wrappers.h> | |
33 | #endif | |
22945688 | 34 | #include <asm/ultravisor.h> |
5af50993 | 35 | |
73e75b41 | 36 | #include "timing.h" |
5efdb4be | 37 | #include "irq.h" |
fad7b9b5 | 38 | #include "../mm/mmu_decl.h" |
bbf45ba5 | 39 | |
46f43c6e MT |
40 | #define CREATE_TRACE_POINTS |
41 | #include "trace.h" | |
42 | ||
cbbc58d4 AK |
43 | struct kvmppc_ops *kvmppc_hv_ops; |
44 | EXPORT_SYMBOL_GPL(kvmppc_hv_ops); | |
45 | struct kvmppc_ops *kvmppc_pr_ops; | |
46 | EXPORT_SYMBOL_GPL(kvmppc_pr_ops); | |
47 | ||
3a167bea | 48 | |
bbf45ba5 HB |
49 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
50 | { | |
2fa6e1e1 | 51 | return !!(v->arch.pending_exceptions) || kvm_request_pending(v); |
bbf45ba5 HB |
52 | } |
53 | ||
17e433b5 WL |
54 | bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) |
55 | { | |
56 | return kvm_arch_vcpu_runnable(vcpu); | |
57 | } | |
58 | ||
199b5763 LM |
59 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) |
60 | { | |
61 | return false; | |
62 | } | |
63 | ||
b6d33834 CD |
64 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
65 | { | |
66 | return 1; | |
67 | } | |
68 | ||
03d25c5b AG |
69 | /* |
70 | * Common checks before entering the guest world. Call with interrupts | |
71 | * disabled. | |
72 | * | |
7ee78855 AG |
73 | * returns: |
74 | * | |
75 | * == 1 if we're ready to go into guest state | |
76 | * <= 0 if we need to go back to the host with return value | |
03d25c5b AG |
77 | */ |
78 | int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) | |
79 | { | |
6c85f52b SW |
80 | int r; |
81 | ||
82 | WARN_ON(irqs_disabled()); | |
83 | hard_irq_disable(); | |
03d25c5b | 84 | |
03d25c5b AG |
85 | while (true) { |
86 | if (need_resched()) { | |
87 | local_irq_enable(); | |
88 | cond_resched(); | |
6c85f52b | 89 | hard_irq_disable(); |
03d25c5b AG |
90 | continue; |
91 | } | |
92 | ||
93 | if (signal_pending(current)) { | |
7ee78855 AG |
94 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); |
95 | vcpu->run->exit_reason = KVM_EXIT_INTR; | |
96 | r = -EINTR; | |
03d25c5b AG |
97 | break; |
98 | } | |
99 | ||
5bd1cf11 SW |
100 | vcpu->mode = IN_GUEST_MODE; |
101 | ||
102 | /* | |
103 | * Reading vcpu->requests must happen after setting vcpu->mode, | |
104 | * so we don't miss a request because the requester sees | |
105 | * OUTSIDE_GUEST_MODE and assumes we'll be checking requests | |
106 | * before next entering the guest (and thus doesn't IPI). | |
489153c7 LT |
107 | * This also orders the write to mode from any reads |
108 | * to the page tables done while the VCPU is running. | |
109 | * Please see the comment in kvm_flush_remote_tlbs. | |
5bd1cf11 | 110 | */ |
03d25c5b | 111 | smp_mb(); |
5bd1cf11 | 112 | |
2fa6e1e1 | 113 | if (kvm_request_pending(vcpu)) { |
03d25c5b AG |
114 | /* Make sure we process requests preemptable */ |
115 | local_irq_enable(); | |
116 | trace_kvm_check_requests(vcpu); | |
7c973a2e | 117 | r = kvmppc_core_check_requests(vcpu); |
6c85f52b | 118 | hard_irq_disable(); |
7c973a2e AG |
119 | if (r > 0) |
120 | continue; | |
121 | break; | |
03d25c5b AG |
122 | } |
123 | ||
124 | if (kvmppc_core_prepare_to_enter(vcpu)) { | |
125 | /* interrupts got enabled in between, so we | |
126 | are back at square 1 */ | |
127 | continue; | |
128 | } | |
129 | ||
6edaa530 | 130 | guest_enter_irqoff(); |
6c85f52b | 131 | return 1; |
03d25c5b AG |
132 | } |
133 | ||
6c85f52b SW |
134 | /* return to host */ |
135 | local_irq_enable(); | |
03d25c5b AG |
136 | return r; |
137 | } | |
2ba9f0d8 | 138 | EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); |
03d25c5b | 139 | |
5deb8e7a AG |
140 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) |
141 | static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) | |
142 | { | |
143 | struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; | |
144 | int i; | |
145 | ||
146 | shared->sprg0 = swab64(shared->sprg0); | |
147 | shared->sprg1 = swab64(shared->sprg1); | |
148 | shared->sprg2 = swab64(shared->sprg2); | |
149 | shared->sprg3 = swab64(shared->sprg3); | |
150 | shared->srr0 = swab64(shared->srr0); | |
151 | shared->srr1 = swab64(shared->srr1); | |
152 | shared->dar = swab64(shared->dar); | |
153 | shared->msr = swab64(shared->msr); | |
154 | shared->dsisr = swab32(shared->dsisr); | |
155 | shared->int_pending = swab32(shared->int_pending); | |
156 | for (i = 0; i < ARRAY_SIZE(shared->sr); i++) | |
157 | shared->sr[i] = swab32(shared->sr[i]); | |
158 | } | |
159 | #endif | |
160 | ||
2a342ed5 AG |
161 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
162 | { | |
163 | int nr = kvmppc_get_gpr(vcpu, 11); | |
164 | int r; | |
165 | unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); | |
166 | unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); | |
167 | unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); | |
168 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); | |
169 | unsigned long r2 = 0; | |
170 | ||
5deb8e7a | 171 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { |
2a342ed5 AG |
172 | /* 32 bit mode */ |
173 | param1 &= 0xffffffff; | |
174 | param2 &= 0xffffffff; | |
175 | param3 &= 0xffffffff; | |
176 | param4 &= 0xffffffff; | |
177 | } | |
178 | ||
179 | switch (nr) { | |
fdcf8bd7 | 180 | case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): |
5fc87407 | 181 | { |
5deb8e7a AG |
182 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) |
183 | /* Book3S can be little endian, find it out here */ | |
184 | int shared_big_endian = true; | |
185 | if (vcpu->arch.intr_msr & MSR_LE) | |
186 | shared_big_endian = false; | |
187 | if (shared_big_endian != vcpu->arch.shared_big_endian) | |
188 | kvmppc_swab_shared(vcpu); | |
189 | vcpu->arch.shared_big_endian = shared_big_endian; | |
190 | #endif | |
191 | ||
f3383cf8 AG |
192 | if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { |
193 | /* | |
194 | * Older versions of the Linux magic page code had | |
195 | * a bug where they would map their trampoline code | |
196 | * NX. If that's the case, remove !PR NX capability. | |
197 | */ | |
198 | vcpu->arch.disable_kernel_nx = true; | |
199 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | |
200 | } | |
201 | ||
202 | vcpu->arch.magic_page_pa = param1 & ~0xfffULL; | |
203 | vcpu->arch.magic_page_ea = param2 & ~0xfffULL; | |
5fc87407 | 204 | |
89b68c96 AG |
205 | #ifdef CONFIG_PPC_64K_PAGES |
206 | /* | |
207 | * Make sure our 4k magic page is in the same window of a 64k | |
208 | * page within the guest and within the host's page. | |
209 | */ | |
210 | if ((vcpu->arch.magic_page_pa & 0xf000) != | |
211 | ((ulong)vcpu->arch.shared & 0xf000)) { | |
212 | void *old_shared = vcpu->arch.shared; | |
213 | ulong shared = (ulong)vcpu->arch.shared; | |
214 | void *new_shared; | |
215 | ||
216 | shared &= PAGE_MASK; | |
217 | shared |= vcpu->arch.magic_page_pa & 0xf000; | |
218 | new_shared = (void*)shared; | |
219 | memcpy(new_shared, old_shared, 0x1000); | |
220 | vcpu->arch.shared = new_shared; | |
221 | } | |
222 | #endif | |
223 | ||
b5904972 | 224 | r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; |
7508e16c | 225 | |
fdcf8bd7 | 226 | r = EV_SUCCESS; |
5fc87407 AG |
227 | break; |
228 | } | |
fdcf8bd7 SY |
229 | case KVM_HCALL_TOKEN(KVM_HC_FEATURES): |
230 | r = EV_SUCCESS; | |
bf7ca4bd | 231 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) |
5fc87407 AG |
232 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); |
233 | #endif | |
2a342ed5 AG |
234 | |
235 | /* Second return value is in r4 */ | |
2a342ed5 | 236 | break; |
9202e076 LYB |
237 | case EV_HCALL_TOKEN(EV_IDLE): |
238 | r = EV_SUCCESS; | |
239 | kvm_vcpu_block(vcpu); | |
72875d8a | 240 | kvm_clear_request(KVM_REQ_UNHALT, vcpu); |
9202e076 | 241 | break; |
2a342ed5 | 242 | default: |
fdcf8bd7 | 243 | r = EV_UNIMPLEMENTED; |
2a342ed5 AG |
244 | break; |
245 | } | |
246 | ||
7508e16c AG |
247 | kvmppc_set_gpr(vcpu, 4, r2); |
248 | ||
2a342ed5 AG |
249 | return r; |
250 | } | |
2ba9f0d8 | 251 | EXPORT_SYMBOL_GPL(kvmppc_kvm_pv); |
bbf45ba5 | 252 | |
af8f38b3 AG |
253 | int kvmppc_sanity_check(struct kvm_vcpu *vcpu) |
254 | { | |
255 | int r = false; | |
256 | ||
257 | /* We have to know what CPU to virtualize */ | |
258 | if (!vcpu->arch.pvr) | |
259 | goto out; | |
260 | ||
261 | /* PAPR only works with book3s_64 */ | |
262 | if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) | |
263 | goto out; | |
264 | ||
af8f38b3 | 265 | /* HV KVM can only do PAPR mode for now */ |
a78b55d1 | 266 | if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) |
af8f38b3 | 267 | goto out; |
af8f38b3 | 268 | |
d30f6e48 SW |
269 | #ifdef CONFIG_KVM_BOOKE_HV |
270 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) | |
271 | goto out; | |
272 | #endif | |
273 | ||
af8f38b3 AG |
274 | r = true; |
275 | ||
276 | out: | |
277 | vcpu->arch.sane = r; | |
278 | return r ? 0 : -EINVAL; | |
279 | } | |
2ba9f0d8 | 280 | EXPORT_SYMBOL_GPL(kvmppc_sanity_check); |
af8f38b3 | 281 | |
bbf45ba5 HB |
282 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) |
283 | { | |
284 | enum emulation_result er; | |
285 | int r; | |
286 | ||
d69614a2 | 287 | er = kvmppc_emulate_loadstore(vcpu); |
bbf45ba5 HB |
288 | switch (er) { |
289 | case EMULATE_DONE: | |
290 | /* Future optimization: only reload non-volatiles if they were | |
291 | * actually modified. */ | |
292 | r = RESUME_GUEST_NV; | |
293 | break; | |
51f04726 MC |
294 | case EMULATE_AGAIN: |
295 | r = RESUME_GUEST; | |
296 | break; | |
bbf45ba5 HB |
297 | case EMULATE_DO_MMIO: |
298 | run->exit_reason = KVM_EXIT_MMIO; | |
299 | /* We must reload nonvolatiles because "update" load/store | |
300 | * instructions modify register state. */ | |
301 | /* Future optimization: only reload non-volatiles if they were | |
302 | * actually modified. */ | |
303 | r = RESUME_HOST_NV; | |
304 | break; | |
305 | case EMULATE_FAIL: | |
51f04726 MC |
306 | { |
307 | u32 last_inst; | |
308 | ||
8d0eff63 | 309 | kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); |
bbf45ba5 | 310 | /* XXX Deliver Program interrupt to guest. */ |
51f04726 | 311 | pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst); |
bbf45ba5 HB |
312 | r = RESUME_HOST; |
313 | break; | |
51f04726 | 314 | } |
bbf45ba5 | 315 | default: |
5a33169e AG |
316 | WARN_ON(1); |
317 | r = RESUME_GUEST; | |
bbf45ba5 HB |
318 | } |
319 | ||
320 | return r; | |
321 | } | |
2ba9f0d8 | 322 | EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); |
bbf45ba5 | 323 | |
35c4a733 AG |
324 | int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, |
325 | bool data) | |
326 | { | |
c12fb43c | 327 | ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; |
35c4a733 | 328 | struct kvmppc_pte pte; |
cc6929cc | 329 | int r = -EINVAL; |
35c4a733 AG |
330 | |
331 | vcpu->stat.st++; | |
332 | ||
cc6929cc SJS |
333 | if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) |
334 | r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, | |
335 | size); | |
336 | ||
337 | if ((!r) || (r == -EAGAIN)) | |
338 | return r; | |
339 | ||
35c4a733 AG |
340 | r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, |
341 | XLATE_WRITE, &pte); | |
342 | if (r < 0) | |
343 | return r; | |
344 | ||
345 | *eaddr = pte.raddr; | |
346 | ||
347 | if (!pte.may_write) | |
348 | return -EPERM; | |
349 | ||
c12fb43c AG |
350 | /* Magic page override */ |
351 | if (kvmppc_supports_magic_page(vcpu) && mp_pa && | |
352 | ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && | |
353 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { | |
354 | void *magic = vcpu->arch.shared; | |
355 | magic += pte.eaddr & 0xfff; | |
356 | memcpy(magic, ptr, size); | |
357 | return EMULATE_DONE; | |
358 | } | |
359 | ||
35c4a733 AG |
360 | if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) |
361 | return EMULATE_DO_MMIO; | |
362 | ||
363 | return EMULATE_DONE; | |
364 | } | |
365 | EXPORT_SYMBOL_GPL(kvmppc_st); | |
366 | ||
367 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | |
368 | bool data) | |
369 | { | |
c12fb43c | 370 | ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; |
35c4a733 | 371 | struct kvmppc_pte pte; |
cc6929cc | 372 | int rc = -EINVAL; |
35c4a733 AG |
373 | |
374 | vcpu->stat.ld++; | |
375 | ||
cc6929cc SJS |
376 | if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) |
377 | rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, | |
378 | size); | |
379 | ||
380 | if ((!rc) || (rc == -EAGAIN)) | |
381 | return rc; | |
382 | ||
35c4a733 AG |
383 | rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, |
384 | XLATE_READ, &pte); | |
385 | if (rc) | |
386 | return rc; | |
387 | ||
388 | *eaddr = pte.raddr; | |
389 | ||
390 | if (!pte.may_read) | |
391 | return -EPERM; | |
392 | ||
393 | if (!data && !pte.may_execute) | |
394 | return -ENOEXEC; | |
395 | ||
c12fb43c AG |
396 | /* Magic page override */ |
397 | if (kvmppc_supports_magic_page(vcpu) && mp_pa && | |
398 | ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && | |
399 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { | |
400 | void *magic = vcpu->arch.shared; | |
401 | magic += pte.eaddr & 0xfff; | |
402 | memcpy(ptr, magic, size); | |
403 | return EMULATE_DONE; | |
404 | } | |
405 | ||
c45c5514 AG |
406 | if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size)) |
407 | return EMULATE_DO_MMIO; | |
35c4a733 AG |
408 | |
409 | return EMULATE_DONE; | |
35c4a733 AG |
410 | } |
411 | EXPORT_SYMBOL_GPL(kvmppc_ld); | |
412 | ||
13a34e06 | 413 | int kvm_arch_hardware_enable(void) |
bbf45ba5 | 414 | { |
10474ae8 | 415 | return 0; |
bbf45ba5 HB |
416 | } |
417 | ||
b9904085 | 418 | int kvm_arch_hardware_setup(void *opaque) |
bbf45ba5 HB |
419 | { |
420 | return 0; | |
421 | } | |
422 | ||
b9904085 | 423 | int kvm_arch_check_processor_compat(void *opaque) |
bbf45ba5 | 424 | { |
f257d6dc | 425 | return kvmppc_core_check_processor_compat(); |
bbf45ba5 HB |
426 | } |
427 | ||
e08b9637 | 428 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
bbf45ba5 | 429 | { |
cbbc58d4 AK |
430 | struct kvmppc_ops *kvm_ops = NULL; |
431 | /* | |
432 | * if we have both HV and PR enabled, default is HV | |
433 | */ | |
434 | if (type == 0) { | |
435 | if (kvmppc_hv_ops) | |
436 | kvm_ops = kvmppc_hv_ops; | |
437 | else | |
438 | kvm_ops = kvmppc_pr_ops; | |
439 | if (!kvm_ops) | |
440 | goto err_out; | |
441 | } else if (type == KVM_VM_PPC_HV) { | |
442 | if (!kvmppc_hv_ops) | |
443 | goto err_out; | |
444 | kvm_ops = kvmppc_hv_ops; | |
445 | } else if (type == KVM_VM_PPC_PR) { | |
446 | if (!kvmppc_pr_ops) | |
447 | goto err_out; | |
448 | kvm_ops = kvmppc_pr_ops; | |
449 | } else | |
450 | goto err_out; | |
451 | ||
452 | if (kvm_ops->owner && !try_module_get(kvm_ops->owner)) | |
453 | return -ENOENT; | |
454 | ||
455 | kvm->arch.kvm_ops = kvm_ops; | |
f9e0554d | 456 | return kvmppc_core_init_vm(kvm); |
cbbc58d4 AK |
457 | err_out: |
458 | return -EINVAL; | |
bbf45ba5 HB |
459 | } |
460 | ||
d89f5eff | 461 | void kvm_arch_destroy_vm(struct kvm *kvm) |
bbf45ba5 HB |
462 | { |
463 | unsigned int i; | |
988a2cae | 464 | struct kvm_vcpu *vcpu; |
bbf45ba5 | 465 | |
e17769eb SW |
466 | #ifdef CONFIG_KVM_XICS |
467 | /* | |
468 | * We call kick_all_cpus_sync() to ensure that all | |
469 | * CPUs have executed any pending IPIs before we | |
470 | * continue and free VCPUs structures below. | |
471 | */ | |
472 | if (is_kvmppc_hv_enabled(kvm)) | |
473 | kick_all_cpus_sync(); | |
474 | #endif | |
475 | ||
988a2cae | 476 | kvm_for_each_vcpu(i, vcpu, kvm) |
4543bdc0 | 477 | kvm_vcpu_destroy(vcpu); |
988a2cae GN |
478 | |
479 | mutex_lock(&kvm->lock); | |
480 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | |
481 | kvm->vcpus[i] = NULL; | |
482 | ||
483 | atomic_set(&kvm->online_vcpus, 0); | |
f9e0554d PM |
484 | |
485 | kvmppc_core_destroy_vm(kvm); | |
486 | ||
988a2cae | 487 | mutex_unlock(&kvm->lock); |
cbbc58d4 AK |
488 | |
489 | /* drop the module reference */ | |
490 | module_put(kvm->arch.kvm_ops->owner); | |
bbf45ba5 HB |
491 | } |
492 | ||
784aa3d7 | 493 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
bbf45ba5 HB |
494 | { |
495 | int r; | |
7a58777a | 496 | /* Assume we're using HV mode when the HV module is loaded */ |
cbbc58d4 | 497 | int hv_enabled = kvmppc_hv_ops ? 1 : 0; |
bbf45ba5 | 498 | |
7a58777a AG |
499 | if (kvm) { |
500 | /* | |
501 | * Hooray - we know which VM type we're running on. Depend on | |
502 | * that rather than the guess above. | |
503 | */ | |
504 | hv_enabled = is_kvmppc_hv_enabled(kvm); | |
505 | } | |
506 | ||
bbf45ba5 | 507 | switch (ext) { |
5ce941ee SW |
508 | #ifdef CONFIG_BOOKE |
509 | case KVM_CAP_PPC_BOOKE_SREGS: | |
f61c94bb | 510 | case KVM_CAP_PPC_BOOKE_WATCHDOG: |
1c810636 | 511 | case KVM_CAP_PPC_EPR: |
5ce941ee | 512 | #else |
e15a1137 | 513 | case KVM_CAP_PPC_SEGSTATE: |
1022fc3d | 514 | case KVM_CAP_PPC_HIOR: |
930b412a | 515 | case KVM_CAP_PPC_PAPR: |
5ce941ee | 516 | #endif |
18978768 | 517 | case KVM_CAP_PPC_UNSET_IRQ: |
7b4203e8 | 518 | case KVM_CAP_PPC_IRQ_LEVEL: |
71fbfd5f | 519 | case KVM_CAP_ENABLE_CAP: |
e24ed81f | 520 | case KVM_CAP_ONE_REG: |
0e673fb6 | 521 | case KVM_CAP_IOEVENTFD: |
5df554ad | 522 | case KVM_CAP_DEVICE_CTRL: |
460df4c1 | 523 | case KVM_CAP_IMMEDIATE_EXIT: |
495907ec | 524 | case KVM_CAP_SET_GUEST_DEBUG: |
de56a948 PM |
525 | r = 1; |
526 | break; | |
1a9167a2 | 527 | case KVM_CAP_PPC_GUEST_DEBUG_SSTEP: |
de56a948 | 528 | case KVM_CAP_PPC_PAIRED_SINGLES: |
ad0a048b | 529 | case KVM_CAP_PPC_OSI: |
15711e9c | 530 | case KVM_CAP_PPC_GET_PVINFO: |
bf7ca4bd | 531 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
dc83b8bc | 532 | case KVM_CAP_SW_TLB: |
eb1e4f43 | 533 | #endif |
699cc876 | 534 | /* We support this only for PR */ |
cbbc58d4 | 535 | r = !hv_enabled; |
e15a1137 | 536 | break; |
699cc876 AK |
537 | #ifdef CONFIG_KVM_MPIC |
538 | case KVM_CAP_IRQ_MPIC: | |
539 | r = 1; | |
540 | break; | |
541 | #endif | |
542 | ||
f31e65e1 | 543 | #ifdef CONFIG_PPC_BOOK3S_64 |
54738c09 | 544 | case KVM_CAP_SPAPR_TCE: |
58ded420 | 545 | case KVM_CAP_SPAPR_TCE_64: |
693ac10a SJS |
546 | r = 1; |
547 | break; | |
121f80ba | 548 | case KVM_CAP_SPAPR_TCE_VFIO: |
693ac10a SJS |
549 | r = !!cpu_has_feature(CPU_FTR_HVMODE); |
550 | break; | |
8e591cb7 | 551 | case KVM_CAP_PPC_RTAS: |
f2e91042 | 552 | case KVM_CAP_PPC_FIXUP_HCALL: |
699a0ea0 | 553 | case KVM_CAP_PPC_ENABLE_HCALL: |
5975a2e0 PM |
554 | #ifdef CONFIG_KVM_XICS |
555 | case KVM_CAP_IRQ_XICS: | |
556 | #endif | |
3214d01f | 557 | case KVM_CAP_PPC_GET_CPU_CHAR: |
54738c09 DG |
558 | r = 1; |
559 | break; | |
eacc56bb CLG |
560 | #ifdef CONFIG_KVM_XIVE |
561 | case KVM_CAP_PPC_IRQ_XIVE: | |
562 | /* | |
3fab2d10 CLG |
563 | * We need XIVE to be enabled on the platform (implies |
564 | * a POWER9 processor) and the PowerNV platform, as | |
565 | * nested is not yet supported. | |
eacc56bb | 566 | */ |
2ad7a27d PM |
567 | r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) && |
568 | kvmppc_xive_native_supported(); | |
eacc56bb CLG |
569 | break; |
570 | #endif | |
a8acaece DG |
571 | |
572 | case KVM_CAP_PPC_ALLOC_HTAB: | |
573 | r = hv_enabled; | |
574 | break; | |
f31e65e1 | 575 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
699cc876 | 576 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
371fefd6 | 577 | case KVM_CAP_PPC_SMT: |
45c940ba | 578 | r = 0; |
57900694 PM |
579 | if (kvm) { |
580 | if (kvm->arch.emul_smt_mode > 1) | |
581 | r = kvm->arch.emul_smt_mode; | |
582 | else | |
583 | r = kvm->arch.smt_mode; | |
584 | } else if (hv_enabled) { | |
45c940ba PM |
585 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
586 | r = 1; | |
587 | else | |
588 | r = threads_per_subcore; | |
589 | } | |
371fefd6 | 590 | break; |
2ed4f9dd PM |
591 | case KVM_CAP_PPC_SMT_POSSIBLE: |
592 | r = 1; | |
593 | if (hv_enabled) { | |
594 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
595 | r = ((threads_per_subcore << 1) - 1); | |
596 | else | |
597 | /* P9 can emulate dbells, so allow any mode */ | |
598 | r = 8 | 4 | 2 | 1; | |
599 | } | |
600 | break; | |
aa04b4cc | 601 | case KVM_CAP_PPC_RMA: |
c17b98cf | 602 | r = 0; |
aa04b4cc | 603 | break; |
e928e9cb ME |
604 | case KVM_CAP_PPC_HWRNG: |
605 | r = kvmppc_hwrng_present(); | |
606 | break; | |
c9270132 | 607 | case KVM_CAP_PPC_MMU_RADIX: |
8cf4ecc0 | 608 | r = !!(hv_enabled && radix_enabled()); |
c9270132 PM |
609 | break; |
610 | case KVM_CAP_PPC_MMU_HASH_V3: | |
de760db4 PM |
611 | r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && |
612 | cpu_has_feature(CPU_FTR_HVMODE)); | |
c9270132 | 613 | break; |
aa069a99 PM |
614 | case KVM_CAP_PPC_NESTED_HV: |
615 | r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && | |
616 | !kvmppc_hv_ops->enable_nested(NULL)); | |
617 | break; | |
f4800b1f | 618 | #endif |
342d3db7 | 619 | case KVM_CAP_SYNC_MMU: |
699cc876 | 620 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
c17b98cf | 621 | r = hv_enabled; |
f4800b1f AG |
622 | #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
623 | r = 1; | |
624 | #else | |
625 | r = 0; | |
a2932923 | 626 | #endif |
699cc876 AK |
627 | break; |
628 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | |
a2932923 | 629 | case KVM_CAP_PPC_HTAB_FD: |
cbbc58d4 | 630 | r = hv_enabled; |
a2932923 | 631 | break; |
de56a948 | 632 | #endif |
b5434032 ME |
633 | case KVM_CAP_NR_VCPUS: |
634 | /* | |
635 | * Recommending a number of CPUs is somewhat arbitrary; we | |
636 | * return the number of present CPUs for -HV (since a host | |
637 | * will have secondary threads "offline"), and for other KVM | |
638 | * implementations just count online CPUs. | |
639 | */ | |
cbbc58d4 | 640 | if (hv_enabled) |
699cc876 AK |
641 | r = num_present_cpus(); |
642 | else | |
643 | r = num_online_cpus(); | |
b5434032 ME |
644 | break; |
645 | case KVM_CAP_MAX_VCPUS: | |
646 | r = KVM_MAX_VCPUS; | |
647 | break; | |
a86cb413 TH |
648 | case KVM_CAP_MAX_VCPU_ID: |
649 | r = KVM_MAX_VCPU_ID; | |
650 | break; | |
5b74716e BH |
651 | #ifdef CONFIG_PPC_BOOK3S_64 |
652 | case KVM_CAP_PPC_GET_SMMU_INFO: | |
653 | r = 1; | |
654 | break; | |
d3695aa4 AK |
655 | case KVM_CAP_SPAPR_MULTITCE: |
656 | r = 1; | |
657 | break; | |
050f2339 | 658 | case KVM_CAP_SPAPR_RESIZE_HPT: |
790a9df5 | 659 | r = !!hv_enabled; |
050f2339 | 660 | break; |
134764ed AP |
661 | #endif |
662 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | |
663 | case KVM_CAP_PPC_FWNMI: | |
664 | r = hv_enabled; | |
665 | break; | |
5b74716e | 666 | #endif |
4bb3c7a0 | 667 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
23528bb2 | 668 | case KVM_CAP_PPC_HTM: |
d234d68e SG |
669 | r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || |
670 | (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); | |
23528bb2 | 671 | break; |
9a5788c6 PM |
672 | #endif |
673 | #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) | |
674 | case KVM_CAP_PPC_SECURE_GUEST: | |
675 | r = hv_enabled && kvmppc_hv_ops->enable_svm && | |
676 | !kvmppc_hv_ops->enable_svm(NULL); | |
677 | break; | |
4bb3c7a0 | 678 | #endif |
bbf45ba5 HB |
679 | default: |
680 | r = 0; | |
681 | break; | |
682 | } | |
683 | return r; | |
684 | ||
685 | } | |
686 | ||
687 | long kvm_arch_dev_ioctl(struct file *filp, | |
688 | unsigned int ioctl, unsigned long arg) | |
689 | { | |
690 | return -EINVAL; | |
691 | } | |
692 | ||
e96c81ee | 693 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
db3fe4eb | 694 | { |
e96c81ee | 695 | kvmppc_core_free_memslot(kvm, slot); |
db3fe4eb TY |
696 | } |
697 | ||
f7784b8e | 698 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
462fce46 | 699 | struct kvm_memory_slot *memslot, |
09170a49 | 700 | const struct kvm_userspace_memory_region *mem, |
7b6195a9 | 701 | enum kvm_mr_change change) |
bbf45ba5 | 702 | { |
82307e67 | 703 | return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change); |
bbf45ba5 HB |
704 | } |
705 | ||
f7784b8e | 706 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
09170a49 | 707 | const struct kvm_userspace_memory_region *mem, |
9d4c197c | 708 | struct kvm_memory_slot *old, |
f36f3f28 | 709 | const struct kvm_memory_slot *new, |
8482644a | 710 | enum kvm_mr_change change) |
f7784b8e | 711 | { |
f032b734 | 712 | kvmppc_core_commit_memory_region(kvm, mem, old, new, change); |
f7784b8e MT |
713 | } |
714 | ||
2df72e9b MT |
715 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
716 | struct kvm_memory_slot *slot) | |
34d4cb8f | 717 | { |
dfe49dbd | 718 | kvmppc_core_flush_memslot(kvm, slot); |
34d4cb8f MT |
719 | } |
720 | ||
897cc38e SC |
721 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) |
722 | { | |
723 | return 0; | |
724 | } | |
725 | ||
74ce2e60 SC |
726 | static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) |
727 | { | |
728 | struct kvm_vcpu *vcpu; | |
729 | ||
730 | vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); | |
731 | kvmppc_decrementer_func(vcpu); | |
732 | ||
733 | return HRTIMER_NORESTART; | |
734 | } | |
735 | ||
e529ef66 | 736 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) |
bbf45ba5 | 737 | { |
c50bfbdc SC |
738 | int err; |
739 | ||
74ce2e60 SC |
740 | hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
741 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; | |
742 | vcpu->arch.dec_expires = get_tb(); | |
743 | ||
744 | #ifdef CONFIG_KVM_EXIT_TIMING | |
745 | mutex_init(&vcpu->arch.exit_timing_lock); | |
746 | #endif | |
747 | err = kvmppc_subarch_vcpu_init(vcpu); | |
ff030fdf | 748 | if (err) |
e529ef66 | 749 | return err; |
ff030fdf | 750 | |
74ce2e60 SC |
751 | err = kvmppc_core_vcpu_create(vcpu); |
752 | if (err) | |
753 | goto out_vcpu_uninit; | |
754 | ||
c50bfbdc | 755 | vcpu->arch.wqp = &vcpu->wq; |
e529ef66 SC |
756 | kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); |
757 | return 0; | |
74ce2e60 SC |
758 | |
759 | out_vcpu_uninit: | |
74ce2e60 SC |
760 | kvmppc_subarch_vcpu_uninit(vcpu); |
761 | return err; | |
bbf45ba5 HB |
762 | } |
763 | ||
31928aa5 | 764 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
42897d86 | 765 | { |
42897d86 MT |
766 | } |
767 | ||
d5279f3a | 768 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
bbf45ba5 | 769 | { |
a595405d AG |
770 | /* Make sure we're not using the vcpu anymore */ |
771 | hrtimer_cancel(&vcpu->arch.dec_timer); | |
a595405d | 772 | |
73e75b41 | 773 | kvmppc_remove_vcpu_debugfs(vcpu); |
eb1e4f43 SW |
774 | |
775 | switch (vcpu->arch.irq_type) { | |
776 | case KVMPPC_IRQ_MPIC: | |
777 | kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); | |
778 | break; | |
bc5ad3f3 | 779 | case KVMPPC_IRQ_XICS: |
03f95332 | 780 | if (xics_on_xive()) |
5af50993 BH |
781 | kvmppc_xive_cleanup_vcpu(vcpu); |
782 | else | |
783 | kvmppc_xics_free_icp(vcpu); | |
bc5ad3f3 | 784 | break; |
eacc56bb CLG |
785 | case KVMPPC_IRQ_XIVE: |
786 | kvmppc_xive_native_cleanup_vcpu(vcpu); | |
787 | break; | |
eb1e4f43 SW |
788 | } |
789 | ||
db93f574 | 790 | kvmppc_core_vcpu_free(vcpu); |
74ce2e60 | 791 | |
74ce2e60 | 792 | kvmppc_subarch_vcpu_uninit(vcpu); |
bbf45ba5 HB |
793 | } |
794 | ||
bbf45ba5 HB |
795 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
796 | { | |
9dd921cf | 797 | return kvmppc_core_pending_dec(vcpu); |
bbf45ba5 HB |
798 | } |
799 | ||
bbf45ba5 HB |
800 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
801 | { | |
eab17672 SW |
802 | #ifdef CONFIG_BOOKE |
803 | /* | |
804 | * vrsave (formerly usprg0) isn't used by Linux, but may | |
805 | * be used by the guest. | |
806 | * | |
807 | * On non-booke this is associated with Altivec and | |
808 | * is handled by code in book3s.c. | |
809 | */ | |
810 | mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); | |
811 | #endif | |
9dd921cf | 812 | kvmppc_core_vcpu_load(vcpu, cpu); |
bbf45ba5 HB |
813 | } |
814 | ||
815 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |
816 | { | |
9dd921cf | 817 | kvmppc_core_vcpu_put(vcpu); |
eab17672 SW |
818 | #ifdef CONFIG_BOOKE |
819 | vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); | |
820 | #endif | |
bbf45ba5 HB |
821 | } |
822 | ||
9576730d SW |
823 | /* |
824 | * irq_bypass_add_producer and irq_bypass_del_producer are only | |
825 | * useful if the architecture supports PCI passthrough. | |
826 | * irq_bypass_stop and irq_bypass_start are not needed and so | |
827 | * kvm_ops are not defined for them. | |
828 | */ | |
829 | bool kvm_arch_has_irq_bypass(void) | |
830 | { | |
831 | return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || | |
832 | (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); | |
833 | } | |
834 | ||
835 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, | |
836 | struct irq_bypass_producer *prod) | |
837 | { | |
838 | struct kvm_kernel_irqfd *irqfd = | |
839 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
840 | struct kvm *kvm = irqfd->kvm; | |
841 | ||
842 | if (kvm->arch.kvm_ops->irq_bypass_add_producer) | |
843 | return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); | |
844 | ||
845 | return 0; | |
846 | } | |
847 | ||
848 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, | |
849 | struct irq_bypass_producer *prod) | |
850 | { | |
851 | struct kvm_kernel_irqfd *irqfd = | |
852 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
853 | struct kvm *kvm = irqfd->kvm; | |
854 | ||
855 | if (kvm->arch.kvm_ops->irq_bypass_del_producer) | |
856 | kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); | |
857 | } | |
858 | ||
6f63e81b BL |
859 | #ifdef CONFIG_VSX |
860 | static inline int kvmppc_get_vsr_dword_offset(int index) | |
861 | { | |
862 | int offset; | |
863 | ||
864 | if ((index != 0) && (index != 1)) | |
865 | return -1; | |
866 | ||
867 | #ifdef __BIG_ENDIAN | |
868 | offset = index; | |
869 | #else | |
870 | offset = 1 - index; | |
871 | #endif | |
872 | ||
873 | return offset; | |
874 | } | |
875 | ||
876 | static inline int kvmppc_get_vsr_word_offset(int index) | |
877 | { | |
878 | int offset; | |
879 | ||
880 | if ((index > 3) || (index < 0)) | |
881 | return -1; | |
882 | ||
883 | #ifdef __BIG_ENDIAN | |
884 | offset = index; | |
885 | #else | |
886 | offset = 3 - index; | |
887 | #endif | |
888 | return offset; | |
889 | } | |
890 | ||
891 | static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, | |
892 | u64 gpr) | |
893 | { | |
894 | union kvmppc_one_reg val; | |
895 | int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); | |
896 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
897 | ||
898 | if (offset == -1) | |
899 | return; | |
900 | ||
4eeb8556 SG |
901 | if (index >= 32) { |
902 | val.vval = VCPU_VSX_VR(vcpu, index - 32); | |
6f63e81b | 903 | val.vsxval[offset] = gpr; |
4eeb8556 | 904 | VCPU_VSX_VR(vcpu, index - 32) = val.vval; |
6f63e81b BL |
905 | } else { |
906 | VCPU_VSX_FPR(vcpu, index, offset) = gpr; | |
907 | } | |
908 | } | |
909 | ||
910 | static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, | |
911 | u64 gpr) | |
912 | { | |
913 | union kvmppc_one_reg val; | |
914 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
915 | ||
4eeb8556 SG |
916 | if (index >= 32) { |
917 | val.vval = VCPU_VSX_VR(vcpu, index - 32); | |
6f63e81b BL |
918 | val.vsxval[0] = gpr; |
919 | val.vsxval[1] = gpr; | |
4eeb8556 | 920 | VCPU_VSX_VR(vcpu, index - 32) = val.vval; |
6f63e81b BL |
921 | } else { |
922 | VCPU_VSX_FPR(vcpu, index, 0) = gpr; | |
923 | VCPU_VSX_FPR(vcpu, index, 1) = gpr; | |
924 | } | |
925 | } | |
926 | ||
94dd7fa1 SG |
927 | static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, |
928 | u32 gpr) | |
929 | { | |
930 | union kvmppc_one_reg val; | |
931 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
932 | ||
4eeb8556 | 933 | if (index >= 32) { |
94dd7fa1 SG |
934 | val.vsx32val[0] = gpr; |
935 | val.vsx32val[1] = gpr; | |
936 | val.vsx32val[2] = gpr; | |
937 | val.vsx32val[3] = gpr; | |
4eeb8556 | 938 | VCPU_VSX_VR(vcpu, index - 32) = val.vval; |
94dd7fa1 SG |
939 | } else { |
940 | val.vsx32val[0] = gpr; | |
941 | val.vsx32val[1] = gpr; | |
942 | VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; | |
943 | VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; | |
944 | } | |
945 | } | |
946 | ||
6f63e81b BL |
947 | static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, |
948 | u32 gpr32) | |
949 | { | |
950 | union kvmppc_one_reg val; | |
951 | int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); | |
952 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
953 | int dword_offset, word_offset; | |
954 | ||
955 | if (offset == -1) | |
956 | return; | |
957 | ||
4eeb8556 SG |
958 | if (index >= 32) { |
959 | val.vval = VCPU_VSX_VR(vcpu, index - 32); | |
6f63e81b | 960 | val.vsx32val[offset] = gpr32; |
4eeb8556 | 961 | VCPU_VSX_VR(vcpu, index - 32) = val.vval; |
6f63e81b BL |
962 | } else { |
963 | dword_offset = offset / 2; | |
964 | word_offset = offset % 2; | |
965 | val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); | |
966 | val.vsx32val[word_offset] = gpr32; | |
967 | VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; | |
968 | } | |
969 | } | |
970 | #endif /* CONFIG_VSX */ | |
971 | ||
09f98496 | 972 | #ifdef CONFIG_ALTIVEC |
acc9eb93 SG |
973 | static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, |
974 | int index, int element_size) | |
975 | { | |
976 | int offset; | |
977 | int elts = sizeof(vector128)/element_size; | |
978 | ||
979 | if ((index < 0) || (index >= elts)) | |
980 | return -1; | |
981 | ||
982 | if (kvmppc_need_byteswap(vcpu)) | |
983 | offset = elts - index - 1; | |
984 | else | |
985 | offset = index; | |
986 | ||
987 | return offset; | |
988 | } | |
989 | ||
990 | static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, | |
991 | int index) | |
992 | { | |
993 | return kvmppc_get_vmx_offset_generic(vcpu, index, 8); | |
994 | } | |
995 | ||
996 | static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, | |
997 | int index) | |
998 | { | |
999 | return kvmppc_get_vmx_offset_generic(vcpu, index, 4); | |
1000 | } | |
1001 | ||
1002 | static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, | |
1003 | int index) | |
1004 | { | |
1005 | return kvmppc_get_vmx_offset_generic(vcpu, index, 2); | |
1006 | } | |
1007 | ||
1008 | static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, | |
1009 | int index) | |
1010 | { | |
1011 | return kvmppc_get_vmx_offset_generic(vcpu, index, 1); | |
1012 | } | |
1013 | ||
1014 | ||
09f98496 | 1015 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, |
acc9eb93 | 1016 | u64 gpr) |
09f98496 | 1017 | { |
acc9eb93 SG |
1018 | union kvmppc_one_reg val; |
1019 | int offset = kvmppc_get_vmx_dword_offset(vcpu, | |
1020 | vcpu->arch.mmio_vmx_offset); | |
09f98496 | 1021 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
09f98496 | 1022 | |
acc9eb93 SG |
1023 | if (offset == -1) |
1024 | return; | |
1025 | ||
1026 | val.vval = VCPU_VSX_VR(vcpu, index); | |
1027 | val.vsxval[offset] = gpr; | |
1028 | VCPU_VSX_VR(vcpu, index) = val.vval; | |
1029 | } | |
1030 | ||
1031 | static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, | |
1032 | u32 gpr32) | |
1033 | { | |
1034 | union kvmppc_one_reg val; | |
1035 | int offset = kvmppc_get_vmx_word_offset(vcpu, | |
1036 | vcpu->arch.mmio_vmx_offset); | |
1037 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
09f98496 | 1038 | |
acc9eb93 | 1039 | if (offset == -1) |
09f98496 JRZ |
1040 | return; |
1041 | ||
acc9eb93 SG |
1042 | val.vval = VCPU_VSX_VR(vcpu, index); |
1043 | val.vsx32val[offset] = gpr32; | |
1044 | VCPU_VSX_VR(vcpu, index) = val.vval; | |
1045 | } | |
09f98496 | 1046 | |
acc9eb93 SG |
1047 | static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, |
1048 | u16 gpr16) | |
1049 | { | |
1050 | union kvmppc_one_reg val; | |
1051 | int offset = kvmppc_get_vmx_hword_offset(vcpu, | |
1052 | vcpu->arch.mmio_vmx_offset); | |
1053 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
1054 | ||
1055 | if (offset == -1) | |
09f98496 JRZ |
1056 | return; |
1057 | ||
acc9eb93 SG |
1058 | val.vval = VCPU_VSX_VR(vcpu, index); |
1059 | val.vsx16val[offset] = gpr16; | |
1060 | VCPU_VSX_VR(vcpu, index) = val.vval; | |
1061 | } | |
1062 | ||
1063 | static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, | |
1064 | u8 gpr8) | |
1065 | { | |
1066 | union kvmppc_one_reg val; | |
1067 | int offset = kvmppc_get_vmx_byte_offset(vcpu, | |
1068 | vcpu->arch.mmio_vmx_offset); | |
1069 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | |
09f98496 | 1070 | |
acc9eb93 SG |
1071 | if (offset == -1) |
1072 | return; | |
09f98496 | 1073 | |
acc9eb93 SG |
1074 | val.vval = VCPU_VSX_VR(vcpu, index); |
1075 | val.vsx8val[offset] = gpr8; | |
1076 | VCPU_VSX_VR(vcpu, index) = val.vval; | |
09f98496 JRZ |
1077 | } |
1078 | #endif /* CONFIG_ALTIVEC */ | |
1079 | ||
6f63e81b BL |
1080 | #ifdef CONFIG_PPC_FPU |
1081 | static inline u64 sp_to_dp(u32 fprs) | |
1082 | { | |
1083 | u64 fprd; | |
1084 | ||
1085 | preempt_disable(); | |
1086 | enable_kernel_fp(); | |
1087 | asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) | |
1088 | : "fr0"); | |
1089 | preempt_enable(); | |
1090 | return fprd; | |
1091 | } | |
1092 | ||
1093 | static inline u32 dp_to_sp(u64 fprd) | |
1094 | { | |
1095 | u32 fprs; | |
1096 | ||
1097 | preempt_disable(); | |
1098 | enable_kernel_fp(); | |
1099 | asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) | |
1100 | : "fr0"); | |
1101 | preempt_enable(); | |
1102 | return fprs; | |
1103 | } | |
1104 | ||
1105 | #else | |
1106 | #define sp_to_dp(x) (x) | |
1107 | #define dp_to_sp(x) (x) | |
1108 | #endif /* CONFIG_PPC_FPU */ | |
1109 | ||
bbf45ba5 HB |
1110 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
1111 | struct kvm_run *run) | |
1112 | { | |
69b61833 | 1113 | u64 uninitialized_var(gpr); |
bbf45ba5 | 1114 | |
8e5b26b5 | 1115 | if (run->mmio.len > sizeof(gpr)) { |
bbf45ba5 HB |
1116 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
1117 | return; | |
1118 | } | |
1119 | ||
d078eed3 | 1120 | if (!vcpu->arch.mmio_host_swabbed) { |
bbf45ba5 | 1121 | switch (run->mmio.len) { |
b104d066 | 1122 | case 8: gpr = *(u64 *)run->mmio.data; break; |
8e5b26b5 AG |
1123 | case 4: gpr = *(u32 *)run->mmio.data; break; |
1124 | case 2: gpr = *(u16 *)run->mmio.data; break; | |
1125 | case 1: gpr = *(u8 *)run->mmio.data; break; | |
bbf45ba5 HB |
1126 | } |
1127 | } else { | |
bbf45ba5 | 1128 | switch (run->mmio.len) { |
d078eed3 DG |
1129 | case 8: gpr = swab64(*(u64 *)run->mmio.data); break; |
1130 | case 4: gpr = swab32(*(u32 *)run->mmio.data); break; | |
1131 | case 2: gpr = swab16(*(u16 *)run->mmio.data); break; | |
8e5b26b5 | 1132 | case 1: gpr = *(u8 *)run->mmio.data; break; |
bbf45ba5 HB |
1133 | } |
1134 | } | |
8e5b26b5 | 1135 | |
6f63e81b BL |
1136 | /* conversion between single and double precision */ |
1137 | if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) | |
1138 | gpr = sp_to_dp(gpr); | |
1139 | ||
3587d534 AG |
1140 | if (vcpu->arch.mmio_sign_extend) { |
1141 | switch (run->mmio.len) { | |
1142 | #ifdef CONFIG_PPC64 | |
1143 | case 4: | |
1144 | gpr = (s64)(s32)gpr; | |
1145 | break; | |
1146 | #endif | |
1147 | case 2: | |
1148 | gpr = (s64)(s16)gpr; | |
1149 | break; | |
1150 | case 1: | |
1151 | gpr = (s64)(s8)gpr; | |
1152 | break; | |
1153 | } | |
1154 | } | |
1155 | ||
b3c5d3c2 AG |
1156 | switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { |
1157 | case KVM_MMIO_REG_GPR: | |
b104d066 AG |
1158 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
1159 | break; | |
b3c5d3c2 | 1160 | case KVM_MMIO_REG_FPR: |
2e6baa46 SG |
1161 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
1162 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); | |
1163 | ||
efff1912 | 1164 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
b104d066 | 1165 | break; |
287d5611 | 1166 | #ifdef CONFIG_PPC_BOOK3S |
b3c5d3c2 AG |
1167 | case KVM_MMIO_REG_QPR: |
1168 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; | |
b104d066 | 1169 | break; |
b3c5d3c2 | 1170 | case KVM_MMIO_REG_FQPR: |
efff1912 | 1171 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
b3c5d3c2 | 1172 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
b104d066 | 1173 | break; |
6f63e81b BL |
1174 | #endif |
1175 | #ifdef CONFIG_VSX | |
1176 | case KVM_MMIO_REG_VSX: | |
2e6baa46 SG |
1177 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
1178 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); | |
1179 | ||
da2a32b8 | 1180 | if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) |
6f63e81b | 1181 | kvmppc_set_vsr_dword(vcpu, gpr); |
da2a32b8 | 1182 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) |
6f63e81b | 1183 | kvmppc_set_vsr_word(vcpu, gpr); |
da2a32b8 | 1184 | else if (vcpu->arch.mmio_copy_type == |
6f63e81b BL |
1185 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
1186 | kvmppc_set_vsr_dword_dump(vcpu, gpr); | |
da2a32b8 | 1187 | else if (vcpu->arch.mmio_copy_type == |
94dd7fa1 SG |
1188 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP) |
1189 | kvmppc_set_vsr_word_dump(vcpu, gpr); | |
6f63e81b | 1190 | break; |
09f98496 JRZ |
1191 | #endif |
1192 | #ifdef CONFIG_ALTIVEC | |
1193 | case KVM_MMIO_REG_VMX: | |
2e6baa46 SG |
1194 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
1195 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); | |
1196 | ||
acc9eb93 SG |
1197 | if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) |
1198 | kvmppc_set_vmx_dword(vcpu, gpr); | |
1199 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) | |
1200 | kvmppc_set_vmx_word(vcpu, gpr); | |
1201 | else if (vcpu->arch.mmio_copy_type == | |
1202 | KVMPPC_VMX_COPY_HWORD) | |
1203 | kvmppc_set_vmx_hword(vcpu, gpr); | |
1204 | else if (vcpu->arch.mmio_copy_type == | |
1205 | KVMPPC_VMX_COPY_BYTE) | |
1206 | kvmppc_set_vmx_byte(vcpu, gpr); | |
09f98496 | 1207 | break; |
873db2cd SJS |
1208 | #endif |
1209 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | |
1210 | case KVM_MMIO_REG_NESTED_GPR: | |
1211 | if (kvmppc_need_byteswap(vcpu)) | |
1212 | gpr = swab64(gpr); | |
1213 | kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, | |
1214 | sizeof(gpr)); | |
1215 | break; | |
287d5611 | 1216 | #endif |
b104d066 AG |
1217 | default: |
1218 | BUG(); | |
1219 | } | |
bbf45ba5 HB |
1220 | } |
1221 | ||
eb8b0560 PM |
1222 | static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1223 | unsigned int rt, unsigned int bytes, | |
1224 | int is_default_endian, int sign_extend) | |
bbf45ba5 | 1225 | { |
ed840ee9 | 1226 | int idx, ret; |
d078eed3 | 1227 | bool host_swabbed; |
73601775 | 1228 | |
d078eed3 | 1229 | /* Pity C doesn't have a logical XOR operator */ |
73601775 | 1230 | if (kvmppc_need_byteswap(vcpu)) { |
d078eed3 | 1231 | host_swabbed = is_default_endian; |
73601775 | 1232 | } else { |
d078eed3 | 1233 | host_swabbed = !is_default_endian; |
73601775 | 1234 | } |
ed840ee9 | 1235 | |
bbf45ba5 HB |
1236 | if (bytes > sizeof(run->mmio.data)) { |
1237 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | |
1238 | run->mmio.len); | |
1239 | } | |
1240 | ||
1241 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | |
1242 | run->mmio.len = bytes; | |
1243 | run->mmio.is_write = 0; | |
1244 | ||
1245 | vcpu->arch.io_gpr = rt; | |
d078eed3 | 1246 | vcpu->arch.mmio_host_swabbed = host_swabbed; |
bbf45ba5 HB |
1247 | vcpu->mmio_needed = 1; |
1248 | vcpu->mmio_is_write = 0; | |
eb8b0560 | 1249 | vcpu->arch.mmio_sign_extend = sign_extend; |
bbf45ba5 | 1250 | |
ed840ee9 SW |
1251 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
1252 | ||
e32edf4f | 1253 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
ed840ee9 SW |
1254 | bytes, &run->mmio.data); |
1255 | ||
1256 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1257 | ||
1258 | if (!ret) { | |
0e673fb6 AG |
1259 | kvmppc_complete_mmio_load(vcpu, run); |
1260 | vcpu->mmio_needed = 0; | |
1261 | return EMULATE_DONE; | |
1262 | } | |
1263 | ||
bbf45ba5 HB |
1264 | return EMULATE_DO_MMIO; |
1265 | } | |
eb8b0560 PM |
1266 | |
1267 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
1268 | unsigned int rt, unsigned int bytes, | |
1269 | int is_default_endian) | |
1270 | { | |
1271 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); | |
1272 | } | |
2ba9f0d8 | 1273 | EXPORT_SYMBOL_GPL(kvmppc_handle_load); |
bbf45ba5 | 1274 | |
3587d534 AG |
1275 | /* Same as above, but sign extends */ |
1276 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
73601775 CLG |
1277 | unsigned int rt, unsigned int bytes, |
1278 | int is_default_endian) | |
3587d534 | 1279 | { |
eb8b0560 | 1280 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); |
3587d534 AG |
1281 | } |
1282 | ||
6f63e81b BL |
1283 | #ifdef CONFIG_VSX |
1284 | int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
1285 | unsigned int rt, unsigned int bytes, | |
1286 | int is_default_endian, int mmio_sign_extend) | |
1287 | { | |
1288 | enum emulation_result emulated = EMULATE_DONE; | |
1289 | ||
9aa6825b PM |
1290 | /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ |
1291 | if (vcpu->arch.mmio_vsx_copy_nums > 4) | |
6f63e81b | 1292 | return EMULATE_FAIL; |
6f63e81b BL |
1293 | |
1294 | while (vcpu->arch.mmio_vsx_copy_nums) { | |
1295 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, | |
1296 | is_default_endian, mmio_sign_extend); | |
1297 | ||
1298 | if (emulated != EMULATE_DONE) | |
1299 | break; | |
1300 | ||
1301 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1302 | ||
1303 | vcpu->arch.mmio_vsx_copy_nums--; | |
1304 | vcpu->arch.mmio_vsx_offset++; | |
1305 | } | |
1306 | return emulated; | |
1307 | } | |
1308 | #endif /* CONFIG_VSX */ | |
1309 | ||
bbf45ba5 | 1310 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
73601775 | 1311 | u64 val, unsigned int bytes, int is_default_endian) |
bbf45ba5 HB |
1312 | { |
1313 | void *data = run->mmio.data; | |
ed840ee9 | 1314 | int idx, ret; |
d078eed3 | 1315 | bool host_swabbed; |
73601775 | 1316 | |
d078eed3 | 1317 | /* Pity C doesn't have a logical XOR operator */ |
73601775 | 1318 | if (kvmppc_need_byteswap(vcpu)) { |
d078eed3 | 1319 | host_swabbed = is_default_endian; |
73601775 | 1320 | } else { |
d078eed3 | 1321 | host_swabbed = !is_default_endian; |
73601775 | 1322 | } |
bbf45ba5 HB |
1323 | |
1324 | if (bytes > sizeof(run->mmio.data)) { | |
1325 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | |
1326 | run->mmio.len); | |
1327 | } | |
1328 | ||
1329 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | |
1330 | run->mmio.len = bytes; | |
1331 | run->mmio.is_write = 1; | |
1332 | vcpu->mmio_needed = 1; | |
1333 | vcpu->mmio_is_write = 1; | |
1334 | ||
6f63e81b BL |
1335 | if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) |
1336 | val = dp_to_sp(val); | |
1337 | ||
bbf45ba5 | 1338 | /* Store the value at the lowest bytes in 'data'. */ |
d078eed3 | 1339 | if (!host_swabbed) { |
bbf45ba5 | 1340 | switch (bytes) { |
b104d066 | 1341 | case 8: *(u64 *)data = val; break; |
bbf45ba5 HB |
1342 | case 4: *(u32 *)data = val; break; |
1343 | case 2: *(u16 *)data = val; break; | |
1344 | case 1: *(u8 *)data = val; break; | |
1345 | } | |
1346 | } else { | |
bbf45ba5 | 1347 | switch (bytes) { |
d078eed3 DG |
1348 | case 8: *(u64 *)data = swab64(val); break; |
1349 | case 4: *(u32 *)data = swab32(val); break; | |
1350 | case 2: *(u16 *)data = swab16(val); break; | |
1351 | case 1: *(u8 *)data = val; break; | |
bbf45ba5 HB |
1352 | } |
1353 | } | |
1354 | ||
ed840ee9 SW |
1355 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
1356 | ||
e32edf4f | 1357 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
ed840ee9 SW |
1358 | bytes, &run->mmio.data); |
1359 | ||
1360 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1361 | ||
1362 | if (!ret) { | |
0e673fb6 AG |
1363 | vcpu->mmio_needed = 0; |
1364 | return EMULATE_DONE; | |
1365 | } | |
1366 | ||
bbf45ba5 HB |
1367 | return EMULATE_DO_MMIO; |
1368 | } | |
2ba9f0d8 | 1369 | EXPORT_SYMBOL_GPL(kvmppc_handle_store); |
bbf45ba5 | 1370 | |
6f63e81b BL |
1371 | #ifdef CONFIG_VSX |
1372 | static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) | |
1373 | { | |
1374 | u32 dword_offset, word_offset; | |
1375 | union kvmppc_one_reg reg; | |
1376 | int vsx_offset = 0; | |
da2a32b8 | 1377 | int copy_type = vcpu->arch.mmio_copy_type; |
6f63e81b BL |
1378 | int result = 0; |
1379 | ||
1380 | switch (copy_type) { | |
1381 | case KVMPPC_VSX_COPY_DWORD: | |
1382 | vsx_offset = | |
1383 | kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); | |
1384 | ||
1385 | if (vsx_offset == -1) { | |
1386 | result = -1; | |
1387 | break; | |
1388 | } | |
1389 | ||
4eeb8556 | 1390 | if (rs < 32) { |
6f63e81b BL |
1391 | *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); |
1392 | } else { | |
4eeb8556 | 1393 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
6f63e81b BL |
1394 | *val = reg.vsxval[vsx_offset]; |
1395 | } | |
1396 | break; | |
1397 | ||
1398 | case KVMPPC_VSX_COPY_WORD: | |
1399 | vsx_offset = | |
1400 | kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); | |
1401 | ||
1402 | if (vsx_offset == -1) { | |
1403 | result = -1; | |
1404 | break; | |
1405 | } | |
1406 | ||
4eeb8556 | 1407 | if (rs < 32) { |
6f63e81b BL |
1408 | dword_offset = vsx_offset / 2; |
1409 | word_offset = vsx_offset % 2; | |
1410 | reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); | |
1411 | *val = reg.vsx32val[word_offset]; | |
1412 | } else { | |
4eeb8556 | 1413 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
6f63e81b BL |
1414 | *val = reg.vsx32val[vsx_offset]; |
1415 | } | |
1416 | break; | |
1417 | ||
1418 | default: | |
1419 | result = -1; | |
1420 | break; | |
1421 | } | |
1422 | ||
1423 | return result; | |
1424 | } | |
1425 | ||
1426 | int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
1427 | int rs, unsigned int bytes, int is_default_endian) | |
1428 | { | |
1429 | u64 val; | |
1430 | enum emulation_result emulated = EMULATE_DONE; | |
1431 | ||
1432 | vcpu->arch.io_gpr = rs; | |
1433 | ||
9aa6825b PM |
1434 | /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ |
1435 | if (vcpu->arch.mmio_vsx_copy_nums > 4) | |
6f63e81b | 1436 | return EMULATE_FAIL; |
6f63e81b BL |
1437 | |
1438 | while (vcpu->arch.mmio_vsx_copy_nums) { | |
1439 | if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) | |
1440 | return EMULATE_FAIL; | |
1441 | ||
1442 | emulated = kvmppc_handle_store(run, vcpu, | |
1443 | val, bytes, is_default_endian); | |
1444 | ||
1445 | if (emulated != EMULATE_DONE) | |
1446 | break; | |
1447 | ||
1448 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1449 | ||
1450 | vcpu->arch.mmio_vsx_copy_nums--; | |
1451 | vcpu->arch.mmio_vsx_offset++; | |
1452 | } | |
1453 | ||
1454 | return emulated; | |
1455 | } | |
1456 | ||
1457 | static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, | |
1458 | struct kvm_run *run) | |
1459 | { | |
1460 | enum emulation_result emulated = EMULATE_FAIL; | |
1461 | int r; | |
1462 | ||
1463 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1464 | ||
1465 | if (!vcpu->mmio_is_write) { | |
1466 | emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, | |
1467 | run->mmio.len, 1, vcpu->arch.mmio_sign_extend); | |
1468 | } else { | |
1469 | emulated = kvmppc_handle_vsx_store(run, vcpu, | |
1470 | vcpu->arch.io_gpr, run->mmio.len, 1); | |
1471 | } | |
1472 | ||
1473 | switch (emulated) { | |
1474 | case EMULATE_DO_MMIO: | |
1475 | run->exit_reason = KVM_EXIT_MMIO; | |
1476 | r = RESUME_HOST; | |
1477 | break; | |
1478 | case EMULATE_FAIL: | |
1479 | pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); | |
1480 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1481 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | |
1482 | r = RESUME_HOST; | |
1483 | break; | |
1484 | default: | |
1485 | r = RESUME_GUEST; | |
1486 | break; | |
1487 | } | |
1488 | return r; | |
1489 | } | |
1490 | #endif /* CONFIG_VSX */ | |
1491 | ||
09f98496 | 1492 | #ifdef CONFIG_ALTIVEC |
acc9eb93 SG |
1493 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1494 | unsigned int rt, unsigned int bytes, int is_default_endian) | |
09f98496 | 1495 | { |
6df3877f | 1496 | enum emulation_result emulated = EMULATE_DONE; |
09f98496 | 1497 | |
acc9eb93 SG |
1498 | if (vcpu->arch.mmio_vsx_copy_nums > 2) |
1499 | return EMULATE_FAIL; | |
1500 | ||
09f98496 | 1501 | while (vcpu->arch.mmio_vmx_copy_nums) { |
acc9eb93 | 1502 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
09f98496 JRZ |
1503 | is_default_endian, 0); |
1504 | ||
1505 | if (emulated != EMULATE_DONE) | |
1506 | break; | |
1507 | ||
1508 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1509 | vcpu->arch.mmio_vmx_copy_nums--; | |
acc9eb93 | 1510 | vcpu->arch.mmio_vmx_offset++; |
09f98496 JRZ |
1511 | } |
1512 | ||
1513 | return emulated; | |
1514 | } | |
1515 | ||
acc9eb93 | 1516 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
09f98496 | 1517 | { |
acc9eb93 SG |
1518 | union kvmppc_one_reg reg; |
1519 | int vmx_offset = 0; | |
1520 | int result = 0; | |
09f98496 | 1521 | |
acc9eb93 SG |
1522 | vmx_offset = |
1523 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
09f98496 | 1524 | |
acc9eb93 | 1525 | if (vmx_offset == -1) |
09f98496 JRZ |
1526 | return -1; |
1527 | ||
acc9eb93 SG |
1528 | reg.vval = VCPU_VSX_VR(vcpu, index); |
1529 | *val = reg.vsxval[vmx_offset]; | |
09f98496 | 1530 | |
acc9eb93 SG |
1531 | return result; |
1532 | } | |
09f98496 | 1533 | |
acc9eb93 SG |
1534 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
1535 | { | |
1536 | union kvmppc_one_reg reg; | |
1537 | int vmx_offset = 0; | |
1538 | int result = 0; | |
1539 | ||
1540 | vmx_offset = | |
1541 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1542 | ||
1543 | if (vmx_offset == -1) | |
1544 | return -1; | |
1545 | ||
1546 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1547 | *val = reg.vsx32val[vmx_offset]; | |
1548 | ||
1549 | return result; | |
1550 | } | |
1551 | ||
1552 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) | |
1553 | { | |
1554 | union kvmppc_one_reg reg; | |
1555 | int vmx_offset = 0; | |
1556 | int result = 0; | |
1557 | ||
1558 | vmx_offset = | |
1559 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1560 | ||
1561 | if (vmx_offset == -1) | |
1562 | return -1; | |
1563 | ||
1564 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1565 | *val = reg.vsx16val[vmx_offset]; | |
1566 | ||
1567 | return result; | |
09f98496 JRZ |
1568 | } |
1569 | ||
acc9eb93 SG |
1570 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
1571 | { | |
1572 | union kvmppc_one_reg reg; | |
1573 | int vmx_offset = 0; | |
1574 | int result = 0; | |
1575 | ||
1576 | vmx_offset = | |
1577 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1578 | ||
1579 | if (vmx_offset == -1) | |
1580 | return -1; | |
1581 | ||
1582 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1583 | *val = reg.vsx8val[vmx_offset]; | |
1584 | ||
1585 | return result; | |
09f98496 JRZ |
1586 | } |
1587 | ||
acc9eb93 SG |
1588 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1589 | unsigned int rs, unsigned int bytes, int is_default_endian) | |
09f98496 JRZ |
1590 | { |
1591 | u64 val = 0; | |
acc9eb93 | 1592 | unsigned int index = rs & KVM_MMIO_REG_MASK; |
09f98496 JRZ |
1593 | enum emulation_result emulated = EMULATE_DONE; |
1594 | ||
acc9eb93 SG |
1595 | if (vcpu->arch.mmio_vsx_copy_nums > 2) |
1596 | return EMULATE_FAIL; | |
1597 | ||
09f98496 JRZ |
1598 | vcpu->arch.io_gpr = rs; |
1599 | ||
1600 | while (vcpu->arch.mmio_vmx_copy_nums) { | |
acc9eb93 SG |
1601 | switch (vcpu->arch.mmio_copy_type) { |
1602 | case KVMPPC_VMX_COPY_DWORD: | |
1603 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) | |
1604 | return EMULATE_FAIL; | |
1605 | ||
1606 | break; | |
1607 | case KVMPPC_VMX_COPY_WORD: | |
1608 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) | |
1609 | return EMULATE_FAIL; | |
1610 | break; | |
1611 | case KVMPPC_VMX_COPY_HWORD: | |
1612 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) | |
1613 | return EMULATE_FAIL; | |
1614 | break; | |
1615 | case KVMPPC_VMX_COPY_BYTE: | |
1616 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) | |
1617 | return EMULATE_FAIL; | |
1618 | break; | |
1619 | default: | |
09f98496 | 1620 | return EMULATE_FAIL; |
acc9eb93 | 1621 | } |
09f98496 | 1622 | |
acc9eb93 | 1623 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
09f98496 JRZ |
1624 | is_default_endian); |
1625 | if (emulated != EMULATE_DONE) | |
1626 | break; | |
1627 | ||
1628 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1629 | vcpu->arch.mmio_vmx_copy_nums--; | |
acc9eb93 | 1630 | vcpu->arch.mmio_vmx_offset++; |
09f98496 JRZ |
1631 | } |
1632 | ||
1633 | return emulated; | |
1634 | } | |
1635 | ||
1636 | static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, | |
1637 | struct kvm_run *run) | |
1638 | { | |
1639 | enum emulation_result emulated = EMULATE_FAIL; | |
1640 | int r; | |
1641 | ||
1642 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1643 | ||
1644 | if (!vcpu->mmio_is_write) { | |
acc9eb93 SG |
1645 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
1646 | vcpu->arch.io_gpr, run->mmio.len, 1); | |
09f98496 | 1647 | } else { |
acc9eb93 SG |
1648 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
1649 | vcpu->arch.io_gpr, run->mmio.len, 1); | |
09f98496 JRZ |
1650 | } |
1651 | ||
1652 | switch (emulated) { | |
1653 | case EMULATE_DO_MMIO: | |
1654 | run->exit_reason = KVM_EXIT_MMIO; | |
1655 | r = RESUME_HOST; | |
1656 | break; | |
1657 | case EMULATE_FAIL: | |
1658 | pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); | |
1659 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1660 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | |
1661 | r = RESUME_HOST; | |
1662 | break; | |
1663 | default: | |
1664 | r = RESUME_GUEST; | |
1665 | break; | |
1666 | } | |
1667 | return r; | |
1668 | } | |
1669 | #endif /* CONFIG_ALTIVEC */ | |
1670 | ||
8a41ea53 MC |
1671 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
1672 | { | |
1673 | int r = 0; | |
1674 | union kvmppc_one_reg val; | |
1675 | int size; | |
1676 | ||
1677 | size = one_reg_size(reg->id); | |
1678 | if (size > sizeof(val)) | |
1679 | return -EINVAL; | |
1680 | ||
1681 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); | |
1682 | if (r == -EINVAL) { | |
1683 | r = 0; | |
1684 | switch (reg->id) { | |
3840edc8 MC |
1685 | #ifdef CONFIG_ALTIVEC |
1686 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: | |
1687 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1688 | r = -ENXIO; | |
1689 | break; | |
1690 | } | |
b4d7f161 | 1691 | val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; |
3840edc8 MC |
1692 | break; |
1693 | case KVM_REG_PPC_VSCR: | |
1694 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1695 | r = -ENXIO; | |
1696 | break; | |
1697 | } | |
b4d7f161 | 1698 | val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); |
3840edc8 MC |
1699 | break; |
1700 | case KVM_REG_PPC_VRSAVE: | |
b4d7f161 | 1701 | val = get_reg_val(reg->id, vcpu->arch.vrsave); |
3840edc8 MC |
1702 | break; |
1703 | #endif /* CONFIG_ALTIVEC */ | |
8a41ea53 MC |
1704 | default: |
1705 | r = -EINVAL; | |
1706 | break; | |
1707 | } | |
1708 | } | |
1709 | ||
1710 | if (r) | |
1711 | return r; | |
1712 | ||
1713 | if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) | |
1714 | r = -EFAULT; | |
1715 | ||
1716 | return r; | |
1717 | } | |
1718 | ||
1719 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |
1720 | { | |
1721 | int r; | |
1722 | union kvmppc_one_reg val; | |
1723 | int size; | |
1724 | ||
1725 | size = one_reg_size(reg->id); | |
1726 | if (size > sizeof(val)) | |
1727 | return -EINVAL; | |
1728 | ||
1729 | if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) | |
1730 | return -EFAULT; | |
1731 | ||
1732 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); | |
1733 | if (r == -EINVAL) { | |
1734 | r = 0; | |
1735 | switch (reg->id) { | |
3840edc8 MC |
1736 | #ifdef CONFIG_ALTIVEC |
1737 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: | |
1738 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1739 | r = -ENXIO; | |
1740 | break; | |
1741 | } | |
b4d7f161 | 1742 | vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; |
3840edc8 MC |
1743 | break; |
1744 | case KVM_REG_PPC_VSCR: | |
1745 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1746 | r = -ENXIO; | |
1747 | break; | |
1748 | } | |
b4d7f161 | 1749 | vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); |
3840edc8 MC |
1750 | break; |
1751 | case KVM_REG_PPC_VRSAVE: | |
b4d7f161 GK |
1752 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
1753 | r = -ENXIO; | |
1754 | break; | |
1755 | } | |
1756 | vcpu->arch.vrsave = set_reg_val(reg->id, val); | |
3840edc8 MC |
1757 | break; |
1758 | #endif /* CONFIG_ALTIVEC */ | |
8a41ea53 MC |
1759 | default: |
1760 | r = -EINVAL; | |
1761 | break; | |
1762 | } | |
1763 | } | |
1764 | ||
1765 | return r; | |
1766 | } | |
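/*
 * Illustrative userspace sketch of the ONE_REG interface served above:
 * reading VRSAVE, flipping a bit and writing it back. Assumed (not part of
 * this file): "vcpu_fd" is an open vcpu file descriptor and <stdint.h>,
 * <sys/ioctl.h> and <linux/kvm.h> are included; the function name is made
 * up for the example.
 */
static int example_roundtrip_vrsave(int vcpu_fd)
{
	uint32_t vrsave = 0;		/* KVM_REG_PPC_VRSAVE is 32 bits wide */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,
		.addr = (uintptr_t)&vrsave,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	vrsave |= 1;			/* modify it just to show a write */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}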
1767 | ||
bbf45ba5 HB |
1768 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
1769 | { | |
1770 | int r; | |
bbf45ba5 | 1771 | |
accb757d CD |
1772 | vcpu_load(vcpu); |
1773 | ||
bbf45ba5 | 1774 | if (vcpu->mmio_needed) { |
6f63e81b | 1775 | vcpu->mmio_needed = 0; |
bbf45ba5 HB |
1776 | if (!vcpu->mmio_is_write) |
1777 | kvmppc_complete_mmio_load(vcpu, run); | |
6f63e81b BL |
1778 | #ifdef CONFIG_VSX |
1779 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { | |
1780 | vcpu->arch.mmio_vsx_copy_nums--; | |
1781 | vcpu->arch.mmio_vsx_offset++; | |
1782 | } | |
1783 | ||
1784 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { | |
1785 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); | |
1786 | if (r == RESUME_HOST) { | |
1787 | vcpu->mmio_needed = 1; | |
accb757d | 1788 | goto out; |
6f63e81b BL |
1789 | } |
1790 | } | |
09f98496 JRZ |
1791 | #endif |
1792 | #ifdef CONFIG_ALTIVEC | |
acc9eb93 | 1793 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
09f98496 | 1794 | vcpu->arch.mmio_vmx_copy_nums--; |
acc9eb93 SG |
1795 | vcpu->arch.mmio_vmx_offset++; |
1796 | } | |
09f98496 JRZ |
1797 | |
1798 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { | |
1799 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); | |
1800 | if (r == RESUME_HOST) { | |
1801 | vcpu->mmio_needed = 1; | |
1ab03c07 | 1802 | goto out; |
09f98496 JRZ |
1803 | } |
1804 | } | |
6f63e81b | 1805 | #endif |
ad0a048b AG |
1806 | } else if (vcpu->arch.osi_needed) { |
1807 | u64 *gprs = run->osi.gprs; | |
1808 | int i; | |
1809 | ||
1810 | for (i = 0; i < 32; i++) | |
1811 | kvmppc_set_gpr(vcpu, i, gprs[i]); | |
1812 | vcpu->arch.osi_needed = 0; | |
de56a948 PM |
1813 | } else if (vcpu->arch.hcall_needed) { |
1814 | int i; | |
1815 | ||
1816 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); | |
1817 | for (i = 0; i < 9; ++i) | |
1818 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); | |
1819 | vcpu->arch.hcall_needed = 0; | |
1c810636 AG |
1820 | #ifdef CONFIG_BOOKE |
1821 | } else if (vcpu->arch.epr_needed) { | |
1822 | kvmppc_set_epr(vcpu, run->epr.epr); | |
1823 | vcpu->arch.epr_needed = 0; | |
1824 | #endif | |
bbf45ba5 HB |
1825 | } |
1826 | ||
20b7035c | 1827 | kvm_sigset_activate(vcpu); |
6f63e81b | 1828 | |
460df4c1 PB |
1829 | if (run->immediate_exit) |
1830 | r = -EINTR; | |
1831 | else | |
1832 | r = kvmppc_vcpu_run(run, vcpu); | |
bbf45ba5 | 1833 | |
20b7035c | 1834 | kvm_sigset_deactivate(vcpu); |
bbf45ba5 | 1835 | |
c662f773 | 1836 | #ifdef CONFIG_ALTIVEC |
accb757d | 1837 | out: |
c662f773 | 1838 | #endif |
accb757d | 1839 | vcpu_put(vcpu); |
bbf45ba5 HB |
1840 | return r; |
1841 | } | |
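/*
 * Illustrative userspace sketch of the run loop that drives
 * kvm_arch_vcpu_ioctl_run() above. Completing an MMIO exit and re-entering
 * KVM_RUN is what makes the mmio_needed resumption logic at the top of that
 * function execute. Assumed (not part of this file): "vcpu_fd" is an open
 * vcpu fd, "run" is its mmap'ed struct kvm_run, and <sys/ioctl.h> plus
 * <linux/kvm.h> are included.
 */
static void example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/*
			 * Emulate the device access described by
			 * run->mmio.phys_addr / len / is_write. For a read,
			 * fill run->mmio.data before the next KVM_RUN so
			 * kvmppc_complete_mmio_load() can copy it into the
			 * guest register.
			 */
			break;
		default:
			return;
		}
	}
}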
1842 | ||
1843 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | |
1844 | { | |
19ccb76a | 1845 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
4fe27d2a | 1846 | kvmppc_core_dequeue_external(vcpu); |
19ccb76a PM |
1847 | return 0; |
1848 | } | |
1849 | ||
1850 | kvmppc_core_queue_external(vcpu, irq); | |
b6d33834 | 1851 | |
dfd4d47e | 1852 | kvm_vcpu_kick(vcpu); |
45c5eb67 | 1853 | |
bbf45ba5 HB |
1854 | return 0; |
1855 | } | |
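/*
 * Illustrative userspace sketch: asserting and clearing the external
 * interrupt line serviced by kvm_vcpu_ioctl_interrupt() above. Assumed
 * (not part of this file): "vcpu_fd" is an open vcpu fd; KVM_INTERRUPT_SET
 * and KVM_INTERRUPT_UNSET come from the uapi headers.
 */
static int example_external_irq(int vcpu_fd, int assert)
{
	struct kvm_interrupt irq = {
		.irq = assert ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}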
1856 | ||
71fbfd5f AG |
1857 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
1858 | struct kvm_enable_cap *cap) | |
1859 | { | |
1860 | int r; | |
1861 | ||
1862 | if (cap->flags) | |
1863 | return -EINVAL; | |
1864 | ||
1865 | switch (cap->cap) { | |
ad0a048b AG |
1866 | case KVM_CAP_PPC_OSI: |
1867 | r = 0; | |
1868 | vcpu->arch.osi_enabled = true; | |
1869 | break; | |
930b412a AG |
1870 | case KVM_CAP_PPC_PAPR: |
1871 | r = 0; | |
1872 | vcpu->arch.papr_enabled = true; | |
1873 | break; | |
1c810636 AG |
1874 | case KVM_CAP_PPC_EPR: |
1875 | r = 0; | |
5df554ad SW |
1876 | if (cap->args[0]) |
1877 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; | |
1878 | else | |
1879 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; | |
1c810636 | 1880 | break; |
f61c94bb BB |
1881 | #ifdef CONFIG_BOOKE |
1882 | case KVM_CAP_PPC_BOOKE_WATCHDOG: | |
1883 | r = 0; | |
1884 | vcpu->arch.watchdog_enabled = true; | |
1885 | break; | |
1886 | #endif | |
bf7ca4bd | 1887 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
dc83b8bc SW |
1888 | case KVM_CAP_SW_TLB: { |
1889 | struct kvm_config_tlb cfg; | |
1890 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; | |
1891 | ||
1892 | r = -EFAULT; | |
1893 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) | |
1894 | break; | |
1895 | ||
1896 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); | |
1897 | break; | |
eb1e4f43 SW |
1898 | } |
1899 | #endif | |
1900 | #ifdef CONFIG_KVM_MPIC | |
1901 | case KVM_CAP_IRQ_MPIC: { | |
70abaded | 1902 | struct fd f; |
eb1e4f43 SW |
1903 | struct kvm_device *dev; |
1904 | ||
1905 | r = -EBADF; | |
70abaded AV |
1906 | f = fdget(cap->args[0]); |
1907 | if (!f.file) | |
eb1e4f43 SW |
1908 | break; |
1909 | ||
1910 | r = -EPERM; | |
70abaded | 1911 | dev = kvm_device_from_filp(f.file); |
eb1e4f43 SW |
1912 | if (dev) |
1913 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); | |
1914 | ||
70abaded | 1915 | fdput(f); |
eb1e4f43 | 1916 | break; |
dc83b8bc SW |
1917 | } |
1918 | #endif | |
5975a2e0 PM |
1919 | #ifdef CONFIG_KVM_XICS |
1920 | case KVM_CAP_IRQ_XICS: { | |
70abaded | 1921 | struct fd f; |
5975a2e0 PM |
1922 | struct kvm_device *dev; |
1923 | ||
1924 | r = -EBADF; | |
70abaded AV |
1925 | f = fdget(cap->args[0]); |
1926 | if (!f.file) | |
5975a2e0 PM |
1927 | break; |
1928 | ||
1929 | r = -EPERM; | |
70abaded | 1930 | dev = kvm_device_from_filp(f.file); |
5af50993 | 1931 | if (dev) { |
03f95332 | 1932 | if (xics_on_xive()) |
5af50993 BH |
1933 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); |
1934 | else | |
1935 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); | |
1936 | } | |
5975a2e0 | 1937 | |
70abaded | 1938 | fdput(f); |
5975a2e0 PM |
1939 | break; |
1940 | } | |
1941 | #endif /* CONFIG_KVM_XICS */ | |
eacc56bb CLG |
1942 | #ifdef CONFIG_KVM_XIVE |
1943 | case KVM_CAP_PPC_IRQ_XIVE: { | |
1944 | struct fd f; | |
1945 | struct kvm_device *dev; | |
1946 | ||
1947 | r = -EBADF; | |
1948 | f = fdget(cap->args[0]); | |
1949 | if (!f.file) | |
1950 | break; | |
1951 | ||
1952 | r = -ENXIO; | |
1953 | if (!xive_enabled()) | |
1954 | break; | |
1955 | ||
1956 | r = -EPERM; | |
1957 | dev = kvm_device_from_filp(f.file); | |
1958 | if (dev) | |
1959 | r = kvmppc_xive_native_connect_vcpu(dev, vcpu, | |
1960 | cap->args[1]); | |
1961 | ||
1962 | fdput(f); | |
1963 | break; | |
1964 | } | |
1965 | #endif /* CONFIG_KVM_XIVE */ | |
134764ed AP |
1966 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
1967 | case KVM_CAP_PPC_FWNMI: | |
1968 | r = -EINVAL; | |
1969 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) | |
1970 | break; | |
1971 | r = 0; | |
1972 | vcpu->kvm->arch.fwnmi_enabled = true; | |
1973 | break; | |
1974 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ | |
71fbfd5f AG |
1975 | default: |
1976 | r = -EINVAL; | |
1977 | break; | |
1978 | } | |
1979 | ||
af8f38b3 AG |
1980 | if (!r) |
1981 | r = kvmppc_sanity_check(vcpu); | |
1982 | ||
71fbfd5f AG |
1983 | return r; |
1984 | } | |
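/*
 * Illustrative userspace sketch: enabling one of the per-vcpu capabilities
 * handled above, here KVM_CAP_PPC_PAPR. Assumed (not part of this file):
 * "vcpu_fd" is an open vcpu fd and <linux/kvm.h> is included.
 */
static int example_enable_papr(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_PAPR,	/* flags and args stay zero */
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}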
1985 | ||
34a75b0f PM |
1986 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
1987 | { | |
1988 | #ifdef CONFIG_KVM_MPIC | |
1989 | if (kvm->arch.mpic) | |
1990 | return true; | |
1991 | #endif | |
1992 | #ifdef CONFIG_KVM_XICS | |
5af50993 | 1993 | if (kvm->arch.xics || kvm->arch.xive) |
34a75b0f PM |
1994 | return true; |
1995 | #endif | |
1996 | return false; | |
1997 | } | |
1998 | ||
bbf45ba5 HB |
1999 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
2000 | struct kvm_mp_state *mp_state) | |
2001 | { | |
2002 | return -EINVAL; | |
2003 | } | |
2004 | ||
2005 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |
2006 | struct kvm_mp_state *mp_state) | |
2007 | { | |
2008 | return -EINVAL; | |
2009 | } | |
2010 | ||
5cb0944c PB |
2011 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
2012 | unsigned int ioctl, unsigned long arg) | |
bbf45ba5 HB |
2013 | { |
2014 | struct kvm_vcpu *vcpu = filp->private_data; | |
2015 | void __user *argp = (void __user *)arg; | |
bbf45ba5 | 2016 | |
9b062471 | 2017 | if (ioctl == KVM_INTERRUPT) { |
bbf45ba5 | 2018 | struct kvm_interrupt irq; |
bbf45ba5 | 2019 | if (copy_from_user(&irq, argp, sizeof(irq))) |
9b062471 CD |
2020 | return -EFAULT; |
2021 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); | |
bbf45ba5 | 2022 | } |
5cb0944c PB |
2023 | return -ENOIOCTLCMD; |
2024 | } | |
2025 | ||
2026 | long kvm_arch_vcpu_ioctl(struct file *filp, | |
2027 | unsigned int ioctl, unsigned long arg) | |
2028 | { | |
2029 | struct kvm_vcpu *vcpu = filp->private_data; | |
2030 | void __user *argp = (void __user *)arg; | |
2031 | long r; | |
19483d14 | 2032 | |
9b062471 | 2033 | switch (ioctl) { |
71fbfd5f AG |
2034 | case KVM_ENABLE_CAP: |
2035 | { | |
2036 | struct kvm_enable_cap cap; | |
2037 | r = -EFAULT; | |
b3cebfe8 | 2038 | vcpu_load(vcpu); |
71fbfd5f AG |
2039 | if (copy_from_user(&cap, argp, sizeof(cap))) |
2040 | goto out; | |
2041 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | |
b3cebfe8 | 2042 | vcpu_put(vcpu); |
71fbfd5f AG |
2043 | break; |
2044 | } | |
dc83b8bc | 2045 | |
e24ed81f AG |
2046 | case KVM_SET_ONE_REG: |
2047 | case KVM_GET_ONE_REG: | |
2048 | { | |
2049 | struct kvm_one_reg reg; | |
2050 | r = -EFAULT; | |
2051 | if (copy_from_user(&reg, argp, sizeof(reg))) | |
2052 | goto out; | |
2053 | if (ioctl == KVM_SET_ONE_REG) | |
2054 | r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); | |
2055 | else | |
2056 | r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); | |
2057 | break; | |
2058 | } | |
2059 | ||
bf7ca4bd | 2060 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
dc83b8bc SW |
2061 | case KVM_DIRTY_TLB: { |
2062 | struct kvm_dirty_tlb dirty; | |
2063 | r = -EFAULT; | |
b3cebfe8 | 2064 | vcpu_load(vcpu); |
dc83b8bc SW |
2065 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
2066 | goto out; | |
2067 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); | |
b3cebfe8 | 2068 | vcpu_put(vcpu); |
dc83b8bc SW |
2069 | break; |
2070 | } | |
2071 | #endif | |
bbf45ba5 HB |
2072 | default: |
2073 | r = -EINVAL; | |
2074 | } | |
2075 | ||
2076 | out: | |
2077 | return r; | |
2078 | } | |
2079 | ||
1499fa80 | 2080 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
5b1c1493 CO |
2081 | { |
2082 | return VM_FAULT_SIGBUS; | |
2083 | } | |
2084 | ||
15711e9c AG |
2085 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
2086 | { | |
784bafac SY |
2087 | u32 inst_nop = 0x60000000; |
2088 | #ifdef CONFIG_KVM_BOOKE_HV | |
2089 | u32 inst_sc1 = 0x44000022; | |
2743103f AG |
2090 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
2091 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); | |
2092 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); | |
2093 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); | |
784bafac | 2094 | #else |
15711e9c AG |
2095 | u32 inst_lis = 0x3c000000; |
2096 | u32 inst_ori = 0x60000000; | |
15711e9c AG |
2097 | u32 inst_sc = 0x44000002; |
2098 | u32 inst_imm_mask = 0xffff; | |
2099 | ||
2100 | /* | |
2101 | * The hypercall to get into KVM from within guest context is as | |
2102 | * follows: | |
2103 | * | |
2104 | * lis r0, KVM_SC_MAGIC_R0@h | |
2105 | * ori r0, r0, KVM_SC_MAGIC_R0@l | |
2106 | * sc | |
2107 | * nop | |
2108 | */ | |
2743103f AG |
2109 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
2110 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); | |
2111 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); | |
2112 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); | |
784bafac | 2113 | #endif |
15711e9c | 2114 | |
9202e076 LYB |
2115 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
2116 | ||
15711e9c AG |
2117 | return 0; |
2118 | } | |
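/*
 * Illustrative userspace sketch: fetching the hypercall sequence assembled
 * above. A VMM would typically forward these four instructions to the
 * guest, e.g. as the hcall-instructions property of the /hypervisor
 * device-tree node. Assumed (not part of this file): "vm_fd" is an open VM
 * fd and <string.h>, <stdint.h> and <linux/kvm.h> are included.
 */
static int example_get_pvinfo(int vm_fd, uint32_t hcall[4])
{
	struct kvm_ppc_pvinfo pvinfo;

	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
		return -1;

	memcpy(hcall, pvinfo.hcall, sizeof(pvinfo.hcall));
	return 0;
}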
2119 | ||
5efdb4be AG |
2120 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
2121 | bool line_status) | |
2122 | { | |
2123 | if (!irqchip_in_kernel(kvm)) | |
2124 | return -ENXIO; | |
2125 | ||
2126 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | |
2127 | irq_event->irq, irq_event->level, | |
2128 | line_status); | |
2129 | return 0; | |
2130 | } | |
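/*
 * Illustrative userspace sketch: driving an in-kernel irqchip pin through
 * kvm_vm_ioctl_irq_line() above. Assumed (not part of this file): "vm_fd"
 * is an open VM fd on which an in-kernel MPIC/XICS/XIVE has already been
 * created.
 */
static int example_set_irq_line(int vm_fd, unsigned int pin, int level)
{
	struct kvm_irq_level irq = {
		.irq   = pin,
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}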
2131 | ||
699a0ea0 | 2132 | |
e5d83c74 PB |
2133 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
2134 | struct kvm_enable_cap *cap) | |
699a0ea0 PM |
2135 | { |
2136 | int r; | |
2137 | ||
2138 | if (cap->flags) | |
2139 | return -EINVAL; | |
2140 | ||
2141 | switch (cap->cap) { | |
2142 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | |
2143 | case KVM_CAP_PPC_ENABLE_HCALL: { | |
2144 | unsigned long hcall = cap->args[0]; | |
2145 | ||
2146 | r = -EINVAL; | |
2147 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || | |
2148 | cap->args[1] > 1) | |
2149 | break; | |
ae2113a4 PM |
2150 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
2151 | break; | |
699a0ea0 PM |
2152 | if (cap->args[1]) |
2153 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); | |
2154 | else | |
2155 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); | |
2156 | r = 0; | |
2157 | break; | |
2158 | } | |
3c313524 PM |
2159 | case KVM_CAP_PPC_SMT: { |
2160 | unsigned long mode = cap->args[0]; | |
2161 | unsigned long flags = cap->args[1]; | |
2162 | ||
2163 | r = -EINVAL; | |
2164 | if (kvm->arch.kvm_ops->set_smt_mode) | |
2165 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); | |
2166 | break; | |
2167 | } | |
aa069a99 PM |
2168 | |
2169 | case KVM_CAP_PPC_NESTED_HV: | |
2170 | r = -EINVAL; | |
2171 | if (!is_kvmppc_hv_enabled(kvm) || | |
2172 | !kvm->arch.kvm_ops->enable_nested) | |
2173 | break; | |
2174 | r = kvm->arch.kvm_ops->enable_nested(kvm); | |
2175 | break; | |
9a5788c6 PM |
2176 | #endif |
2177 | #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) | |
2178 | case KVM_CAP_PPC_SECURE_GUEST: | |
2179 | r = -EINVAL; | |
2180 | if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) | |
2181 | break; | |
2182 | r = kvm->arch.kvm_ops->enable_svm(kvm); | |
2183 | break; | |
699a0ea0 PM |
2184 | #endif |
2185 | default: | |
2186 | r = -EINVAL; | |
2187 | break; | |
2188 | } | |
2189 | ||
2190 | return r; | |
2191 | } | |
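/*
 * Illustrative userspace sketch: toggling in-kernel handling of a single
 * hypercall via KVM_CAP_PPC_ENABLE_HCALL as handled above. Assumed (not
 * part of this file): "vm_fd" is an open VM fd; H_RTAS is just one example
 * opcode.
 */
static int example_enable_hcall(int vm_fd, unsigned long hcall, int enable)
{
	struct kvm_enable_cap cap = {
		.cap     = KVM_CAP_PPC_ENABLE_HCALL,
		.args[0] = hcall,	/* e.g. H_RTAS */
		.args[1] = enable,	/* 1: handle in kernel, 0: don't */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}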
2192 | ||
3214d01f PM |
2193 | #ifdef CONFIG_PPC_BOOK3S_64 |
2194 | /* | |
2195 | * These functions check whether the underlying hardware is safe | |
2196 | * against attacks based on observing the effects of speculatively | |
2197 | * executed instructions, and whether it supplies instructions for | |
2198 | * use in workarounds. The information comes from firmware, either | |
2199 | * via the device tree on powernv platforms or from an hcall on | |
2200 | * pseries platforms. | |
2201 | */ | |
2202 | #ifdef CONFIG_PPC_PSERIES | |
2203 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2204 | { | |
2205 | struct h_cpu_char_result c; | |
2206 | unsigned long rc; | |
2207 | ||
2208 | if (!machine_is(pseries)) | |
2209 | return -ENOTTY; | |
2210 | ||
2211 | rc = plpar_get_cpu_characteristics(&c); | |
2212 | if (rc == H_SUCCESS) { | |
2213 | cp->character = c.character; | |
2214 | cp->behaviour = c.behaviour; | |
2215 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | | |
2216 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | | |
2217 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | | |
2218 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | | |
2219 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | | |
2220 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | | |
2221 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | | |
2b57ecd0 SJS |
2222 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
2223 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; | |
3214d01f PM |
2224 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
2225 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | | |
2b57ecd0 SJS |
2226 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
2227 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; | |
3214d01f PM |
2228 | } |
2229 | return 0; | |
2230 | } | |
2231 | #else | |
2232 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2233 | { | |
2234 | return -ENOTTY; | |
2235 | } | |
2236 | #endif | |
2237 | ||
2238 | static inline bool have_fw_feat(struct device_node *fw_features, | |
2239 | const char *state, const char *name) | |
2240 | { | |
2241 | struct device_node *np; | |
2242 | bool r = false; | |
2243 | ||
2244 | np = of_get_child_by_name(fw_features, name); | |
2245 | if (np) { | |
2246 | r = of_property_read_bool(np, state); | |
2247 | of_node_put(np); | |
2248 | } | |
2249 | return r; | |
2250 | } | |
2251 | ||
2252 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2253 | { | |
2254 | struct device_node *np, *fw_features; | |
2255 | int r; | |
2256 | ||
2257 | memset(cp, 0, sizeof(*cp)); | |
2258 | r = pseries_get_cpu_char(cp); | |
2259 | if (r != -ENOTTY) | |
2260 | return r; | |
2261 | ||
2262 | np = of_find_node_by_name(NULL, "ibm,opal"); | |
2263 | if (np) { | |
2264 | fw_features = of_get_child_by_name(np, "fw-features"); | |
2265 | of_node_put(np); | |
2266 | if (!fw_features) | |
2267 | return 0; | |
2268 | if (have_fw_feat(fw_features, "enabled", | |
2269 | "inst-spec-barrier-ori31,31,0")) | |
2270 | cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; | |
2271 | if (have_fw_feat(fw_features, "enabled", | |
2272 | "fw-bcctrl-serialized")) | |
2273 | cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; | |
2274 | if (have_fw_feat(fw_features, "enabled", | |
2275 | "inst-l1d-flush-ori30,30,0")) | |
2276 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; | |
2277 | if (have_fw_feat(fw_features, "enabled", | |
2278 | "inst-l1d-flush-trig2")) | |
2279 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; | |
2280 | if (have_fw_feat(fw_features, "enabled", | |
2281 | "fw-l1d-thread-split")) | |
2282 | cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; | |
2283 | if (have_fw_feat(fw_features, "enabled", | |
2284 | "fw-count-cache-disabled")) | |
2285 | cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; | |
2b57ecd0 SJS |
2286 | if (have_fw_feat(fw_features, "enabled", |
2287 | "fw-count-cache-flush-bcctr2,0,0")) | |
2288 | cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; | |
3214d01f PM |
2289 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
2290 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | | |
2291 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | | |
2292 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | | |
2293 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | | |
2b57ecd0 SJS |
2294 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
2295 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; | |
3214d01f PM |
2296 | |
2297 | if (have_fw_feat(fw_features, "enabled", | |
2298 | "speculation-policy-favor-security")) | |
2299 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; | |
2300 | if (!have_fw_feat(fw_features, "disabled", | |
2301 | "needs-l1d-flush-msr-pr-0-to-1")) | |
2302 | cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; | |
2303 | if (!have_fw_feat(fw_features, "disabled", | |
2304 | "needs-spec-barrier-for-bound-checks")) | |
2305 | cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
2b57ecd0 SJS |
2306 | if (have_fw_feat(fw_features, "enabled", |
2307 | "needs-count-cache-flush-on-context-switch")) | |
2308 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; | |
3214d01f PM |
2309 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
2310 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | | |
2b57ecd0 SJS |
2311 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
2312 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; | |
3214d01f PM |
2313 | |
2314 | of_node_put(fw_features); | |
2315 | } | |
2316 | ||
2317 | return 0; | |
2318 | } | |
2319 | #endif | |
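/*
 * Illustrative userspace sketch: querying the characterisation collected by
 * kvmppc_get_cpu_char() above so a VMM can advise its guest about the
 * recommended mitigations. Assumed (not part of this file): "vm_fd" is an
 * open VM fd and <linux/kvm.h> is included.
 */
static int example_query_cpu_char(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
		return -1;

	/* Non-zero: the guest should flush the L1D when returning from
	 * its kernel to user mode. */
	return !!(cc.behaviour & cc.behaviour_mask &
		  KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR);
}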
2320 | ||
bbf45ba5 HB |
2321 | long kvm_arch_vm_ioctl(struct file *filp, |
2322 | unsigned int ioctl, unsigned long arg) | |
2323 | { | |
5df554ad | 2324 | struct kvm *kvm __maybe_unused = filp->private_data; |
15711e9c | 2325 | void __user *argp = (void __user *)arg; |
bbf45ba5 HB |
2326 | long r; |
2327 | ||
2328 | switch (ioctl) { | |
15711e9c AG |
2329 | case KVM_PPC_GET_PVINFO: { |
2330 | struct kvm_ppc_pvinfo pvinfo; | |
d8cdddcd | 2331 | memset(&pvinfo, 0, sizeof(pvinfo)); |
15711e9c AG |
2332 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
2333 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { | |
2334 | r = -EFAULT; | |
2335 | goto out; | |
2336 | } | |
2337 | ||
2338 | break; | |
2339 | } | |
76d837a4 | 2340 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
58ded420 AK |
2341 | case KVM_CREATE_SPAPR_TCE_64: { |
2342 | struct kvm_create_spapr_tce_64 create_tce_64; | |
2343 | ||
2344 | r = -EFAULT; | |
2345 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) | |
2346 | goto out; | |
2347 | if (create_tce_64.flags) { | |
2348 | r = -EINVAL; | |
2349 | goto out; | |
2350 | } | |
2351 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); | |
2352 | goto out; | |
2353 | } | |
54738c09 DG |
2354 | case KVM_CREATE_SPAPR_TCE: { |
2355 | struct kvm_create_spapr_tce create_tce; | |
58ded420 | 2356 | struct kvm_create_spapr_tce_64 create_tce_64; |
54738c09 DG |
2357 | |
2358 | r = -EFAULT; | |
2359 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) | |
2360 | goto out; | |
58ded420 AK |
2361 | |
2362 | create_tce_64.liobn = create_tce.liobn; | |
2363 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; | |
2364 | create_tce_64.offset = 0; | |
2365 | create_tce_64.size = create_tce.window_size >> | |
2366 | IOMMU_PAGE_SHIFT_4K; | |
2367 | create_tce_64.flags = 0; | |
2368 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); | |
54738c09 DG |
2369 | goto out; |
2370 | } | |
76d837a4 PM |
2371 | #endif |
2372 | #ifdef CONFIG_PPC_BOOK3S_64 | |
5b74716e | 2373 | case KVM_PPC_GET_SMMU_INFO: { |
5b74716e | 2374 | struct kvm_ppc_smmu_info info; |
cbbc58d4 | 2375 | struct kvm *kvm = filp->private_data; |
5b74716e BH |
2376 | |
2377 | memset(&info, 0, sizeof(info)); | |
cbbc58d4 | 2378 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
5b74716e BH |
2379 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
2380 | r = -EFAULT; | |
2381 | break; | |
2382 | } | |
8e591cb7 ME |
2383 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
2384 | struct kvm *kvm = filp->private_data; | |
2385 | ||
2386 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); | |
2387 | break; | |
2388 | } | |
c9270132 PM |
2389 | case KVM_PPC_CONFIGURE_V3_MMU: { |
2390 | struct kvm *kvm = filp->private_data; | |
2391 | struct kvm_ppc_mmuv3_cfg cfg; | |
2392 | ||
2393 | r = -EINVAL; | |
2394 | if (!kvm->arch.kvm_ops->configure_mmu) | |
2395 | goto out; | |
2396 | r = -EFAULT; | |
2397 | if (copy_from_user(&cfg, argp, sizeof(cfg))) | |
2398 | goto out; | |
2399 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); | |
2400 | break; | |
2401 | } | |
2402 | case KVM_PPC_GET_RMMU_INFO: { | |
2403 | struct kvm *kvm = filp->private_data; | |
2404 | struct kvm_ppc_rmmu_info info; | |
2405 | ||
2406 | r = -EINVAL; | |
2407 | if (!kvm->arch.kvm_ops->get_rmmu_info) | |
2408 | goto out; | |
2409 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); | |
2410 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | |
2411 | r = -EFAULT; | |
2412 | break; | |
2413 | } | |
3214d01f PM |
2414 | case KVM_PPC_GET_CPU_CHAR: { |
2415 | struct kvm_ppc_cpu_char cpuchar; | |
2416 | ||
2417 | r = kvmppc_get_cpu_char(&cpuchar); | |
2418 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) | |
2419 | r = -EFAULT; | |
2420 | break; | |
2421 | } | |
22945688 BR |
2422 | case KVM_PPC_SVM_OFF: { |
2423 | struct kvm *kvm = filp->private_data; | |
2424 | ||
2425 | r = 0; | |
2426 | if (!kvm->arch.kvm_ops->svm_off) | |
2427 | goto out; | |
2428 | ||
2429 | r = kvm->arch.kvm_ops->svm_off(kvm); | |
2430 | break; | |
2431 | } | |
cbbc58d4 AK |
2432 | default: { |
2433 | struct kvm *kvm = filp->private_data; | |
2434 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); | |
2435 | } | |
3a167bea | 2436 | #else /* CONFIG_PPC_BOOK3S_64 */ |
bbf45ba5 | 2437 | default: |
367e1319 | 2438 | r = -ENOTTY; |
3a167bea | 2439 | #endif |
bbf45ba5 | 2440 | } |
15711e9c | 2441 | out: |
bbf45ba5 HB |
2442 | return r; |
2443 | } | |
2444 | ||
043cc4d7 SW |
2445 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; |
2446 | static unsigned long nr_lpids; | |
2447 | ||
2448 | long kvmppc_alloc_lpid(void) | |
2449 | { | |
2450 | long lpid; | |
2451 | ||
2452 | do { | |
2453 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); | |
2454 | if (lpid >= nr_lpids) { | |
2455 | pr_err("%s: No LPIDs free\n", __func__); | |
2456 | return -ENOMEM; | |
2457 | } | |
2458 | } while (test_and_set_bit(lpid, lpid_inuse)); | |
2459 | ||
2460 | return lpid; | |
2461 | } | |
2ba9f0d8 | 2462 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
043cc4d7 SW |
2463 | |
2464 | void kvmppc_claim_lpid(long lpid) | |
2465 | { | |
2466 | set_bit(lpid, lpid_inuse); | |
2467 | } | |
2ba9f0d8 | 2468 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); |
043cc4d7 SW |
2469 | |
2470 | void kvmppc_free_lpid(long lpid) | |
2471 | { | |
2472 | clear_bit(lpid, lpid_inuse); | |
2473 | } | |
2ba9f0d8 | 2474 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
043cc4d7 SW |
2475 | |
2476 | void kvmppc_init_lpid(unsigned long nr_lpids_param) | |
2477 | { | |
2478 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); | |
2479 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | |
2480 | } | |
2ba9f0d8 | 2481 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
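/*
 * Illustrative sketch of the LPID allocator's expected lifecycle: an HV
 * backend allocates an LPID when a guest is created and returns it on
 * teardown; kvmppc_claim_lpid() is for LPIDs reserved outside the allocator
 * (such as the host's own). The function name is made up for the example.
 */
static int example_lpid_lifecycle(void)
{
	long lpid = kvmppc_alloc_lpid();

	if (lpid < 0)
		return lpid;		/* -ENOMEM: no LPIDs free */

	/* ... run a guest under this LPID ... */

	kvmppc_free_lpid(lpid);
	return 0;
}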
043cc4d7 | 2482 | |
bbf45ba5 HB |
2483 | int kvm_arch_init(void *opaque) |
2484 | { | |
2485 | return 0; | |
2486 | } | |
2487 | ||
478d6686 | 2488 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |