1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright IBM Corp. 2007
5 *
6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8 */
9
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <linux/of.h>
23 #include <asm/cputable.h>
24 #include <linux/uaccess.h>
25 #include <asm/kvm_ppc.h>
26 #include <asm/cputhreads.h>
27 #include <asm/irqflags.h>
28 #include <asm/iommu.h>
29 #include <asm/switch_to.h>
30 #include <asm/xive.h>
31 #ifdef CONFIG_PPC_PSERIES
32 #include <asm/hvcall.h>
33 #include <asm/plpar_wrappers.h>
34 #endif
35 #include <asm/ultravisor.h>
36 #include <asm/setup.h>
37
38 #include "timing.h"
39 #include "../mm/mmu_decl.h"
40
41 #define CREATE_TRACE_POINTS
42 #include "trace.h"
43
44 struct kvmppc_ops *kvmppc_hv_ops;
45 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
46 struct kvmppc_ops *kvmppc_pr_ops;
47 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
48
49
50 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
51 {
52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
53 }
54
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
56 {
57 return kvm_arch_vcpu_runnable(vcpu);
58 }
59
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
61 {
62 return false;
63 }
64
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
66 {
67 return 1;
68 }
69
70 /*
71  * Common checks before entering the guest world. Call with interrupts
72  * enabled; this function hard-disables them itself (note the WARN_ON below).
73 *
74 * returns:
75 *
76 * == 1 if we're ready to go into guest state
77 * <= 0 if we need to go back to the host with return value
78 */
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
80 {
81 int r;
82
83 WARN_ON(irqs_disabled());
84 hard_irq_disable();
85
86 while (true) {
87 if (need_resched()) {
88 local_irq_enable();
89 cond_resched();
90 hard_irq_disable();
91 continue;
92 }
93
94 if (signal_pending(current)) {
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
96 vcpu->run->exit_reason = KVM_EXIT_INTR;
97 r = -EINTR;
98 break;
99 }
100
101 vcpu->mode = IN_GUEST_MODE;
102
103 /*
104 * Reading vcpu->requests must happen after setting vcpu->mode,
105 * so we don't miss a request because the requester sees
106 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
107 * before next entering the guest (and thus doesn't IPI).
108 * This also orders the write to mode from any reads
109 * to the page tables done while the VCPU is running.
110 * Please see the comment in kvm_flush_remote_tlbs.
111 */
112 smp_mb();
113
114 if (kvm_request_pending(vcpu)) {
115                         /* Make sure we process requests with preemption enabled */
116 local_irq_enable();
117 trace_kvm_check_requests(vcpu);
118 r = kvmppc_core_check_requests(vcpu);
119 hard_irq_disable();
120 if (r > 0)
121 continue;
122 break;
123 }
124
125 if (kvmppc_core_prepare_to_enter(vcpu)) {
126                         /* interrupts got enabled in between, so we
127                          * are back at square 1 */
128 continue;
129 }
130
131 guest_enter_irqoff();
132 return 1;
133 }
134
135 /* return to host */
136 local_irq_enable();
137 return r;
138 }
139 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
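/*
 * Illustrative caller pattern (the real callers are the PR and booke
 * vcpu run/exit paths; this is only a sketch):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	(bail out to the host with this value)
 *	... otherwise interrupts are hard-disabled and vcpu->mode is
 *	    IN_GUEST_MODE, so the caller can enter the guest now ...
 */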
140
141 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
143 {
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
145 int i;
146
147 shared->sprg0 = swab64(shared->sprg0);
148 shared->sprg1 = swab64(shared->sprg1);
149 shared->sprg2 = swab64(shared->sprg2);
150 shared->sprg3 = swab64(shared->sprg3);
151 shared->srr0 = swab64(shared->srr0);
152 shared->srr1 = swab64(shared->srr1);
153 shared->dar = swab64(shared->dar);
154 shared->msr = swab64(shared->msr);
155 shared->dsisr = swab32(shared->dsisr);
156 shared->int_pending = swab32(shared->int_pending);
157 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
158 shared->sr[i] = swab32(shared->sr[i]);
159 }
160 #endif
161
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
163 {
164 int nr = kvmppc_get_gpr(vcpu, 11);
165 int r;
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
170 unsigned long r2 = 0;
171
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
173 /* 32 bit mode */
174 param1 &= 0xffffffff;
175 param2 &= 0xffffffff;
176 param3 &= 0xffffffff;
177 param4 &= 0xffffffff;
178 }
179
180 switch (nr) {
181 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
182 {
183 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
184                 /* Book3S guests can be little endian; detect that here */
185 int shared_big_endian = true;
186 if (vcpu->arch.intr_msr & MSR_LE)
187 shared_big_endian = false;
188 if (shared_big_endian != vcpu->arch.shared_big_endian)
189 kvmppc_swab_shared(vcpu);
190 vcpu->arch.shared_big_endian = shared_big_endian;
191 #endif
192
193 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
194 /*
195 * Older versions of the Linux magic page code had
196 * a bug where they would map their trampoline code
197 * NX. If that's the case, remove !PR NX capability.
198 */
199 vcpu->arch.disable_kernel_nx = true;
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
201 }
202
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
205
206 #ifdef CONFIG_PPC_64K_PAGES
207 /*
208 * Make sure our 4k magic page is in the same window of a 64k
209 * page within the guest and within the host's page.
210 */
211 if ((vcpu->arch.magic_page_pa & 0xf000) !=
212 ((ulong)vcpu->arch.shared & 0xf000)) {
213 void *old_shared = vcpu->arch.shared;
214 ulong shared = (ulong)vcpu->arch.shared;
215 void *new_shared;
216
217 shared &= PAGE_MASK;
218 shared |= vcpu->arch.magic_page_pa & 0xf000;
219 new_shared = (void*)shared;
220 memcpy(new_shared, old_shared, 0x1000);
221 vcpu->arch.shared = new_shared;
222 }
223 #endif
224
225 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
226
227 r = EV_SUCCESS;
228 break;
229 }
230 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
231 r = EV_SUCCESS;
232 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
233 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
234 #endif
235
236 /* Second return value is in r4 */
237 break;
238 case EV_HCALL_TOKEN(EV_IDLE):
239 r = EV_SUCCESS;
240 kvm_vcpu_halt(vcpu);
241 break;
242 default:
243 r = EV_UNIMPLEMENTED;
244 break;
245 }
246
247 kvmppc_set_gpr(vcpu, 4, r2);
248
249 return r;
250 }
251 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
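/*
 * ABI summary for the paravirtual hypercalls handled above: the
 * hypercall token arrives in r11 and up to four arguments in r3-r6
 * (truncated to 32 bits when MSR_SF is clear).  A second return value
 * is written back to r4 here, while the primary status (EV_SUCCESS,
 * EV_UNIMPLEMENTED, ...) is this function's return value, which the
 * caller is expected to place in r3.
 */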
252
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
254 {
255 int r = false;
256
257 /* We have to know what CPU to virtualize */
258 if (!vcpu->arch.pvr)
259 goto out;
260
261 /* PAPR only works with book3s_64 */
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
263 goto out;
264
265 /* HV KVM can only do PAPR mode for now */
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
267 goto out;
268
269 #ifdef CONFIG_KVM_BOOKE_HV
270 if (!cpu_has_feature(CPU_FTR_EMB_HV))
271 goto out;
272 #endif
273
274 r = true;
275
276 out:
277 vcpu->arch.sane = r;
278 return r ? 0 : -EINVAL;
279 }
280 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
281
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
283 {
284 enum emulation_result er;
285 int r;
286
287 er = kvmppc_emulate_loadstore(vcpu);
288 switch (er) {
289 case EMULATE_DONE:
290 /* Future optimization: only reload non-volatiles if they were
291 * actually modified. */
292 r = RESUME_GUEST_NV;
293 break;
294 case EMULATE_AGAIN:
295 r = RESUME_GUEST;
296 break;
297 case EMULATE_DO_MMIO:
298 vcpu->run->exit_reason = KVM_EXIT_MMIO;
299 /* We must reload nonvolatiles because "update" load/store
300 * instructions modify register state. */
301 /* Future optimization: only reload non-volatiles if they were
302 * actually modified. */
303 r = RESUME_HOST_NV;
304 break;
305 case EMULATE_FAIL:
306 {
307 ppc_inst_t last_inst;
308
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
310 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
311 ppc_inst_val(last_inst));
312
313 /*
314 * Injecting a Data Storage here is a bit more
315 * accurate since the instruction that caused the
316 * access could still be a valid one.
317 */
318 if (!IS_ENABLED(CONFIG_BOOKE)) {
319 ulong dsisr = DSISR_BADACCESS;
320
321 if (vcpu->mmio_is_write)
322 dsisr |= DSISR_ISSTORE;
323
324 kvmppc_core_queue_data_storage(vcpu,
325 kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
326 vcpu->arch.vaddr_accessed, dsisr);
327 } else {
328 /*
329 * BookE does not send a SIGBUS on a bad
330 * fault, so use a Program interrupt instead
331 * to avoid a fault loop.
332 */
333 kvmppc_core_queue_program(vcpu, 0);
334 }
335
336 r = RESUME_GUEST;
337 break;
338 }
339 default:
340 WARN_ON(1);
341 r = RESUME_GUEST;
342 }
343
344 return r;
345 }
346 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
347
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
349 bool data)
350 {
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
352 struct kvmppc_pte pte;
353 int r = -EINVAL;
354
355 vcpu->stat.st++;
356
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
359 size);
360
361 if ((!r) || (r == -EAGAIN))
362 return r;
363
364 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
365 XLATE_WRITE, &pte);
366 if (r < 0)
367 return r;
368
369 *eaddr = pte.raddr;
370
371 if (!pte.may_write)
372 return -EPERM;
373
374 /* Magic page override */
375 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
376 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
377 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
378 void *magic = vcpu->arch.shared;
379 magic += pte.eaddr & 0xfff;
380 memcpy(magic, ptr, size);
381 return EMULATE_DONE;
382 }
383
384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
385 return EMULATE_DO_MMIO;
386
387 return EMULATE_DONE;
388 }
389 EXPORT_SYMBOL_GPL(kvmppc_st);
390
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
392 bool data)
393 {
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
395 struct kvmppc_pte pte;
396 int rc = -EINVAL;
397
398 vcpu->stat.ld++;
399
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
402 size);
403
404 if ((!rc) || (rc == -EAGAIN))
405 return rc;
406
407 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
408 XLATE_READ, &pte);
409 if (rc)
410 return rc;
411
412 *eaddr = pte.raddr;
413
414 if (!pte.may_read)
415 return -EPERM;
416
417 if (!data && !pte.may_execute)
418 return -ENOEXEC;
419
420 /* Magic page override */
421 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
422 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
423 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
424 void *magic = vcpu->arch.shared;
425 magic += pte.eaddr & 0xfff;
426 memcpy(ptr, magic, size);
427 return EMULATE_DONE;
428 }
429
430 kvm_vcpu_srcu_read_lock(vcpu);
431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
432 kvm_vcpu_srcu_read_unlock(vcpu);
433 if (rc)
434 return EMULATE_DO_MMIO;
435
436 return EMULATE_DONE;
437 }
438 EXPORT_SYMBOL_GPL(kvmppc_ld);
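/*
 * Return convention shared by kvmppc_st() and kvmppc_ld() above: 0 or
 * -EAGAIN when the kvm_ops store_to_eaddr/load_from_eaddr hook handled
 * (or deferred) the access, a negative errno when translation or
 * permission checks fail, EMULATE_DONE when the data was copied
 * directly (guest memory or the magic page), and EMULATE_DO_MMIO when
 * the caller must fall back to MMIO emulation.
 */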
439
440 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
441 {
442 struct kvmppc_ops *kvm_ops = NULL;
443 int r;
444
445 /*
446 * if we have both HV and PR enabled, default is HV
447 */
448 if (type == 0) {
449 if (kvmppc_hv_ops)
450 kvm_ops = kvmppc_hv_ops;
451 else
452 kvm_ops = kvmppc_pr_ops;
453 if (!kvm_ops)
454 goto err_out;
455 } else if (type == KVM_VM_PPC_HV) {
456 if (!kvmppc_hv_ops)
457 goto err_out;
458 kvm_ops = kvmppc_hv_ops;
459 } else if (type == KVM_VM_PPC_PR) {
460 if (!kvmppc_pr_ops)
461 goto err_out;
462 kvm_ops = kvmppc_pr_ops;
463 } else
464 goto err_out;
465
466 if (!try_module_get(kvm_ops->owner))
467 return -ENOENT;
468
469 kvm->arch.kvm_ops = kvm_ops;
470 r = kvmppc_core_init_vm(kvm);
471 if (r)
472 module_put(kvm_ops->owner);
473 return r;
474 err_out:
475 return -EINVAL;
476 }
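/*
 * The 'type' argument above comes straight from the KVM_CREATE_VM
 * ioctl.  Illustrative userspace usage (fd variable names are made up):
 *
 *	vm = ioctl(kvm_fd, KVM_CREATE_VM, 0);              default: HV if loaded, else PR
 *	vm = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);  require HV, else fail with EINVAL
 *	vm = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);  require PR, else fail with EINVAL
 */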
477
478 void kvm_arch_destroy_vm(struct kvm *kvm)
479 {
480 #ifdef CONFIG_KVM_XICS
481 /*
482 * We call kick_all_cpus_sync() to ensure that all
483 * CPUs have executed any pending IPIs before we
484 * continue and free VCPUs structures below.
485 */
486 if (is_kvmppc_hv_enabled(kvm))
487 kick_all_cpus_sync();
488 #endif
489
490 kvm_destroy_vcpus(kvm);
491
492 mutex_lock(&kvm->lock);
493
494 kvmppc_core_destroy_vm(kvm);
495
496 mutex_unlock(&kvm->lock);
497
498 /* drop the module reference */
499 module_put(kvm->arch.kvm_ops->owner);
500 }
501
502 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
503 {
504 int r;
505 /* Assume we're using HV mode when the HV module is loaded */
506 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
507
508 if (kvm) {
509 /*
510 * Hooray - we know which VM type we're running on. Depend on
511 * that rather than the guess above.
512 */
513 hv_enabled = is_kvmppc_hv_enabled(kvm);
514 }
515
516 switch (ext) {
517 #ifdef CONFIG_BOOKE
518 case KVM_CAP_PPC_BOOKE_SREGS:
519 case KVM_CAP_PPC_BOOKE_WATCHDOG:
520 case KVM_CAP_PPC_EPR:
521 #else
522 case KVM_CAP_PPC_SEGSTATE:
523 case KVM_CAP_PPC_HIOR:
524 case KVM_CAP_PPC_PAPR:
525 #endif
526 case KVM_CAP_PPC_UNSET_IRQ:
527 case KVM_CAP_PPC_IRQ_LEVEL:
528 case KVM_CAP_ENABLE_CAP:
529 case KVM_CAP_ONE_REG:
530 case KVM_CAP_IOEVENTFD:
531 case KVM_CAP_DEVICE_CTRL:
532 case KVM_CAP_IMMEDIATE_EXIT:
533 case KVM_CAP_SET_GUEST_DEBUG:
534 r = 1;
535 break;
536 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
537 case KVM_CAP_PPC_PAIRED_SINGLES:
538 case KVM_CAP_PPC_OSI:
539 case KVM_CAP_PPC_GET_PVINFO:
540 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
541 case KVM_CAP_SW_TLB:
542 #endif
543 /* We support this only for PR */
544 r = !hv_enabled;
545 break;
546 #ifdef CONFIG_KVM_MPIC
547 case KVM_CAP_IRQ_MPIC:
548 r = 1;
549 break;
550 #endif
551
552 #ifdef CONFIG_PPC_BOOK3S_64
553 case KVM_CAP_SPAPR_TCE:
554 case KVM_CAP_SPAPR_TCE_64:
555 r = 1;
556 break;
557 case KVM_CAP_SPAPR_TCE_VFIO:
558 r = !!cpu_has_feature(CPU_FTR_HVMODE);
559 break;
560 case KVM_CAP_PPC_RTAS:
561 case KVM_CAP_PPC_FIXUP_HCALL:
562 case KVM_CAP_PPC_ENABLE_HCALL:
563 #ifdef CONFIG_KVM_XICS
564 case KVM_CAP_IRQ_XICS:
565 #endif
566 case KVM_CAP_PPC_GET_CPU_CHAR:
567 r = 1;
568 break;
569 #ifdef CONFIG_KVM_XIVE
570 case KVM_CAP_PPC_IRQ_XIVE:
571 /*
572                  * We need XIVE to be enabled on the platform (which implies
573                  * a POWER9 processor) and to be running on the PowerNV
574                  * platform, as nested virtualization is not yet supported.
575 */
576 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
577 kvmppc_xive_native_supported();
578 break;
579 #endif
580
581 #ifdef CONFIG_HAVE_KVM_IRQFD
582 case KVM_CAP_IRQFD_RESAMPLE:
583 r = !xive_enabled();
584 break;
585 #endif
586
587 case KVM_CAP_PPC_ALLOC_HTAB:
588 r = hv_enabled;
589 break;
590 #endif /* CONFIG_PPC_BOOK3S_64 */
591 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
592 case KVM_CAP_PPC_SMT:
593 r = 0;
594 if (kvm) {
595 if (kvm->arch.emul_smt_mode > 1)
596 r = kvm->arch.emul_smt_mode;
597 else
598 r = kvm->arch.smt_mode;
599 } else if (hv_enabled) {
600 if (cpu_has_feature(CPU_FTR_ARCH_300))
601 r = 1;
602 else
603 r = threads_per_subcore;
604 }
605 break;
606 case KVM_CAP_PPC_SMT_POSSIBLE:
607 r = 1;
608 if (hv_enabled) {
609 if (!cpu_has_feature(CPU_FTR_ARCH_300))
610 r = ((threads_per_subcore << 1) - 1);
611 else
612 /* P9 can emulate dbells, so allow any mode */
613 r = 8 | 4 | 2 | 1;
614 }
615 break;
616 case KVM_CAP_PPC_RMA:
617 r = 0;
618 break;
619 case KVM_CAP_PPC_HWRNG:
620 r = kvmppc_hwrng_present();
621 break;
622 case KVM_CAP_PPC_MMU_RADIX:
623 r = !!(hv_enabled && radix_enabled());
624 break;
625 case KVM_CAP_PPC_MMU_HASH_V3:
626 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
627 kvmppc_hv_ops->hash_v3_possible());
628 break;
629 case KVM_CAP_PPC_NESTED_HV:
630 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
631 !kvmppc_hv_ops->enable_nested(NULL));
632 break;
633 #endif
634 case KVM_CAP_SYNC_MMU:
635 BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
636 r = 1;
637 break;
638 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
639 case KVM_CAP_PPC_HTAB_FD:
640 r = hv_enabled;
641 break;
642 #endif
643 case KVM_CAP_NR_VCPUS:
644 /*
645 * Recommending a number of CPUs is somewhat arbitrary; we
646 * return the number of present CPUs for -HV (since a host
647 * will have secondary threads "offline"), and for other KVM
648 * implementations just count online CPUs.
649 */
650 if (hv_enabled)
651 r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
652 else
653 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
654 break;
655 case KVM_CAP_MAX_VCPUS:
656 r = KVM_MAX_VCPUS;
657 break;
658 case KVM_CAP_MAX_VCPU_ID:
659 r = KVM_MAX_VCPU_IDS;
660 break;
661 #ifdef CONFIG_PPC_BOOK3S_64
662 case KVM_CAP_PPC_GET_SMMU_INFO:
663 r = 1;
664 break;
665 case KVM_CAP_SPAPR_MULTITCE:
666 r = 1;
667 break;
668 case KVM_CAP_SPAPR_RESIZE_HPT:
669 r = !!hv_enabled;
670 break;
671 #endif
672 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
673 case KVM_CAP_PPC_FWNMI:
674 r = hv_enabled;
675 break;
676 #endif
677 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
678 case KVM_CAP_PPC_HTM:
679 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
680 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
681 break;
682 #endif
683 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
684 case KVM_CAP_PPC_SECURE_GUEST:
685 r = hv_enabled && kvmppc_hv_ops->enable_svm &&
686 !kvmppc_hv_ops->enable_svm(NULL);
687 break;
688 case KVM_CAP_PPC_DAWR1:
689 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
690 !kvmppc_hv_ops->enable_dawr1(NULL));
691 break;
692 case KVM_CAP_PPC_RPT_INVALIDATE:
693 r = 1;
694 break;
695 #endif
696 case KVM_CAP_PPC_AIL_MODE_3:
697 r = 0;
698 /*
699 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
700 * The POWER9s can support it if the guest runs in hash mode,
701 * but QEMU doesn't necessarily query the capability in time.
702 */
703 if (hv_enabled) {
704 if (kvmhv_on_pseries()) {
705 if (pseries_reloc_on_exception())
706 r = 1;
707 } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
708 !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
709 r = 1;
710 }
711 }
712 break;
713 default:
714 r = 0;
715 break;
716 }
717 return r;
718
719 }
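/*
 * The values computed above are reported through KVM_CHECK_EXTENSION.
 * Illustrative userspace query (error handling omitted):
 *
 *	smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * A query on a VM fd uses the VM's actual HV/PR type; a query on the
 * /dev/kvm fd falls back to the "HV module loaded" guess above.
 */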
720
721 long kvm_arch_dev_ioctl(struct file *filp,
722 unsigned int ioctl, unsigned long arg)
723 {
724 return -EINVAL;
725 }
726
727 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
728 {
729 kvmppc_core_free_memslot(kvm, slot);
730 }
731
732 int kvm_arch_prepare_memory_region(struct kvm *kvm,
733 const struct kvm_memory_slot *old,
734 struct kvm_memory_slot *new,
735 enum kvm_mr_change change)
736 {
737 return kvmppc_core_prepare_memory_region(kvm, old, new, change);
738 }
739
740 void kvm_arch_commit_memory_region(struct kvm *kvm,
741 struct kvm_memory_slot *old,
742 const struct kvm_memory_slot *new,
743 enum kvm_mr_change change)
744 {
745 kvmppc_core_commit_memory_region(kvm, old, new, change);
746 }
747
748 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
749 struct kvm_memory_slot *slot)
750 {
751 kvmppc_core_flush_memslot(kvm, slot);
752 }
753
754 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
755 {
756 return 0;
757 }
758
759 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
760 {
761 struct kvm_vcpu *vcpu;
762
763 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
764 kvmppc_decrementer_func(vcpu);
765
766 return HRTIMER_NORESTART;
767 }
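/*
 * The hrtimer above backs the emulated guest decrementer:
 * kvm_arch_vcpu_create() below arms arch.dec_timer against
 * CLOCK_REALTIME, and when it expires kvmppc_decrementer_func() is
 * expected to queue the decrementer exception and wake the vcpu if it
 * is sleeping.
 */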
768
769 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
770 {
771 int err;
772
773 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
774 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
775
776 #ifdef CONFIG_KVM_EXIT_TIMING
777 mutex_init(&vcpu->arch.exit_timing_lock);
778 #endif
779 err = kvmppc_subarch_vcpu_init(vcpu);
780 if (err)
781 return err;
782
783 err = kvmppc_core_vcpu_create(vcpu);
784 if (err)
785 goto out_vcpu_uninit;
786
787 rcuwait_init(&vcpu->arch.wait);
788 vcpu->arch.waitp = &vcpu->arch.wait;
789 return 0;
790
791 out_vcpu_uninit:
792 kvmppc_subarch_vcpu_uninit(vcpu);
793 return err;
794 }
795
796 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
797 {
798 }
799
800 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
801 {
802 /* Make sure we're not using the vcpu anymore */
803 hrtimer_cancel(&vcpu->arch.dec_timer);
804
805 switch (vcpu->arch.irq_type) {
806 case KVMPPC_IRQ_MPIC:
807 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
808 break;
809 case KVMPPC_IRQ_XICS:
810 if (xics_on_xive())
811 kvmppc_xive_cleanup_vcpu(vcpu);
812 else
813 kvmppc_xics_free_icp(vcpu);
814 break;
815 case KVMPPC_IRQ_XIVE:
816 kvmppc_xive_native_cleanup_vcpu(vcpu);
817 break;
818 }
819
820 kvmppc_core_vcpu_free(vcpu);
821
822 kvmppc_subarch_vcpu_uninit(vcpu);
823 }
824
825 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
826 {
827 return kvmppc_core_pending_dec(vcpu);
828 }
829
830 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
831 {
832 #ifdef CONFIG_BOOKE
833 /*
834 * vrsave (formerly usprg0) isn't used by Linux, but may
835 * be used by the guest.
836 *
837 * On non-booke this is associated with Altivec and
838 * is handled by code in book3s.c.
839 */
840 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
841 #endif
842 kvmppc_core_vcpu_load(vcpu, cpu);
843 }
844
845 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
846 {
847 kvmppc_core_vcpu_put(vcpu);
848 #ifdef CONFIG_BOOKE
849 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
850 #endif
851 }
852
853 /*
854 * irq_bypass_add_producer and irq_bypass_del_producer are only
855 * useful if the architecture supports PCI passthrough.
856 * irq_bypass_stop and irq_bypass_start are not needed and so
857 * kvm_ops are not defined for them.
858 */
859 bool kvm_arch_has_irq_bypass(void)
860 {
861 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
862 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
863 }
864
865 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
866 struct irq_bypass_producer *prod)
867 {
868 struct kvm_kernel_irqfd *irqfd =
869 container_of(cons, struct kvm_kernel_irqfd, consumer);
870 struct kvm *kvm = irqfd->kvm;
871
872 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
873 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
874
875 return 0;
876 }
877
878 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
879 struct irq_bypass_producer *prod)
880 {
881 struct kvm_kernel_irqfd *irqfd =
882 container_of(cons, struct kvm_kernel_irqfd, consumer);
883 struct kvm *kvm = irqfd->kvm;
884
885 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
886 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
887 }
888
889 #ifdef CONFIG_VSX
890 static inline int kvmppc_get_vsr_dword_offset(int index)
891 {
892 int offset;
893
894 if ((index != 0) && (index != 1))
895 return -1;
896
897 #ifdef __BIG_ENDIAN
898 offset = index;
899 #else
900 offset = 1 - index;
901 #endif
902
903 return offset;
904 }
905
906 static inline int kvmppc_get_vsr_word_offset(int index)
907 {
908 int offset;
909
910 if ((index > 3) || (index < 0))
911 return -1;
912
913 #ifdef __BIG_ENDIAN
914 offset = index;
915 #else
916 offset = 3 - index;
917 #endif
918 return offset;
919 }
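/*
 * The two offset helpers above map a guest-visible VSX element index
 * onto the host's in-memory element layout: on a big-endian host the
 * index is used as-is, while on a little-endian host it is mirrored
 * (1 - index for doublewords, 3 - index for words).
 */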
920
921 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
922 u64 gpr)
923 {
924 union kvmppc_one_reg val;
925 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
926 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
927
928 if (offset == -1)
929 return;
930
931 if (index >= 32) {
932 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
933 val.vsxval[offset] = gpr;
934 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
935 } else {
936 kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
937 }
938 }
939
940 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
941 u64 gpr)
942 {
943 union kvmppc_one_reg val;
944 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
945
946 if (index >= 32) {
947 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
948 val.vsxval[0] = gpr;
949 val.vsxval[1] = gpr;
950 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
951 } else {
952 kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
953 kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
954 }
955 }
956
957 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
958 u32 gpr)
959 {
960 union kvmppc_one_reg val;
961 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
962
963 if (index >= 32) {
964 val.vsx32val[0] = gpr;
965 val.vsx32val[1] = gpr;
966 val.vsx32val[2] = gpr;
967 val.vsx32val[3] = gpr;
968 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
969 } else {
970 val.vsx32val[0] = gpr;
971 val.vsx32val[1] = gpr;
972 kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
973 kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
974 }
975 }
976
977 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
978 u32 gpr32)
979 {
980 union kvmppc_one_reg val;
981 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
982 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
983 int dword_offset, word_offset;
984
985 if (offset == -1)
986 return;
987
988 if (index >= 32) {
989 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
990 val.vsx32val[offset] = gpr32;
991 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
992 } else {
993 dword_offset = offset / 2;
994 word_offset = offset % 2;
995 val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
996 val.vsx32val[word_offset] = gpr32;
997 kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
998 }
999 }
1000 #endif /* CONFIG_VSX */
1001
1002 #ifdef CONFIG_ALTIVEC
1003 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1004 int index, int element_size)
1005 {
1006 int offset;
1007 int elts = sizeof(vector128)/element_size;
1008
1009 if ((index < 0) || (index >= elts))
1010 return -1;
1011
1012 if (kvmppc_need_byteswap(vcpu))
1013 offset = elts - index - 1;
1014 else
1015 offset = index;
1016
1017 return offset;
1018 }
1019
1020 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1021 int index)
1022 {
1023 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1024 }
1025
1026 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1027 int index)
1028 {
1029 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1030 }
1031
1032 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1033 int index)
1034 {
1035 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1036 }
1037
1038 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1039 int index)
1040 {
1041 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1042 }
1043
1044
1045 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1046 u64 gpr)
1047 {
1048 union kvmppc_one_reg val;
1049 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1050 vcpu->arch.mmio_vmx_offset);
1051 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1052
1053 if (offset == -1)
1054 return;
1055
1056 kvmppc_get_vsx_vr(vcpu, index, &val.vval);
1057 val.vsxval[offset] = gpr;
1058 kvmppc_set_vsx_vr(vcpu, index, &val.vval);
1059 }
1060
1061 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1062 u32 gpr32)
1063 {
1064 union kvmppc_one_reg val;
1065 int offset = kvmppc_get_vmx_word_offset(vcpu,
1066 vcpu->arch.mmio_vmx_offset);
1067 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1068
1069 if (offset == -1)
1070 return;
1071
1072 kvmppc_get_vsx_vr(vcpu, index, &val.vval);
1073 val.vsx32val[offset] = gpr32;
1074 kvmppc_set_vsx_vr(vcpu, index, &val.vval);
1075 }
1076
1077 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1078 u16 gpr16)
1079 {
1080 union kvmppc_one_reg val;
1081 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1082 vcpu->arch.mmio_vmx_offset);
1083 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1084
1085 if (offset == -1)
1086 return;
1087
1088 kvmppc_get_vsx_vr(vcpu, index, &val.vval);
1089 val.vsx16val[offset] = gpr16;
1090 kvmppc_set_vsx_vr(vcpu, index, &val.vval);
1091 }
1092
1093 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1094 u8 gpr8)
1095 {
1096 union kvmppc_one_reg val;
1097 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1098 vcpu->arch.mmio_vmx_offset);
1099 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1100
1101 if (offset == -1)
1102 return;
1103
1104 kvmppc_get_vsx_vr(vcpu, index, &val.vval);
1105 val.vsx8val[offset] = gpr8;
1106 kvmppc_set_vsx_vr(vcpu, index, &val.vval);
1107 }
1108 #endif /* CONFIG_ALTIVEC */
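/*
 * The VMX helpers above reuse the kvmppc_{get,set}_vsx_vr() accessors:
 * the 32 Altivec VRs are architecturally the upper half of the 64-entry
 * VSX register file, so the same backing storage (union kvmppc_one_reg
 * and the vcpu vector state) serves both views.
 */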
1109
1110 #ifdef CONFIG_PPC_FPU
1111 static inline u64 sp_to_dp(u32 fprs)
1112 {
1113 u64 fprd;
1114
1115 preempt_disable();
1116 enable_kernel_fp();
1117 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
1118 : "fr0");
1119 preempt_enable();
1120 return fprd;
1121 }
1122
1123 static inline u32 dp_to_sp(u64 fprd)
1124 {
1125 u32 fprs;
1126
1127 preempt_disable();
1128 enable_kernel_fp();
1129 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
1130 : "fr0");
1131 preempt_enable();
1132 return fprs;
1133 }
1134
1135 #else
1136 #define sp_to_dp(x) (x)
1137 #define dp_to_sp(x) (x)
1138 #endif /* CONFIG_PPC_FPU */
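/*
 * sp_to_dp()/dp_to_sp() above let the FPU perform the single<->double
 * conversion by bouncing the value through fr0 (lfs/stfd, respectively
 * lfd/stfs), which is why preemption is disabled and kernel FP use is
 * enabled around the asm, and why fr0 is in the clobber list.
 */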
1139
1140 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1141 {
1142 struct kvm_run *run = vcpu->run;
1143 u64 gpr;
1144
1145 if (run->mmio.len > sizeof(gpr))
1146 return;
1147
1148 if (!vcpu->arch.mmio_host_swabbed) {
1149 switch (run->mmio.len) {
1150 case 8: gpr = *(u64 *)run->mmio.data; break;
1151 case 4: gpr = *(u32 *)run->mmio.data; break;
1152 case 2: gpr = *(u16 *)run->mmio.data; break;
1153 case 1: gpr = *(u8 *)run->mmio.data; break;
1154 }
1155 } else {
1156 switch (run->mmio.len) {
1157 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1158 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1159 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1160 case 1: gpr = *(u8 *)run->mmio.data; break;
1161 }
1162 }
1163
1164 /* conversion between single and double precision */
1165 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1166 gpr = sp_to_dp(gpr);
1167
1168 if (vcpu->arch.mmio_sign_extend) {
1169 switch (run->mmio.len) {
1170 #ifdef CONFIG_PPC64
1171 case 4:
1172 gpr = (s64)(s32)gpr;
1173 break;
1174 #endif
1175 case 2:
1176 gpr = (s64)(s16)gpr;
1177 break;
1178 case 1:
1179 gpr = (s64)(s8)gpr;
1180 break;
1181 }
1182 }
1183
1184 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1185 case KVM_MMIO_REG_GPR:
1186 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1187 break;
1188 case KVM_MMIO_REG_FPR:
1189 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1190 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1191
1192 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
1193 break;
1194 #ifdef CONFIG_PPC_BOOK3S
1195 case KVM_MMIO_REG_QPR:
1196 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1197 break;
1198 case KVM_MMIO_REG_FQPR:
1199 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
1200 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1201 break;
1202 #endif
1203 #ifdef CONFIG_VSX
1204 case KVM_MMIO_REG_VSX:
1205 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1206 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1207
1208 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1209 kvmppc_set_vsr_dword(vcpu, gpr);
1210 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1211 kvmppc_set_vsr_word(vcpu, gpr);
1212 else if (vcpu->arch.mmio_copy_type ==
1213 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1214 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1215 else if (vcpu->arch.mmio_copy_type ==
1216 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1217 kvmppc_set_vsr_word_dump(vcpu, gpr);
1218 break;
1219 #endif
1220 #ifdef CONFIG_ALTIVEC
1221 case KVM_MMIO_REG_VMX:
1222 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1223 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1224
1225 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1226 kvmppc_set_vmx_dword(vcpu, gpr);
1227 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1228 kvmppc_set_vmx_word(vcpu, gpr);
1229 else if (vcpu->arch.mmio_copy_type ==
1230 KVMPPC_VMX_COPY_HWORD)
1231 kvmppc_set_vmx_hword(vcpu, gpr);
1232 else if (vcpu->arch.mmio_copy_type ==
1233 KVMPPC_VMX_COPY_BYTE)
1234 kvmppc_set_vmx_byte(vcpu, gpr);
1235 break;
1236 #endif
1237 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1238 case KVM_MMIO_REG_NESTED_GPR:
1239 if (kvmppc_need_byteswap(vcpu))
1240 gpr = swab64(gpr);
1241 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1242 sizeof(gpr));
1243 break;
1244 #endif
1245 default:
1246 BUG();
1247 }
1248 }
1249
1250 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1251 unsigned int rt, unsigned int bytes,
1252 int is_default_endian, int sign_extend)
1253 {
1254 struct kvm_run *run = vcpu->run;
1255 int idx, ret;
1256 bool host_swabbed;
1257
1258 /* Pity C doesn't have a logical XOR operator */
1259 if (kvmppc_need_byteswap(vcpu)) {
1260 host_swabbed = is_default_endian;
1261 } else {
1262 host_swabbed = !is_default_endian;
1263 }
1264
1265 if (bytes > sizeof(run->mmio.data))
1266 return EMULATE_FAIL;
1267
1268 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1269 run->mmio.len = bytes;
1270 run->mmio.is_write = 0;
1271
1272 vcpu->arch.io_gpr = rt;
1273 vcpu->arch.mmio_host_swabbed = host_swabbed;
1274 vcpu->mmio_needed = 1;
1275 vcpu->mmio_is_write = 0;
1276 vcpu->arch.mmio_sign_extend = sign_extend;
1277
1278 idx = srcu_read_lock(&vcpu->kvm->srcu);
1279
1280 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1281 bytes, &run->mmio.data);
1282
1283 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1284
1285 if (!ret) {
1286 kvmppc_complete_mmio_load(vcpu);
1287 vcpu->mmio_needed = 0;
1288 return EMULATE_DONE;
1289 }
1290
1291 return EMULATE_DO_MMIO;
1292 }
1293
1294 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1295 unsigned int rt, unsigned int bytes,
1296 int is_default_endian)
1297 {
1298 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1299 }
1300 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1301
1302 /* Same as above, but sign extends */
1303 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1304 unsigned int rt, unsigned int bytes,
1305 int is_default_endian)
1306 {
1307 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1308 }
1309
1310 #ifdef CONFIG_VSX
1311 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1312 unsigned int rt, unsigned int bytes,
1313 int is_default_endian, int mmio_sign_extend)
1314 {
1315 enum emulation_result emulated = EMULATE_DONE;
1316
1317         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1318 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1319 return EMULATE_FAIL;
1320
1321 while (vcpu->arch.mmio_vsx_copy_nums) {
1322 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1323 is_default_endian, mmio_sign_extend);
1324
1325 if (emulated != EMULATE_DONE)
1326 break;
1327
1328 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1329
1330 vcpu->arch.mmio_vsx_copy_nums--;
1331 vcpu->arch.mmio_vsx_offset++;
1332 }
1333 return emulated;
1334 }
1335 #endif /* CONFIG_VSX */
1336
1337 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1338 u64 val, unsigned int bytes, int is_default_endian)
1339 {
1340 struct kvm_run *run = vcpu->run;
1341 void *data = run->mmio.data;
1342 int idx, ret;
1343 bool host_swabbed;
1344
1345 /* Pity C doesn't have a logical XOR operator */
1346 if (kvmppc_need_byteswap(vcpu)) {
1347 host_swabbed = is_default_endian;
1348 } else {
1349 host_swabbed = !is_default_endian;
1350 }
1351
1352 if (bytes > sizeof(run->mmio.data))
1353 return EMULATE_FAIL;
1354
1355 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1356 run->mmio.len = bytes;
1357 run->mmio.is_write = 1;
1358 vcpu->mmio_needed = 1;
1359 vcpu->mmio_is_write = 1;
1360
1361 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1362 val = dp_to_sp(val);
1363
1364         /* Store the value in the lowest bytes of 'data'. */
1365 if (!host_swabbed) {
1366 switch (bytes) {
1367 case 8: *(u64 *)data = val; break;
1368 case 4: *(u32 *)data = val; break;
1369 case 2: *(u16 *)data = val; break;
1370 case 1: *(u8 *)data = val; break;
1371 }
1372 } else {
1373 switch (bytes) {
1374 case 8: *(u64 *)data = swab64(val); break;
1375 case 4: *(u32 *)data = swab32(val); break;
1376 case 2: *(u16 *)data = swab16(val); break;
1377 case 1: *(u8 *)data = val; break;
1378 }
1379 }
1380
1381 idx = srcu_read_lock(&vcpu->kvm->srcu);
1382
1383 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1384 bytes, &run->mmio.data);
1385
1386 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1387
1388 if (!ret) {
1389 vcpu->mmio_needed = 0;
1390 return EMULATE_DONE;
1391 }
1392
1393 return EMULATE_DO_MMIO;
1394 }
1395 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
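/*
 * For kvmppc_handle_load() and kvmppc_handle_store() above, an
 * EMULATE_DO_MMIO result means no in-kernel device on KVM_MMIO_BUS
 * claimed the access: run->mmio has been filled in (phys_addr, len,
 * is_write and, for stores, the data in its lowest bytes) and the vcpu
 * must exit to userspace with KVM_EXIT_MMIO to complete it.
 */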
1396
1397 #ifdef CONFIG_VSX
1398 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1399 {
1400 u32 dword_offset, word_offset;
1401 union kvmppc_one_reg reg;
1402 int vsx_offset = 0;
1403 int copy_type = vcpu->arch.mmio_copy_type;
1404 int result = 0;
1405
1406 switch (copy_type) {
1407 case KVMPPC_VSX_COPY_DWORD:
1408 vsx_offset =
1409 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1410
1411 if (vsx_offset == -1) {
1412 result = -1;
1413 break;
1414 }
1415
1416 if (rs < 32) {
1417 *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
1418 } else {
1419 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
1420 *val = reg.vsxval[vsx_offset];
1421 }
1422 break;
1423
1424 case KVMPPC_VSX_COPY_WORD:
1425 vsx_offset =
1426 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1427
1428 if (vsx_offset == -1) {
1429 result = -1;
1430 break;
1431 }
1432
1433 if (rs < 32) {
1434 dword_offset = vsx_offset / 2;
1435 word_offset = vsx_offset % 2;
1436 reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
1437 *val = reg.vsx32val[word_offset];
1438 } else {
1439 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
1440 *val = reg.vsx32val[vsx_offset];
1441 }
1442 break;
1443
1444 default:
1445 result = -1;
1446 break;
1447 }
1448
1449 return result;
1450 }
1451
1452 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1453 int rs, unsigned int bytes, int is_default_endian)
1454 {
1455 u64 val;
1456 enum emulation_result emulated = EMULATE_DONE;
1457
1458 vcpu->arch.io_gpr = rs;
1459
1460         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1461 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1462 return EMULATE_FAIL;
1463
1464 while (vcpu->arch.mmio_vsx_copy_nums) {
1465 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1466 return EMULATE_FAIL;
1467
1468 emulated = kvmppc_handle_store(vcpu,
1469 val, bytes, is_default_endian);
1470
1471 if (emulated != EMULATE_DONE)
1472 break;
1473
1474 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1475
1476 vcpu->arch.mmio_vsx_copy_nums--;
1477 vcpu->arch.mmio_vsx_offset++;
1478 }
1479
1480 return emulated;
1481 }
1482
1483 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1484 {
1485 struct kvm_run *run = vcpu->run;
1486 enum emulation_result emulated = EMULATE_FAIL;
1487 int r;
1488
1489 vcpu->arch.paddr_accessed += run->mmio.len;
1490
1491 if (!vcpu->mmio_is_write) {
1492 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1493 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1494 } else {
1495 emulated = kvmppc_handle_vsx_store(vcpu,
1496 vcpu->arch.io_gpr, run->mmio.len, 1);
1497 }
1498
1499 switch (emulated) {
1500 case EMULATE_DO_MMIO:
1501 run->exit_reason = KVM_EXIT_MMIO;
1502 r = RESUME_HOST;
1503 break;
1504 case EMULATE_FAIL:
1505 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1506 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1507 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1508 r = RESUME_HOST;
1509 break;
1510 default:
1511 r = RESUME_GUEST;
1512 break;
1513 }
1514 return r;
1515 }
1516 #endif /* CONFIG_VSX */
1517
1518 #ifdef CONFIG_ALTIVEC
1519 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1520 unsigned int rt, unsigned int bytes, int is_default_endian)
1521 {
1522 enum emulation_result emulated = EMULATE_DONE;
1523
1524 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1525 return EMULATE_FAIL;
1526
1527 while (vcpu->arch.mmio_vmx_copy_nums) {
1528 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1529 is_default_endian, 0);
1530
1531 if (emulated != EMULATE_DONE)
1532 break;
1533
1534 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1535 vcpu->arch.mmio_vmx_copy_nums--;
1536 vcpu->arch.mmio_vmx_offset++;
1537 }
1538
1539 return emulated;
1540 }
1541
1542 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1543 {
1544 union kvmppc_one_reg reg;
1545 int vmx_offset = 0;
1546 int result = 0;
1547
1548 vmx_offset =
1549 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1550
1551 if (vmx_offset == -1)
1552 return -1;
1553
1554 kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1555 *val = reg.vsxval[vmx_offset];
1556
1557 return result;
1558 }
1559
1560 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1561 {
1562 union kvmppc_one_reg reg;
1563 int vmx_offset = 0;
1564 int result = 0;
1565
1566 vmx_offset =
1567 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1568
1569 if (vmx_offset == -1)
1570 return -1;
1571
1572 kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1573 *val = reg.vsx32val[vmx_offset];
1574
1575 return result;
1576 }
1577
1578 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1579 {
1580 union kvmppc_one_reg reg;
1581 int vmx_offset = 0;
1582 int result = 0;
1583
1584 vmx_offset =
1585 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1586
1587 if (vmx_offset == -1)
1588 return -1;
1589
1590 kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1591 *val = reg.vsx16val[vmx_offset];
1592
1593 return result;
1594 }
1595
1596 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1597 {
1598 union kvmppc_one_reg reg;
1599 int vmx_offset = 0;
1600 int result = 0;
1601
1602 vmx_offset =
1603 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1604
1605 if (vmx_offset == -1)
1606 return -1;
1607
1608 kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
1609 *val = reg.vsx8val[vmx_offset];
1610
1611 return result;
1612 }
1613
1614 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1615 unsigned int rs, unsigned int bytes, int is_default_endian)
1616 {
1617 u64 val = 0;
1618 unsigned int index = rs & KVM_MMIO_REG_MASK;
1619 enum emulation_result emulated = EMULATE_DONE;
1620
1621 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1622 return EMULATE_FAIL;
1623
1624 vcpu->arch.io_gpr = rs;
1625
1626 while (vcpu->arch.mmio_vmx_copy_nums) {
1627 switch (vcpu->arch.mmio_copy_type) {
1628 case KVMPPC_VMX_COPY_DWORD:
1629 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1630 return EMULATE_FAIL;
1631
1632 break;
1633 case KVMPPC_VMX_COPY_WORD:
1634 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1635 return EMULATE_FAIL;
1636 break;
1637 case KVMPPC_VMX_COPY_HWORD:
1638 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1639 return EMULATE_FAIL;
1640 break;
1641 case KVMPPC_VMX_COPY_BYTE:
1642 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1643 return EMULATE_FAIL;
1644 break;
1645 default:
1646 return EMULATE_FAIL;
1647 }
1648
1649 emulated = kvmppc_handle_store(vcpu, val, bytes,
1650 is_default_endian);
1651 if (emulated != EMULATE_DONE)
1652 break;
1653
1654 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1655 vcpu->arch.mmio_vmx_copy_nums--;
1656 vcpu->arch.mmio_vmx_offset++;
1657 }
1658
1659 return emulated;
1660 }
1661
1662 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1663 {
1664 struct kvm_run *run = vcpu->run;
1665 enum emulation_result emulated = EMULATE_FAIL;
1666 int r;
1667
1668 vcpu->arch.paddr_accessed += run->mmio.len;
1669
1670 if (!vcpu->mmio_is_write) {
1671 emulated = kvmppc_handle_vmx_load(vcpu,
1672 vcpu->arch.io_gpr, run->mmio.len, 1);
1673 } else {
1674 emulated = kvmppc_handle_vmx_store(vcpu,
1675 vcpu->arch.io_gpr, run->mmio.len, 1);
1676 }
1677
1678 switch (emulated) {
1679 case EMULATE_DO_MMIO:
1680 run->exit_reason = KVM_EXIT_MMIO;
1681 r = RESUME_HOST;
1682 break;
1683 case EMULATE_FAIL:
1684 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1685 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1686 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1687 r = RESUME_HOST;
1688 break;
1689 default:
1690 r = RESUME_GUEST;
1691 break;
1692 }
1693 return r;
1694 }
1695 #endif /* CONFIG_ALTIVEC */
1696
1697 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1698 {
1699 int r = 0;
1700 union kvmppc_one_reg val;
1701 int size;
1702
1703 size = one_reg_size(reg->id);
1704 if (size > sizeof(val))
1705 return -EINVAL;
1706
1707 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1708 if (r == -EINVAL) {
1709 r = 0;
1710 switch (reg->id) {
1711 #ifdef CONFIG_ALTIVEC
1712 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1713 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1714 r = -ENXIO;
1715 break;
1716 }
1717 kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
1718 break;
1719 case KVM_REG_PPC_VSCR:
1720 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1721 r = -ENXIO;
1722 break;
1723 }
1724 val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
1725 break;
1726 case KVM_REG_PPC_VRSAVE:
1727 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
1728 break;
1729 #endif /* CONFIG_ALTIVEC */
1730 default:
1731 r = -EINVAL;
1732 break;
1733 }
1734 }
1735
1736 if (r)
1737 return r;
1738
1739 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1740 r = -EFAULT;
1741
1742 return r;
1743 }
1744
1745 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1746 {
1747 int r;
1748 union kvmppc_one_reg val;
1749 int size;
1750
1751 size = one_reg_size(reg->id);
1752 if (size > sizeof(val))
1753 return -EINVAL;
1754
1755 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1756 return -EFAULT;
1757
1758 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1759 if (r == -EINVAL) {
1760 r = 0;
1761 switch (reg->id) {
1762 #ifdef CONFIG_ALTIVEC
1763 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1764 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1765 r = -ENXIO;
1766 break;
1767 }
1768 kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
1769 break;
1770 case KVM_REG_PPC_VSCR:
1771 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1772 r = -ENXIO;
1773 break;
1774 }
1775 kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
1776 break;
1777 case KVM_REG_PPC_VRSAVE:
1778 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1779 r = -ENXIO;
1780 break;
1781 }
1782 kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
1783 break;
1784 #endif /* CONFIG_ALTIVEC */
1785 default:
1786 r = -EINVAL;
1787 break;
1788 }
1789 }
1790
1791 return r;
1792 }
1793
1794 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1795 {
1796 struct kvm_run *run = vcpu->run;
1797 int r;
1798
1799 vcpu_load(vcpu);
1800
1801 if (vcpu->mmio_needed) {
1802 vcpu->mmio_needed = 0;
1803 if (!vcpu->mmio_is_write)
1804 kvmppc_complete_mmio_load(vcpu);
1805 #ifdef CONFIG_VSX
1806 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1807 vcpu->arch.mmio_vsx_copy_nums--;
1808 vcpu->arch.mmio_vsx_offset++;
1809 }
1810
1811 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1812 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1813 if (r == RESUME_HOST) {
1814 vcpu->mmio_needed = 1;
1815 goto out;
1816 }
1817 }
1818 #endif
1819 #ifdef CONFIG_ALTIVEC
1820 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1821 vcpu->arch.mmio_vmx_copy_nums--;
1822 vcpu->arch.mmio_vmx_offset++;
1823 }
1824
1825 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1826 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1827 if (r == RESUME_HOST) {
1828 vcpu->mmio_needed = 1;
1829 goto out;
1830 }
1831 }
1832 #endif
1833 } else if (vcpu->arch.osi_needed) {
1834 u64 *gprs = run->osi.gprs;
1835 int i;
1836
1837 for (i = 0; i < 32; i++)
1838 kvmppc_set_gpr(vcpu, i, gprs[i]);
1839 vcpu->arch.osi_needed = 0;
1840 } else if (vcpu->arch.hcall_needed) {
1841 int i;
1842
1843 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1844 for (i = 0; i < 9; ++i)
1845 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1846 vcpu->arch.hcall_needed = 0;
1847 #ifdef CONFIG_BOOKE
1848 } else if (vcpu->arch.epr_needed) {
1849 kvmppc_set_epr(vcpu, run->epr.epr);
1850 vcpu->arch.epr_needed = 0;
1851 #endif
1852 }
1853
1854 kvm_sigset_activate(vcpu);
1855
1856 if (run->immediate_exit)
1857 r = -EINTR;
1858 else
1859 r = kvmppc_vcpu_run(vcpu);
1860
1861 kvm_sigset_deactivate(vcpu);
1862
1863 #ifdef CONFIG_ALTIVEC
1864 out:
1865 #endif
1866
1867 /*
1868 * We're already returning to userspace, don't pass the
1869 * RESUME_HOST flags along.
1870 */
1871 if (r > 0)
1872 r = 0;
1873
1874 vcpu_put(vcpu);
1875 return r;
1876 }
1877
1878 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1879 {
1880 if (irq->irq == KVM_INTERRUPT_UNSET) {
1881 kvmppc_core_dequeue_external(vcpu);
1882 return 0;
1883 }
1884
1885 kvmppc_core_queue_external(vcpu, irq);
1886
1887 kvm_vcpu_kick(vcpu);
1888
1889 return 0;
1890 }
1891
1892 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1893 struct kvm_enable_cap *cap)
1894 {
1895 int r;
1896
1897 if (cap->flags)
1898 return -EINVAL;
1899
1900 switch (cap->cap) {
1901 case KVM_CAP_PPC_OSI:
1902 r = 0;
1903 vcpu->arch.osi_enabled = true;
1904 break;
1905 case KVM_CAP_PPC_PAPR:
1906 r = 0;
1907 vcpu->arch.papr_enabled = true;
1908 break;
1909 case KVM_CAP_PPC_EPR:
1910 r = 0;
1911 if (cap->args[0])
1912 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1913 else
1914 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1915 break;
1916 #ifdef CONFIG_BOOKE
1917 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1918 r = 0;
1919 vcpu->arch.watchdog_enabled = true;
1920 break;
1921 #endif
1922 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1923 case KVM_CAP_SW_TLB: {
1924 struct kvm_config_tlb cfg;
1925 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1926
1927 r = -EFAULT;
1928 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1929 break;
1930
1931 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1932 break;
1933 }
1934 #endif
1935 #ifdef CONFIG_KVM_MPIC
1936 case KVM_CAP_IRQ_MPIC: {
1937 struct fd f;
1938 struct kvm_device *dev;
1939
1940 r = -EBADF;
1941 f = fdget(cap->args[0]);
1942 if (!f.file)
1943 break;
1944
1945 r = -EPERM;
1946 dev = kvm_device_from_filp(f.file);
1947 if (dev)
1948 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1949
1950 fdput(f);
1951 break;
1952 }
1953 #endif
1954 #ifdef CONFIG_KVM_XICS
1955 case KVM_CAP_IRQ_XICS: {
1956 struct fd f;
1957 struct kvm_device *dev;
1958
1959 r = -EBADF;
1960 f = fdget(cap->args[0]);
1961 if (!f.file)
1962 break;
1963
1964 r = -EPERM;
1965 dev = kvm_device_from_filp(f.file);
1966 if (dev) {
1967 if (xics_on_xive())
1968 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1969 else
1970 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1971 }
1972
1973 fdput(f);
1974 break;
1975 }
1976 #endif /* CONFIG_KVM_XICS */
1977 #ifdef CONFIG_KVM_XIVE
1978 case KVM_CAP_PPC_IRQ_XIVE: {
1979 struct fd f;
1980 struct kvm_device *dev;
1981
1982 r = -EBADF;
1983 f = fdget(cap->args[0]);
1984 if (!f.file)
1985 break;
1986
1987 r = -ENXIO;
1988 if (!xive_enabled())
1989 break;
1990
1991 r = -EPERM;
1992 dev = kvm_device_from_filp(f.file);
1993 if (dev)
1994 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1995 cap->args[1]);
1996
1997 fdput(f);
1998 break;
1999 }
2000 #endif /* CONFIG_KVM_XIVE */
2001 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2002 case KVM_CAP_PPC_FWNMI:
2003 r = -EINVAL;
2004 if (!is_kvmppc_hv_enabled(vcpu->kvm))
2005 break;
2006 r = 0;
2007 vcpu->kvm->arch.fwnmi_enabled = true;
2008 break;
2009 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2010 default:
2011 r = -EINVAL;
2012 break;
2013 }
2014
2015 if (!r)
2016 r = kvmppc_sanity_check(vcpu);
2017
2018 return r;
2019 }
2020
2021 bool kvm_arch_intc_initialized(struct kvm *kvm)
2022 {
2023 #ifdef CONFIG_KVM_MPIC
2024 if (kvm->arch.mpic)
2025 return true;
2026 #endif
2027 #ifdef CONFIG_KVM_XICS
2028 if (kvm->arch.xics || kvm->arch.xive)
2029 return true;
2030 #endif
2031 return false;
2032 }
2033
2034 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2035 struct kvm_mp_state *mp_state)
2036 {
2037 return -EINVAL;
2038 }
2039
2040 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2041 struct kvm_mp_state *mp_state)
2042 {
2043 return -EINVAL;
2044 }
2045
2046 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2047 unsigned int ioctl, unsigned long arg)
2048 {
2049 struct kvm_vcpu *vcpu = filp->private_data;
2050 void __user *argp = (void __user *)arg;
2051
2052 if (ioctl == KVM_INTERRUPT) {
2053 struct kvm_interrupt irq;
2054 if (copy_from_user(&irq, argp, sizeof(irq)))
2055 return -EFAULT;
2056 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2057 }
2058 return -ENOIOCTLCMD;
2059 }
2060
2061 long kvm_arch_vcpu_ioctl(struct file *filp,
2062 unsigned int ioctl, unsigned long arg)
2063 {
2064 struct kvm_vcpu *vcpu = filp->private_data;
2065 void __user *argp = (void __user *)arg;
2066 long r;
2067
2068 switch (ioctl) {
2069 case KVM_ENABLE_CAP:
2070 {
2071 struct kvm_enable_cap cap;
2072 r = -EFAULT;
2073 if (copy_from_user(&cap, argp, sizeof(cap)))
2074 goto out;
2075 vcpu_load(vcpu);
2076 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2077 vcpu_put(vcpu);
2078 break;
2079 }
2080
2081 case KVM_SET_ONE_REG:
2082 case KVM_GET_ONE_REG:
2083 {
2084 struct kvm_one_reg reg;
2085 r = -EFAULT;
2086 if (copy_from_user(&reg, argp, sizeof(reg)))
2087 goto out;
2088 if (ioctl == KVM_SET_ONE_REG)
2089 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2090 else
2091 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2092 break;
2093 }
2094
2095 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2096 case KVM_DIRTY_TLB: {
2097 struct kvm_dirty_tlb dirty;
2098 r = -EFAULT;
2099 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2100 goto out;
2101 vcpu_load(vcpu);
2102 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2103 vcpu_put(vcpu);
2104 break;
2105 }
2106 #endif
2107 default:
2108 r = -EINVAL;
2109 }
2110
2111 out:
2112 return r;
2113 }
2114
2115 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2116 {
2117 return VM_FAULT_SIGBUS;
2118 }
2119
2120 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2121 {
2122 u32 inst_nop = 0x60000000;
2123 #ifdef CONFIG_KVM_BOOKE_HV
2124 u32 inst_sc1 = 0x44000022;
2125 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2126 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2127 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2128 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2129 #else
2130 u32 inst_lis = 0x3c000000;
2131 u32 inst_ori = 0x60000000;
2132 u32 inst_sc = 0x44000002;
2133 u32 inst_imm_mask = 0xffff;
2134
2135 /*
2136 * The hypercall to get into KVM from within guest context is as
2137 * follows:
2138 *
2139          *    lis r0, KVM_SC_MAGIC_R0@h
2140          *    ori r0, r0, KVM_SC_MAGIC_R0@l
2141 * sc
2142 * nop
2143 */
2144 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2145 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2146 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2147 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2148 #endif
2149
2150 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2151
2152 return 0;
2153 }
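/*
 * Userspace obtains the sequence above with the KVM_PPC_GET_PVINFO vm
 * ioctl and typically advertises it to the guest through the
 * "hcall-instructions" property of the /hypervisor device-tree node
 * (see Documentation/virt/kvm/ppc-pv.rst), so a guest traps into KVM
 * the same way whether it runs on booke-hv (sc 1) or on classic/PR KVM
 * (magic value in r0 + sc).
 */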
2154
2155 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2156 {
2157 int ret = 0;
2158
2159 #ifdef CONFIG_KVM_MPIC
2160 ret = ret || (kvm->arch.mpic != NULL);
2161 #endif
2162 #ifdef CONFIG_KVM_XICS
2163 ret = ret || (kvm->arch.xics != NULL);
2164 ret = ret || (kvm->arch.xive != NULL);
2165 #endif
2166 smp_rmb();
2167 return ret;
2168 }
2169
2170 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2171 bool line_status)
2172 {
2173 if (!kvm_arch_irqchip_in_kernel(kvm))
2174 return -ENXIO;
2175
2176 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2177 irq_event->irq, irq_event->level,
2178 line_status);
2179 return 0;
2180 }
2181
2182
2183 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2184 struct kvm_enable_cap *cap)
2185 {
2186 int r;
2187
2188 if (cap->flags)
2189 return -EINVAL;
2190
2191 switch (cap->cap) {
2192 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2193 case KVM_CAP_PPC_ENABLE_HCALL: {
2194 unsigned long hcall = cap->args[0];
2195
2196 r = -EINVAL;
2197 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2198 cap->args[1] > 1)
2199 break;
2200 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2201 break;
2202 if (cap->args[1])
2203 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2204 else
2205 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2206 r = 0;
2207 break;
2208 }
2209 case KVM_CAP_PPC_SMT: {
2210 unsigned long mode = cap->args[0];
2211 unsigned long flags = cap->args[1];
2212
2213 r = -EINVAL;
2214 if (kvm->arch.kvm_ops->set_smt_mode)
2215 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2216 break;
2217 }
2218
2219 case KVM_CAP_PPC_NESTED_HV:
2220 r = -EINVAL;
2221 if (!is_kvmppc_hv_enabled(kvm) ||
2222 !kvm->arch.kvm_ops->enable_nested)
2223 break;
2224 r = kvm->arch.kvm_ops->enable_nested(kvm);
2225 break;
2226 #endif
2227 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2228 case KVM_CAP_PPC_SECURE_GUEST:
2229 r = -EINVAL;
2230 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2231 break;
2232 r = kvm->arch.kvm_ops->enable_svm(kvm);
2233 break;
2234 case KVM_CAP_PPC_DAWR1:
2235 r = -EINVAL;
2236 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2237 break;
2238 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2239 break;
2240 #endif
2241 default:
2242 r = -EINVAL;
2243 break;
2244 }
2245
2246 return r;
2247 }
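
/*
 * Example of driving the KVM_CAP_PPC_ENABLE_HCALL case above from
 * userspace: args[0] is the hcall number (a multiple of 4, implemented by
 * the kernel) and args[1] selects enable (1) or disable (0).  vm_fd and
 * hcall_nr are assumed to be supplied by the caller; illustrative only.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hcall_enabled(int vm_fd, unsigned long hcall_nr, int enable)
{
	struct kvm_enable_cap cap = {
		.cap     = KVM_CAP_PPC_ENABLE_HCALL,
		.args[0] = hcall_nr,
		.args[1] = enable ? 1 : 0,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif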
2248
2249 #ifdef CONFIG_PPC_BOOK3S_64
2250 /*
2251 * These functions check whether the underlying hardware is safe
2252 * against attacks based on observing the effects of speculatively
2253 * executed instructions, and whether it supplies instructions for
2254 * use in workarounds. The information comes from firmware, either
2255 * via the device tree on powernv platforms or from an hcall on
2256 * pseries platforms.
2257 */
2258 #ifdef CONFIG_PPC_PSERIES
2259 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2260 {
2261 struct h_cpu_char_result c;
2262 unsigned long rc;
2263
2264 if (!machine_is(pseries))
2265 return -ENOTTY;
2266
2267 rc = plpar_get_cpu_characteristics(&c);
2268 if (rc == H_SUCCESS) {
2269 cp->character = c.character;
2270 cp->behaviour = c.behaviour;
2271 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2272 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2273 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2274 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2275 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2276 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2277 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2278 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2279 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2280 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2281 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2282 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2283 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2284 }
2285 return 0;
2286 }
2287 #else
2288 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2289 {
2290 return -ENOTTY;
2291 }
2292 #endif
2293
2294 static inline bool have_fw_feat(struct device_node *fw_features,
2295 const char *state, const char *name)
2296 {
2297 struct device_node *np;
2298 bool r = false;
2299
2300 np = of_get_child_by_name(fw_features, name);
2301 if (np) {
2302 r = of_property_read_bool(np, state);
2303 of_node_put(np);
2304 }
2305 return r;
2306 }
2307
2308 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2309 {
2310 struct device_node *np, *fw_features;
2311 int r;
2312
2313 memset(cp, 0, sizeof(*cp));
2314 r = pseries_get_cpu_char(cp);
2315 if (r != -ENOTTY)
2316 return r;
2317
2318 np = of_find_node_by_name(NULL, "ibm,opal");
2319 if (np) {
2320 fw_features = of_get_child_by_name(np, "fw-features");
2321 of_node_put(np);
2322 if (!fw_features)
2323 return 0;
2324 if (have_fw_feat(fw_features, "enabled",
2325 "inst-spec-barrier-ori31,31,0"))
2326 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2327 if (have_fw_feat(fw_features, "enabled",
2328 "fw-bcctrl-serialized"))
2329 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2330 if (have_fw_feat(fw_features, "enabled",
2331 "inst-l1d-flush-ori30,30,0"))
2332 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2333 if (have_fw_feat(fw_features, "enabled",
2334 "inst-l1d-flush-trig2"))
2335 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2336 if (have_fw_feat(fw_features, "enabled",
2337 "fw-l1d-thread-split"))
2338 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2339 if (have_fw_feat(fw_features, "enabled",
2340 "fw-count-cache-disabled"))
2341 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2342 if (have_fw_feat(fw_features, "enabled",
2343 "fw-count-cache-flush-bcctr2,0,0"))
2344 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2345 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2346 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2347 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2348 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2349 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2350 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2351 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2352
2353 if (have_fw_feat(fw_features, "enabled",
2354 "speculation-policy-favor-security"))
2355 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2356 if (!have_fw_feat(fw_features, "disabled",
2357 "needs-l1d-flush-msr-pr-0-to-1"))
2358 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2359 if (!have_fw_feat(fw_features, "disabled",
2360 "needs-spec-barrier-for-bound-checks"))
2361 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2362 if (have_fw_feat(fw_features, "enabled",
2363 "needs-count-cache-flush-on-context-switch"))
2364 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2365 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2366 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2367 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2368 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2369
2370 of_node_put(fw_features);
2371 }
2372
2373 return 0;
2374 }
2375 #endif
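
/*
 * The characteristics gathered above are exposed through the
 * KVM_PPC_GET_CPU_CHAR vm ioctl.  A userspace sketch, assuming vm_fd was
 * obtained with KVM_CREATE_VM; only bits set in the corresponding *_mask
 * field are meaningful.  Illustrative only.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void report_spec_mitigations(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
		perror("KVM_PPC_GET_CPU_CHAR");
		return;
	}

	if (cc.behaviour_mask & cc.behaviour & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
		printf("guest should flush L1D on kernel->user transitions\n");

	if (cc.character_mask & cc.character & KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2)
		printf("trig2 L1D flush instruction is available\n");
}
#endif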
2376
2377 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2378 {
2379 struct kvm *kvm __maybe_unused = filp->private_data;
2380 void __user *argp = (void __user *)arg;
2381 int r;
2382
2383 switch (ioctl) {
2384 case KVM_PPC_GET_PVINFO: {
2385 struct kvm_ppc_pvinfo pvinfo;
2386 memset(&pvinfo, 0, sizeof(pvinfo));
2387 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2388 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2389 r = -EFAULT;
2390 goto out;
2391 }
2392
2393 break;
2394 }
2395 #ifdef CONFIG_SPAPR_TCE_IOMMU
2396 case KVM_CREATE_SPAPR_TCE_64: {
2397 struct kvm_create_spapr_tce_64 create_tce_64;
2398
2399 r = -EFAULT;
2400 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2401 goto out;
2402 if (create_tce_64.flags) {
2403 r = -EINVAL;
2404 goto out;
2405 }
2406 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2407 goto out;
2408 }
2409 case KVM_CREATE_SPAPR_TCE: {
2410 struct kvm_create_spapr_tce create_tce;
2411 struct kvm_create_spapr_tce_64 create_tce_64;
2412
2413 r = -EFAULT;
2414 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2415 goto out;
2416
2417 create_tce_64.liobn = create_tce.liobn;
2418 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2419 create_tce_64.offset = 0;
2420 create_tce_64.size = create_tce.window_size >>
2421 IOMMU_PAGE_SHIFT_4K;
2422 create_tce_64.flags = 0;
2423 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2424 goto out;
2425 }
2426 #endif
2427 #ifdef CONFIG_PPC_BOOK3S_64
2428 case KVM_PPC_GET_SMMU_INFO: {
2429 struct kvm_ppc_smmu_info info;
2430 struct kvm *kvm = filp->private_data;
2431
2432 memset(&info, 0, sizeof(info));
2433 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2434 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2435 r = -EFAULT;
2436 break;
2437 }
2438 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2439 struct kvm *kvm = filp->private_data;
2440
2441 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2442 break;
2443 }
2444 case KVM_PPC_CONFIGURE_V3_MMU: {
2445 struct kvm *kvm = filp->private_data;
2446 struct kvm_ppc_mmuv3_cfg cfg;
2447
2448 r = -EINVAL;
2449 if (!kvm->arch.kvm_ops->configure_mmu)
2450 goto out;
2451 r = -EFAULT;
2452 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2453 goto out;
2454 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2455 break;
2456 }
2457 case KVM_PPC_GET_RMMU_INFO: {
2458 struct kvm *kvm = filp->private_data;
2459 struct kvm_ppc_rmmu_info info;
2460
2461 r = -EINVAL;
2462 if (!kvm->arch.kvm_ops->get_rmmu_info)
2463 goto out;
2464 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2465 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2466 r = -EFAULT;
2467 break;
2468 }
2469 case KVM_PPC_GET_CPU_CHAR: {
2470 struct kvm_ppc_cpu_char cpuchar;
2471
2472 r = kvmppc_get_cpu_char(&cpuchar);
2473 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2474 r = -EFAULT;
2475 break;
2476 }
2477 case KVM_PPC_SVM_OFF: {
2478 struct kvm *kvm = filp->private_data;
2479
2480 r = 0;
2481 if (!kvm->arch.kvm_ops->svm_off)
2482 goto out;
2483
2484 r = kvm->arch.kvm_ops->svm_off(kvm);
2485 break;
2486 }
2487 default: {
2488 struct kvm *kvm = filp->private_data;
2489 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2490 }
2491 #else /* CONFIG_PPC_BOOK3S_64 */
2492 default:
2493 r = -ENOTTY;
2494 #endif
2495 }
2496 out:
2497 return r;
2498 }
2499
2500 static DEFINE_IDA(lpid_inuse);
2501 static unsigned long nr_lpids;
2502
2503 long kvmppc_alloc_lpid(void)
2504 {
2505 int lpid;
2506
2507 /* The host LPID must always be 0 (allocation starts at 1) */
2508 lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2509 if (lpid < 0) {
2510 if (lpid == -ENOMEM)
2511 pr_err("%s: Out of memory\n", __func__);
2512 else
2513 pr_err("%s: No LPIDs free\n", __func__);
2514 return -ENOMEM;
2515 }
2516
2517 return lpid;
2518 }
2519 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2520
2521 void kvmppc_free_lpid(long lpid)
2522 {
2523 ida_free(&lpid_inuse, lpid);
2524 }
2525 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2526
2527 /* nr_lpids_param includes the host LPID */
2528 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2529 {
2530 nr_lpids = nr_lpids_param;
2531 }
2532 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
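
/*
 * How a VM backend typically pairs the LPID helpers above, shown as a
 * sketch under the assumption that the backend stores the id in
 * kvm->arch.lpid (as the Book3S HV code does); error paths and locking
 * are elided and the block is not built.
 */
#if 0
static int example_assign_lpid(struct kvm *kvm)
{
	long lpid = kvmppc_alloc_lpid();

	if (lpid < 0)
		return (int)lpid;	/* -ENOMEM once the range is exhausted */

	kvm->arch.lpid = lpid;		/* assumption: backend keeps it here */
	return 0;
}

static void example_release_lpid(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);	/* on VM destruction */
}
#endif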
2533
2534 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2535
2536 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2537 {
2538 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2539 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2540 }
2541
2542 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2543 {
2544 if (kvm->arch.kvm_ops->create_vm_debugfs)
2545 kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2546 return 0;
2547 }