// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>

#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "../mm/mmu_decl.h"
#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	WARN_ON(irqs_disabled());

	if (signal_pending(current)) {
		kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	vcpu->mode = IN_GUEST_MODE;

	/*
	 * Reading vcpu->requests must happen after setting vcpu->mode,
	 * so we don't miss a request because the requester sees
	 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
	 * before next entering the guest (and thus doesn't IPI).
	 * This also orders the write to mode from any reads
	 * to the page tables done while the VCPU is running.
	 * Please see the comment in kvm_flush_remote_tlbs.
	 */
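	/*
	 * Illustrative sketch of the ordering relied on above, assuming the
	 * requester goes through the generic kvm_make_all_cpus_request() path:
	 *
	 *   vCPU entry (this function)       requester
	 *   --------------------------       ---------
	 *   vcpu->mode = IN_GUEST_MODE       set request bit in vcpu->requests
	 *   barrier                          barrier
	 *   check kvm_request_pending()      check vcpu->mode, IPI if IN_GUEST_MODE
	 *
	 * Either this side observes the request bit, or the requester observes
	 * IN_GUEST_MODE and sends the kick, so a request is never lost.
	 */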
	if (kvm_request_pending(vcpu)) {
		/* Make sure we process requests preemptibly */
		trace_kvm_check_requests(vcpu);
		r = kvmppc_core_check_requests(vcpu);
	}

	if (kvmppc_core_prepare_to_enter(vcpu)) {
		/* interrupts got enabled in between, so we
		 * are back at square 1 */
	}

	guest_enter_irqoff();
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
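/*
 * Typical call pattern (sketch, not a complete caller): the subarch run
 * loops invoke this with interrupts disabled just before switching into
 * guest context:
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;
 *
 * A return value of 1 means the vCPU is committed and the caller may enter
 * guest state; <= 0 means go back to the host with that return value.
 */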
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;
out:
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_loadstore(vcpu);

	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		break;
	}
	}
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
	} else if (type == KVM_VM_PPC_HV) {
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		kvm_ops = kvmppc_pr_ops;
	}

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
}
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	if ((index != 0) && (index != 1))
		return -1;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	if ((index > 3) || (index < 0))
		return -1;
}
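/*
 * Illustrative note: these helpers bound-check the requested element of a
 * VSX register (two 64-bit doublewords, four 32-bit words) and return the
 * storage offset to use, or -1 for a bad index.  In the full source the
 * elided part of each body also folds in host endianness, e.g. word index 0
 * stays at offset 0 on a big-endian host but maps to the opposite end of the
 * register (3 - index) on a little-endian one.
 */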
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}
static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}
static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}
static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}
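/*
 * Illustrative example: for a 16-byte vector and element_size of 4 there are
 * elts = 4 word slots.  Without a byteswap, element 1 lives at offset 1; when
 * kvmppc_need_byteswap() is true the order is reversed and element 1 maps to
 * offset elts - 1 - 1 = 2.
 */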
static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	return fprs;
}
#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
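/*
 * Illustrative note: the conversion bounces the value through FPR0 with a
 * single-precision load and a double-precision store (and the reverse for
 * dp_to_sp), letting the FPU hardware do the format conversion.  In the full
 * source the asm is additionally bracketed with preempt_disable() and
 * enable_kernel_fp() so that clobbering fr0 is safe.  Without CONFIG_PPC_FPU
 * the macros degrade to identity mappings.
 */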
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
		case 4:
			gpr = (s64)(s32)gpr;
			break;
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	}
}
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}
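	/*
	 * Illustrative truth table for the XOR-style selection above:
	 *
	 *   guest needs byteswap | access is default endian | host_swabbed
	 *   ---------------------+---------------------------+-------------
	 *           yes          |            yes            |     yes
	 *           yes          |            no             |     no
	 *           no           |            yes            |     no
	 *           no           |            no             |     yes
	 *
	 * i.e. host_swabbed is logically !(need_byteswap ^ is_default_endian):
	 * the bytes in run->mmio.data need swapping on the host side exactly
	 * when the two conditions agree.
	 */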
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}
#endif /* CONFIG_VSX */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;
	}

	return result;
}
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}
*vcpu
,
1458 struct kvm_run
*run
)
1460 enum emulation_result emulated
= EMULATE_FAIL
;
1463 vcpu
->arch
.paddr_accessed
+= run
->mmio
.len
;
1465 if (!vcpu
->mmio_is_write
) {
1466 emulated
= kvmppc_handle_vsx_load(run
, vcpu
, vcpu
->arch
.io_gpr
,
1467 run
->mmio
.len
, 1, vcpu
->arch
.mmio_sign_extend
);
1469 emulated
= kvmppc_handle_vsx_store(run
, vcpu
,
1470 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1474 case EMULATE_DO_MMIO
:
1475 run
->exit_reason
= KVM_EXIT_MMIO
;
1479 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1480 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
1481 run
->internal
.suberror
= KVM_INTERNAL_ERROR_EMULATION
;
1490 #endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}
int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return 0;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return 0;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return 0;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return 0;
}
int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
					       is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}
*vcpu
,
1637 struct kvm_run
*run
)
1639 enum emulation_result emulated
= EMULATE_FAIL
;
1642 vcpu
->arch
.paddr_accessed
+= run
->mmio
.len
;
1644 if (!vcpu
->mmio_is_write
) {
1645 emulated
= kvmppc_handle_vmx_load(run
, vcpu
,
1646 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1648 emulated
= kvmppc_handle_vmx_store(run
, vcpu
,
1649 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1653 case EMULATE_DO_MMIO
:
1654 run
->exit_reason
= KVM_EXIT_MMIO
;
1658 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1659 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
1660 run
->internal
.suberror
= KVM_INTERNAL_ERROR_EMULATION
;
1669 #endif /* CONFIG_ALTIVEC */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);

	switch (reg->id) {
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
#endif /* CONFIG_ALTIVEC */
	}

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);

	switch (reg->id) {
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_VRSAVE:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
#endif /* CONFIG_ALTIVEC */
	}

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		if (!xive_enabled()) {
			fdput(f);
			break;
		}

		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;

		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}

	return r;
}
static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (fw_features) {
			if (have_fw_feat(fw_features, "enabled",
					 "inst-spec-barrier-ori31,31,0"))
				cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
			if (have_fw_feat(fw_features, "enabled",
					 "fw-bcctrl-serialized"))
				cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
			if (have_fw_feat(fw_features, "enabled",
					 "inst-l1d-flush-ori30,30,0"))
				cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
			if (have_fw_feat(fw_features, "enabled",
					 "inst-l1d-flush-trig2"))
				cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
			if (have_fw_feat(fw_features, "enabled",
					 "fw-l1d-thread-split"))
				cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
			if (have_fw_feat(fw_features, "enabled",
					 "fw-count-cache-disabled"))
				cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
			if (have_fw_feat(fw_features, "enabled",
					 "fw-count-cache-flush-bcctr2,0,0"))
				cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
			cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
				KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
				KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
				KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
				KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
				KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
				KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

			if (have_fw_feat(fw_features, "enabled",
					 "speculation-policy-favor-security"))
				cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
			if (!have_fw_feat(fw_features, "disabled",
					  "needs-l1d-flush-msr-pr-0-to-1"))
				cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
			if (!have_fw_feat(fw_features, "disabled",
					  "needs-spec-barrier-for-bound-checks"))
				cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
			if (have_fw_feat(fw_features, "enabled",
					 "needs-count-cache-flush-on-context-switch"))
				cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
			cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
				KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
				KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
				KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

			of_node_put(fw_features);
		}
	}

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			break;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			break;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			break;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
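/*
 * Illustrative lifecycle (sketch, not from the original source): a subarch
 * typically calls kvmppc_init_lpid() once with the platform's LPID limit,
 * then kvmppc_alloc_lpid() per guest (or kvmppc_claim_lpid() for an LPID it
 * already owns) and kvmppc_free_lpid() when the guest is torn down.  The
 * bitmap makes alloc/claim/free cheap and idempotent per LPID.
 */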
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);