// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
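/*
 * For Book3S PR the kvm_vcpu_arch_shared area (magic page) is kept in the
 * guest's endianness; when the guest switches interrupt endianness the
 * fields below are byte-swapped so host and guest keep agreeing on the
 * layout.
 */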
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
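/*
 * Entry point for the KVM/ePAPR paravirtual hypercalls made by a guest:
 * the hcall number arrives in r11, arguments in r3-r6, and the two return
 * values go back in r3 (status, the caller stores r) and r4 (r2 below).
 */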
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
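/*
 * Called after a guest load/store faulted on emulated MMIO: try to emulate
 * the access, and either resume the guest or bounce the MMIO out to
 * userspace via KVM_EXIT_MMIO.
 */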
int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		ppc_inst_t last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      ppc_inst_val(last_inst));

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
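/*
 * Store @size bytes from @ptr at a guest effective address, translating it
 * first and honouring the PR KVM magic page overlay if it is mapped there.
 */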
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
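/*
 * Counterpart of kvmppc_st(): load @size bytes for the guest from an
 * effective address, again with translation and magic page handling.
 */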
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
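/*
 * VM creation: pick the HV or PR implementation (kvmppc_ops) based on the
 * VM type requested by userspace, take a reference on the backing module
 * and let the implementation initialise the VM.
 */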
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
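/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this host (and,
 * when a VM is supplied, this particular HV or PR VM) supports.
 */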
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD_RESAMPLE:
		r = !xive_enabled();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
		BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
		r = 1;
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
890 static inline int kvmppc_get_vsr_dword_offset(int index
)
894 if ((index
!= 0) && (index
!= 1))
906 static inline int kvmppc_get_vsr_word_offset(int index
)
910 if ((index
> 3) || (index
< 0))
921 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu
*vcpu
,
924 union kvmppc_one_reg val
;
925 int offset
= kvmppc_get_vsr_dword_offset(vcpu
->arch
.mmio_vsx_offset
);
926 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
932 kvmppc_get_vsx_vr(vcpu
, index
- 32, &val
.vval
);
933 val
.vsxval
[offset
] = gpr
;
934 kvmppc_set_vsx_vr(vcpu
, index
- 32, &val
.vval
);
936 kvmppc_set_vsx_fpr(vcpu
, index
, offset
, gpr
);
940 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu
*vcpu
,
943 union kvmppc_one_reg val
;
944 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
947 kvmppc_get_vsx_vr(vcpu
, index
- 32, &val
.vval
);
950 kvmppc_set_vsx_vr(vcpu
, index
- 32, &val
.vval
);
952 kvmppc_set_vsx_fpr(vcpu
, index
, 0, gpr
);
953 kvmppc_set_vsx_fpr(vcpu
, index
, 1, gpr
);
957 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu
*vcpu
,
960 union kvmppc_one_reg val
;
961 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
964 val
.vsx32val
[0] = gpr
;
965 val
.vsx32val
[1] = gpr
;
966 val
.vsx32val
[2] = gpr
;
967 val
.vsx32val
[3] = gpr
;
968 kvmppc_set_vsx_vr(vcpu
, index
- 32, &val
.vval
);
970 val
.vsx32val
[0] = gpr
;
971 val
.vsx32val
[1] = gpr
;
972 kvmppc_set_vsx_fpr(vcpu
, index
, 0, val
.vsxval
[0]);
973 kvmppc_set_vsx_fpr(vcpu
, index
, 1, val
.vsxval
[0]);
977 static inline void kvmppc_set_vsr_word(struct kvm_vcpu
*vcpu
,
980 union kvmppc_one_reg val
;
981 int offset
= kvmppc_get_vsr_word_offset(vcpu
->arch
.mmio_vsx_offset
);
982 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
983 int dword_offset
, word_offset
;
989 kvmppc_get_vsx_vr(vcpu
, index
- 32, &val
.vval
);
990 val
.vsx32val
[offset
] = gpr32
;
991 kvmppc_set_vsx_vr(vcpu
, index
- 32, &val
.vval
);
993 dword_offset
= offset
/ 2;
994 word_offset
= offset
% 2;
995 val
.vsxval
[0] = kvmppc_get_vsx_fpr(vcpu
, index
, dword_offset
);
996 val
.vsx32val
[word_offset
] = gpr32
;
997 kvmppc_set_vsx_fpr(vcpu
, index
, dword_offset
, val
.vsxval
[0]);
1000 #endif /* CONFIG_VSX */
1002 #ifdef CONFIG_ALTIVEC
1003 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu
*vcpu
,
1004 int index
, int element_size
)
1007 int elts
= sizeof(vector128
)/element_size
;
1009 if ((index
< 0) || (index
>= elts
))
1012 if (kvmppc_need_byteswap(vcpu
))
1013 offset
= elts
- index
- 1;
1020 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu
*vcpu
,
1023 return kvmppc_get_vmx_offset_generic(vcpu
, index
, 8);
1026 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu
*vcpu
,
1029 return kvmppc_get_vmx_offset_generic(vcpu
, index
, 4);
1032 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu
*vcpu
,
1035 return kvmppc_get_vmx_offset_generic(vcpu
, index
, 2);
1038 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu
*vcpu
,
1041 return kvmppc_get_vmx_offset_generic(vcpu
, index
, 1);
1045 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu
*vcpu
,
1048 union kvmppc_one_reg val
;
1049 int offset
= kvmppc_get_vmx_dword_offset(vcpu
,
1050 vcpu
->arch
.mmio_vmx_offset
);
1051 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
1056 kvmppc_get_vsx_vr(vcpu
, index
, &val
.vval
);
1057 val
.vsxval
[offset
] = gpr
;
1058 kvmppc_set_vsx_vr(vcpu
, index
, &val
.vval
);
1061 static inline void kvmppc_set_vmx_word(struct kvm_vcpu
*vcpu
,
1064 union kvmppc_one_reg val
;
1065 int offset
= kvmppc_get_vmx_word_offset(vcpu
,
1066 vcpu
->arch
.mmio_vmx_offset
);
1067 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
1072 kvmppc_get_vsx_vr(vcpu
, index
, &val
.vval
);
1073 val
.vsx32val
[offset
] = gpr32
;
1074 kvmppc_set_vsx_vr(vcpu
, index
, &val
.vval
);
1077 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu
*vcpu
,
1080 union kvmppc_one_reg val
;
1081 int offset
= kvmppc_get_vmx_hword_offset(vcpu
,
1082 vcpu
->arch
.mmio_vmx_offset
);
1083 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
1088 kvmppc_get_vsx_vr(vcpu
, index
, &val
.vval
);
1089 val
.vsx16val
[offset
] = gpr16
;
1090 kvmppc_set_vsx_vr(vcpu
, index
, &val
.vval
);
1093 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu
*vcpu
,
1096 union kvmppc_one_reg val
;
1097 int offset
= kvmppc_get_vmx_byte_offset(vcpu
,
1098 vcpu
->arch
.mmio_vmx_offset
);
1099 int index
= vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
;
1104 kvmppc_get_vsx_vr(vcpu
, index
, &val
.vval
);
1105 val
.vsx8val
[offset
] = gpr8
;
1106 kvmppc_set_vsx_vr(vcpu
, index
, &val
.vval
);
1108 #endif /* CONFIG_ALTIVEC */
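/*
 * MMIO accesses of single-precision FP values are widened to (or narrowed
 * from) the 64-bit register image; with a usable FPU this is done by
 * bouncing the value through fr0 with lfs/stfd (and lfd/stfs).
 */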
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
1140 static void kvmppc_complete_mmio_load(struct kvm_vcpu
*vcpu
)
1142 struct kvm_run
*run
= vcpu
->run
;
1145 if (run
->mmio
.len
> sizeof(gpr
))
1148 if (!vcpu
->arch
.mmio_host_swabbed
) {
1149 switch (run
->mmio
.len
) {
1150 case 8: gpr
= *(u64
*)run
->mmio
.data
; break;
1151 case 4: gpr
= *(u32
*)run
->mmio
.data
; break;
1152 case 2: gpr
= *(u16
*)run
->mmio
.data
; break;
1153 case 1: gpr
= *(u8
*)run
->mmio
.data
; break;
1156 switch (run
->mmio
.len
) {
1157 case 8: gpr
= swab64(*(u64
*)run
->mmio
.data
); break;
1158 case 4: gpr
= swab32(*(u32
*)run
->mmio
.data
); break;
1159 case 2: gpr
= swab16(*(u16
*)run
->mmio
.data
); break;
1160 case 1: gpr
= *(u8
*)run
->mmio
.data
; break;
1164 /* conversion between single and double precision */
1165 if ((vcpu
->arch
.mmio_sp64_extend
) && (run
->mmio
.len
== 4))
1166 gpr
= sp_to_dp(gpr
);
1168 if (vcpu
->arch
.mmio_sign_extend
) {
1169 switch (run
->mmio
.len
) {
1172 gpr
= (s64
)(s32
)gpr
;
1176 gpr
= (s64
)(s16
)gpr
;
1184 switch (vcpu
->arch
.io_gpr
& KVM_MMIO_REG_EXT_MASK
) {
1185 case KVM_MMIO_REG_GPR
:
1186 kvmppc_set_gpr(vcpu
, vcpu
->arch
.io_gpr
, gpr
);
1188 case KVM_MMIO_REG_FPR
:
1189 if (vcpu
->kvm
->arch
.kvm_ops
->giveup_ext
)
1190 vcpu
->kvm
->arch
.kvm_ops
->giveup_ext(vcpu
, MSR_FP
);
1192 kvmppc_set_fpr(vcpu
, vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
, gpr
);
1194 #ifdef CONFIG_PPC_BOOK3S
1195 case KVM_MMIO_REG_QPR
:
1196 vcpu
->arch
.qpr
[vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
] = gpr
;
1198 case KVM_MMIO_REG_FQPR
:
1199 kvmppc_set_fpr(vcpu
, vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
, gpr
);
1200 vcpu
->arch
.qpr
[vcpu
->arch
.io_gpr
& KVM_MMIO_REG_MASK
] = gpr
;
1204 case KVM_MMIO_REG_VSX
:
1205 if (vcpu
->kvm
->arch
.kvm_ops
->giveup_ext
)
1206 vcpu
->kvm
->arch
.kvm_ops
->giveup_ext(vcpu
, MSR_VSX
);
1208 if (vcpu
->arch
.mmio_copy_type
== KVMPPC_VSX_COPY_DWORD
)
1209 kvmppc_set_vsr_dword(vcpu
, gpr
);
1210 else if (vcpu
->arch
.mmio_copy_type
== KVMPPC_VSX_COPY_WORD
)
1211 kvmppc_set_vsr_word(vcpu
, gpr
);
1212 else if (vcpu
->arch
.mmio_copy_type
==
1213 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP
)
1214 kvmppc_set_vsr_dword_dump(vcpu
, gpr
);
1215 else if (vcpu
->arch
.mmio_copy_type
==
1216 KVMPPC_VSX_COPY_WORD_LOAD_DUMP
)
1217 kvmppc_set_vsr_word_dump(vcpu
, gpr
);
1220 #ifdef CONFIG_ALTIVEC
1221 case KVM_MMIO_REG_VMX
:
1222 if (vcpu
->kvm
->arch
.kvm_ops
->giveup_ext
)
1223 vcpu
->kvm
->arch
.kvm_ops
->giveup_ext(vcpu
, MSR_VEC
);
1225 if (vcpu
->arch
.mmio_copy_type
== KVMPPC_VMX_COPY_DWORD
)
1226 kvmppc_set_vmx_dword(vcpu
, gpr
);
1227 else if (vcpu
->arch
.mmio_copy_type
== KVMPPC_VMX_COPY_WORD
)
1228 kvmppc_set_vmx_word(vcpu
, gpr
);
1229 else if (vcpu
->arch
.mmio_copy_type
==
1230 KVMPPC_VMX_COPY_HWORD
)
1231 kvmppc_set_vmx_hword(vcpu
, gpr
);
1232 else if (vcpu
->arch
.mmio_copy_type
==
1233 KVMPPC_VMX_COPY_BYTE
)
1234 kvmppc_set_vmx_byte(vcpu
, gpr
);
1237 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1238 case KVM_MMIO_REG_NESTED_GPR
:
1239 if (kvmppc_need_byteswap(vcpu
))
1241 kvm_vcpu_write_guest(vcpu
, vcpu
->arch
.nested_io_gpr
, &gpr
,
1250 static int __kvmppc_handle_load(struct kvm_vcpu
*vcpu
,
1251 unsigned int rt
, unsigned int bytes
,
1252 int is_default_endian
, int sign_extend
)
1254 struct kvm_run
*run
= vcpu
->run
;
1258 /* Pity C doesn't have a logical XOR operator */
1259 if (kvmppc_need_byteswap(vcpu
)) {
1260 host_swabbed
= is_default_endian
;
1262 host_swabbed
= !is_default_endian
;
1265 if (bytes
> sizeof(run
->mmio
.data
))
1266 return EMULATE_FAIL
;
1268 run
->mmio
.phys_addr
= vcpu
->arch
.paddr_accessed
;
1269 run
->mmio
.len
= bytes
;
1270 run
->mmio
.is_write
= 0;
1272 vcpu
->arch
.io_gpr
= rt
;
1273 vcpu
->arch
.mmio_host_swabbed
= host_swabbed
;
1274 vcpu
->mmio_needed
= 1;
1275 vcpu
->mmio_is_write
= 0;
1276 vcpu
->arch
.mmio_sign_extend
= sign_extend
;
1278 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
1280 ret
= kvm_io_bus_read(vcpu
, KVM_MMIO_BUS
, run
->mmio
.phys_addr
,
1281 bytes
, &run
->mmio
.data
);
1283 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
1286 kvmppc_complete_mmio_load(vcpu
);
1287 vcpu
->mmio_needed
= 0;
1288 return EMULATE_DONE
;
1291 return EMULATE_DO_MMIO
;
1294 int kvmppc_handle_load(struct kvm_vcpu
*vcpu
,
1295 unsigned int rt
, unsigned int bytes
,
1296 int is_default_endian
)
1298 return __kvmppc_handle_load(vcpu
, rt
, bytes
, is_default_endian
, 0);
1300 EXPORT_SYMBOL_GPL(kvmppc_handle_load
);
1302 /* Same as above, but sign extends */
1303 int kvmppc_handle_loads(struct kvm_vcpu
*vcpu
,
1304 unsigned int rt
, unsigned int bytes
,
1305 int is_default_endian
)
1307 return __kvmppc_handle_load(vcpu
, rt
, bytes
, is_default_endian
, 1);
1311 int kvmppc_handle_vsx_load(struct kvm_vcpu
*vcpu
,
1312 unsigned int rt
, unsigned int bytes
,
1313 int is_default_endian
, int mmio_sign_extend
)
1315 enum emulation_result emulated
= EMULATE_DONE
;
1317 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
1318 if (vcpu
->arch
.mmio_vsx_copy_nums
> 4)
1319 return EMULATE_FAIL
;
1321 while (vcpu
->arch
.mmio_vsx_copy_nums
) {
1322 emulated
= __kvmppc_handle_load(vcpu
, rt
, bytes
,
1323 is_default_endian
, mmio_sign_extend
);
1325 if (emulated
!= EMULATE_DONE
)
1328 vcpu
->arch
.paddr_accessed
+= vcpu
->run
->mmio
.len
;
1330 vcpu
->arch
.mmio_vsx_copy_nums
--;
1331 vcpu
->arch
.mmio_vsx_offset
++;
1335 #endif /* CONFIG_VSX */
1337 int kvmppc_handle_store(struct kvm_vcpu
*vcpu
,
1338 u64 val
, unsigned int bytes
, int is_default_endian
)
1340 struct kvm_run
*run
= vcpu
->run
;
1341 void *data
= run
->mmio
.data
;
1345 /* Pity C doesn't have a logical XOR operator */
1346 if (kvmppc_need_byteswap(vcpu
)) {
1347 host_swabbed
= is_default_endian
;
1349 host_swabbed
= !is_default_endian
;
1352 if (bytes
> sizeof(run
->mmio
.data
))
1353 return EMULATE_FAIL
;
1355 run
->mmio
.phys_addr
= vcpu
->arch
.paddr_accessed
;
1356 run
->mmio
.len
= bytes
;
1357 run
->mmio
.is_write
= 1;
1358 vcpu
->mmio_needed
= 1;
1359 vcpu
->mmio_is_write
= 1;
1361 if ((vcpu
->arch
.mmio_sp64_extend
) && (bytes
== 4))
1362 val
= dp_to_sp(val
);
1364 /* Store the value at the lowest bytes in 'data'. */
1365 if (!host_swabbed
) {
1367 case 8: *(u64
*)data
= val
; break;
1368 case 4: *(u32
*)data
= val
; break;
1369 case 2: *(u16
*)data
= val
; break;
1370 case 1: *(u8
*)data
= val
; break;
1374 case 8: *(u64
*)data
= swab64(val
); break;
1375 case 4: *(u32
*)data
= swab32(val
); break;
1376 case 2: *(u16
*)data
= swab16(val
); break;
1377 case 1: *(u8
*)data
= val
; break;
1381 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
1383 ret
= kvm_io_bus_write(vcpu
, KVM_MMIO_BUS
, run
->mmio
.phys_addr
,
1384 bytes
, &run
->mmio
.data
);
1386 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
1389 vcpu
->mmio_needed
= 0;
1390 return EMULATE_DONE
;
1393 return EMULATE_DO_MMIO
;
1395 EXPORT_SYMBOL_GPL(kvmppc_handle_store
);
1398 static inline int kvmppc_get_vsr_data(struct kvm_vcpu
*vcpu
, int rs
, u64
*val
)
1400 u32 dword_offset
, word_offset
;
1401 union kvmppc_one_reg reg
;
1403 int copy_type
= vcpu
->arch
.mmio_copy_type
;
1406 switch (copy_type
) {
1407 case KVMPPC_VSX_COPY_DWORD
:
1409 kvmppc_get_vsr_dword_offset(vcpu
->arch
.mmio_vsx_offset
);
1411 if (vsx_offset
== -1) {
1417 *val
= kvmppc_get_vsx_fpr(vcpu
, rs
, vsx_offset
);
1419 kvmppc_get_vsx_vr(vcpu
, rs
- 32, ®
.vval
);
1420 *val
= reg
.vsxval
[vsx_offset
];
1424 case KVMPPC_VSX_COPY_WORD
:
1426 kvmppc_get_vsr_word_offset(vcpu
->arch
.mmio_vsx_offset
);
1428 if (vsx_offset
== -1) {
1434 dword_offset
= vsx_offset
/ 2;
1435 word_offset
= vsx_offset
% 2;
1436 reg
.vsxval
[0] = kvmppc_get_vsx_fpr(vcpu
, rs
, dword_offset
);
1437 *val
= reg
.vsx32val
[word_offset
];
1439 kvmppc_get_vsx_vr(vcpu
, rs
- 32, ®
.vval
);
1440 *val
= reg
.vsx32val
[vsx_offset
];
1452 int kvmppc_handle_vsx_store(struct kvm_vcpu
*vcpu
,
1453 int rs
, unsigned int bytes
, int is_default_endian
)
1456 enum emulation_result emulated
= EMULATE_DONE
;
1458 vcpu
->arch
.io_gpr
= rs
;
1460 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
1461 if (vcpu
->arch
.mmio_vsx_copy_nums
> 4)
1462 return EMULATE_FAIL
;
1464 while (vcpu
->arch
.mmio_vsx_copy_nums
) {
1465 if (kvmppc_get_vsr_data(vcpu
, rs
, &val
) == -1)
1466 return EMULATE_FAIL
;
1468 emulated
= kvmppc_handle_store(vcpu
,
1469 val
, bytes
, is_default_endian
);
1471 if (emulated
!= EMULATE_DONE
)
1474 vcpu
->arch
.paddr_accessed
+= vcpu
->run
->mmio
.len
;
1476 vcpu
->arch
.mmio_vsx_copy_nums
--;
1477 vcpu
->arch
.mmio_vsx_offset
++;
1483 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu
*vcpu
)
1485 struct kvm_run
*run
= vcpu
->run
;
1486 enum emulation_result emulated
= EMULATE_FAIL
;
1489 vcpu
->arch
.paddr_accessed
+= run
->mmio
.len
;
1491 if (!vcpu
->mmio_is_write
) {
1492 emulated
= kvmppc_handle_vsx_load(vcpu
, vcpu
->arch
.io_gpr
,
1493 run
->mmio
.len
, 1, vcpu
->arch
.mmio_sign_extend
);
1495 emulated
= kvmppc_handle_vsx_store(vcpu
,
1496 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1500 case EMULATE_DO_MMIO
:
1501 run
->exit_reason
= KVM_EXIT_MMIO
;
1505 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1506 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
1507 run
->internal
.suberror
= KVM_INTERNAL_ERROR_EMULATION
;
1516 #endif /* CONFIG_VSX */
1518 #ifdef CONFIG_ALTIVEC
1519 int kvmppc_handle_vmx_load(struct kvm_vcpu
*vcpu
,
1520 unsigned int rt
, unsigned int bytes
, int is_default_endian
)
1522 enum emulation_result emulated
= EMULATE_DONE
;
1524 if (vcpu
->arch
.mmio_vmx_copy_nums
> 2)
1525 return EMULATE_FAIL
;
1527 while (vcpu
->arch
.mmio_vmx_copy_nums
) {
1528 emulated
= __kvmppc_handle_load(vcpu
, rt
, bytes
,
1529 is_default_endian
, 0);
1531 if (emulated
!= EMULATE_DONE
)
1534 vcpu
->arch
.paddr_accessed
+= vcpu
->run
->mmio
.len
;
1535 vcpu
->arch
.mmio_vmx_copy_nums
--;
1536 vcpu
->arch
.mmio_vmx_offset
++;
1542 static int kvmppc_get_vmx_dword(struct kvm_vcpu
*vcpu
, int index
, u64
*val
)
1544 union kvmppc_one_reg reg
;
1549 kvmppc_get_vmx_dword_offset(vcpu
, vcpu
->arch
.mmio_vmx_offset
);
1551 if (vmx_offset
== -1)
1554 kvmppc_get_vsx_vr(vcpu
, index
, ®
.vval
);
1555 *val
= reg
.vsxval
[vmx_offset
];
1560 static int kvmppc_get_vmx_word(struct kvm_vcpu
*vcpu
, int index
, u64
*val
)
1562 union kvmppc_one_reg reg
;
1567 kvmppc_get_vmx_word_offset(vcpu
, vcpu
->arch
.mmio_vmx_offset
);
1569 if (vmx_offset
== -1)
1572 kvmppc_get_vsx_vr(vcpu
, index
, ®
.vval
);
1573 *val
= reg
.vsx32val
[vmx_offset
];
1578 static int kvmppc_get_vmx_hword(struct kvm_vcpu
*vcpu
, int index
, u64
*val
)
1580 union kvmppc_one_reg reg
;
1585 kvmppc_get_vmx_hword_offset(vcpu
, vcpu
->arch
.mmio_vmx_offset
);
1587 if (vmx_offset
== -1)
1590 kvmppc_get_vsx_vr(vcpu
, index
, ®
.vval
);
1591 *val
= reg
.vsx16val
[vmx_offset
];
1596 static int kvmppc_get_vmx_byte(struct kvm_vcpu
*vcpu
, int index
, u64
*val
)
1598 union kvmppc_one_reg reg
;
1603 kvmppc_get_vmx_byte_offset(vcpu
, vcpu
->arch
.mmio_vmx_offset
);
1605 if (vmx_offset
== -1)
1608 kvmppc_get_vsx_vr(vcpu
, index
, ®
.vval
);
1609 *val
= reg
.vsx8val
[vmx_offset
];
1614 int kvmppc_handle_vmx_store(struct kvm_vcpu
*vcpu
,
1615 unsigned int rs
, unsigned int bytes
, int is_default_endian
)
1618 unsigned int index
= rs
& KVM_MMIO_REG_MASK
;
1619 enum emulation_result emulated
= EMULATE_DONE
;
1621 if (vcpu
->arch
.mmio_vmx_copy_nums
> 2)
1622 return EMULATE_FAIL
;
1624 vcpu
->arch
.io_gpr
= rs
;
1626 while (vcpu
->arch
.mmio_vmx_copy_nums
) {
1627 switch (vcpu
->arch
.mmio_copy_type
) {
1628 case KVMPPC_VMX_COPY_DWORD
:
1629 if (kvmppc_get_vmx_dword(vcpu
, index
, &val
) == -1)
1630 return EMULATE_FAIL
;
1633 case KVMPPC_VMX_COPY_WORD
:
1634 if (kvmppc_get_vmx_word(vcpu
, index
, &val
) == -1)
1635 return EMULATE_FAIL
;
1637 case KVMPPC_VMX_COPY_HWORD
:
1638 if (kvmppc_get_vmx_hword(vcpu
, index
, &val
) == -1)
1639 return EMULATE_FAIL
;
1641 case KVMPPC_VMX_COPY_BYTE
:
1642 if (kvmppc_get_vmx_byte(vcpu
, index
, &val
) == -1)
1643 return EMULATE_FAIL
;
1646 return EMULATE_FAIL
;
1649 emulated
= kvmppc_handle_store(vcpu
, val
, bytes
,
1651 if (emulated
!= EMULATE_DONE
)
1654 vcpu
->arch
.paddr_accessed
+= vcpu
->run
->mmio
.len
;
1655 vcpu
->arch
.mmio_vmx_copy_nums
--;
1656 vcpu
->arch
.mmio_vmx_offset
++;
1662 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu
*vcpu
)
1664 struct kvm_run
*run
= vcpu
->run
;
1665 enum emulation_result emulated
= EMULATE_FAIL
;
1668 vcpu
->arch
.paddr_accessed
+= run
->mmio
.len
;
1670 if (!vcpu
->mmio_is_write
) {
1671 emulated
= kvmppc_handle_vmx_load(vcpu
,
1672 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1674 emulated
= kvmppc_handle_vmx_store(vcpu
,
1675 vcpu
->arch
.io_gpr
, run
->mmio
.len
, 1);
1679 case EMULATE_DO_MMIO
:
1680 run
->exit_reason
= KVM_EXIT_MMIO
;
1684 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1685 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
1686 run
->internal
.suberror
= KVM_INTERNAL_ERROR_EMULATION
;
1695 #endif /* CONFIG_ALTIVEC */
1697 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu
*vcpu
, struct kvm_one_reg
*reg
)
1700 union kvmppc_one_reg val
;
1703 size
= one_reg_size(reg
->id
);
1704 if (size
> sizeof(val
))
1707 r
= kvmppc_get_one_reg(vcpu
, reg
->id
, &val
);
1711 #ifdef CONFIG_ALTIVEC
1712 case KVM_REG_PPC_VR0
... KVM_REG_PPC_VR31
:
1713 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1717 kvmppc_get_vsx_vr(vcpu
, reg
->id
- KVM_REG_PPC_VR0
, &val
.vval
);
1719 case KVM_REG_PPC_VSCR
:
1720 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1724 val
= get_reg_val(reg
->id
, kvmppc_get_vscr(vcpu
));
1726 case KVM_REG_PPC_VRSAVE
:
1727 val
= get_reg_val(reg
->id
, kvmppc_get_vrsave(vcpu
));
1729 #endif /* CONFIG_ALTIVEC */
1739 if (copy_to_user((char __user
*)(unsigned long)reg
->addr
, &val
, size
))
1745 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu
*vcpu
, struct kvm_one_reg
*reg
)
1748 union kvmppc_one_reg val
;
1751 size
= one_reg_size(reg
->id
);
1752 if (size
> sizeof(val
))
1755 if (copy_from_user(&val
, (char __user
*)(unsigned long)reg
->addr
, size
))
1758 r
= kvmppc_set_one_reg(vcpu
, reg
->id
, &val
);
1762 #ifdef CONFIG_ALTIVEC
1763 case KVM_REG_PPC_VR0
... KVM_REG_PPC_VR31
:
1764 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1768 kvmppc_set_vsx_vr(vcpu
, reg
->id
- KVM_REG_PPC_VR0
, &val
.vval
);
1770 case KVM_REG_PPC_VSCR
:
1771 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1775 kvmppc_set_vscr(vcpu
, set_reg_val(reg
->id
, val
));
1777 case KVM_REG_PPC_VRSAVE
:
1778 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1782 kvmppc_set_vrsave(vcpu
, set_reg_val(reg
->id
, val
));
1784 #endif /* CONFIG_ALTIVEC */
1794 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu
*vcpu
)
1796 struct kvm_run
*run
= vcpu
->run
;
1801 if (vcpu
->mmio_needed
) {
1802 vcpu
->mmio_needed
= 0;
1803 if (!vcpu
->mmio_is_write
)
1804 kvmppc_complete_mmio_load(vcpu
);
1806 if (vcpu
->arch
.mmio_vsx_copy_nums
> 0) {
1807 vcpu
->arch
.mmio_vsx_copy_nums
--;
1808 vcpu
->arch
.mmio_vsx_offset
++;
1811 if (vcpu
->arch
.mmio_vsx_copy_nums
> 0) {
1812 r
= kvmppc_emulate_mmio_vsx_loadstore(vcpu
);
1813 if (r
== RESUME_HOST
) {
1814 vcpu
->mmio_needed
= 1;
1819 #ifdef CONFIG_ALTIVEC
1820 if (vcpu
->arch
.mmio_vmx_copy_nums
> 0) {
1821 vcpu
->arch
.mmio_vmx_copy_nums
--;
1822 vcpu
->arch
.mmio_vmx_offset
++;
1825 if (vcpu
->arch
.mmio_vmx_copy_nums
> 0) {
1826 r
= kvmppc_emulate_mmio_vmx_loadstore(vcpu
);
1827 if (r
== RESUME_HOST
) {
1828 vcpu
->mmio_needed
= 1;
1833 } else if (vcpu
->arch
.osi_needed
) {
1834 u64
*gprs
= run
->osi
.gprs
;
1837 for (i
= 0; i
< 32; i
++)
1838 kvmppc_set_gpr(vcpu
, i
, gprs
[i
]);
1839 vcpu
->arch
.osi_needed
= 0;
1840 } else if (vcpu
->arch
.hcall_needed
) {
1843 kvmppc_set_gpr(vcpu
, 3, run
->papr_hcall
.ret
);
1844 for (i
= 0; i
< 9; ++i
)
1845 kvmppc_set_gpr(vcpu
, 4 + i
, run
->papr_hcall
.args
[i
]);
1846 vcpu
->arch
.hcall_needed
= 0;
1848 } else if (vcpu
->arch
.epr_needed
) {
1849 kvmppc_set_epr(vcpu
, run
->epr
.epr
);
1850 vcpu
->arch
.epr_needed
= 0;
1854 kvm_sigset_activate(vcpu
);
1856 if (run
->immediate_exit
)
1859 r
= kvmppc_vcpu_run(vcpu
);
1861 kvm_sigset_deactivate(vcpu
);
1863 #ifdef CONFIG_ALTIVEC
1868 * We're already returning to userspace, don't pass the
1869 * RESUME_HOST flags along.
1878 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu
*vcpu
, struct kvm_interrupt
*irq
)
1880 if (irq
->irq
== KVM_INTERRUPT_UNSET
) {
1881 kvmppc_core_dequeue_external(vcpu
);
1885 kvmppc_core_queue_external(vcpu
, irq
);
1887 kvm_vcpu_kick(vcpu
);
1892 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu
*vcpu
,
1893 struct kvm_enable_cap
*cap
)
1901 case KVM_CAP_PPC_OSI
:
1903 vcpu
->arch
.osi_enabled
= true;
1905 case KVM_CAP_PPC_PAPR
:
1907 vcpu
->arch
.papr_enabled
= true;
1909 case KVM_CAP_PPC_EPR
:
1912 vcpu
->arch
.epr_flags
|= KVMPPC_EPR_USER
;
1914 vcpu
->arch
.epr_flags
&= ~KVMPPC_EPR_USER
;
1917 case KVM_CAP_PPC_BOOKE_WATCHDOG
:
1919 vcpu
->arch
.watchdog_enabled
= true;
1922 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1923 case KVM_CAP_SW_TLB
: {
1924 struct kvm_config_tlb cfg
;
1925 void __user
*user_ptr
= (void __user
*)(uintptr_t)cap
->args
[0];
1928 if (copy_from_user(&cfg
, user_ptr
, sizeof(cfg
)))
1931 r
= kvm_vcpu_ioctl_config_tlb(vcpu
, &cfg
);
1935 #ifdef CONFIG_KVM_MPIC
1936 case KVM_CAP_IRQ_MPIC
: {
1938 struct kvm_device
*dev
;
1941 f
= fdget(cap
->args
[0]);
1946 dev
= kvm_device_from_filp(f
.file
);
1948 r
= kvmppc_mpic_connect_vcpu(dev
, vcpu
, cap
->args
[1]);
1954 #ifdef CONFIG_KVM_XICS
1955 case KVM_CAP_IRQ_XICS
: {
1957 struct kvm_device
*dev
;
1960 f
= fdget(cap
->args
[0]);
1965 dev
= kvm_device_from_filp(f
.file
);
1968 r
= kvmppc_xive_connect_vcpu(dev
, vcpu
, cap
->args
[1]);
1970 r
= kvmppc_xics_connect_vcpu(dev
, vcpu
, cap
->args
[1]);
1976 #endif /* CONFIG_KVM_XICS */
1977 #ifdef CONFIG_KVM_XIVE
1978 case KVM_CAP_PPC_IRQ_XIVE
: {
1980 struct kvm_device
*dev
;
1983 f
= fdget(cap
->args
[0]);
1988 if (!xive_enabled())
1992 dev
= kvm_device_from_filp(f
.file
);
1994 r
= kvmppc_xive_native_connect_vcpu(dev
, vcpu
,
2000 #endif /* CONFIG_KVM_XIVE */
2001 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2002 case KVM_CAP_PPC_FWNMI
:
2004 if (!is_kvmppc_hv_enabled(vcpu
->kvm
))
2007 vcpu
->kvm
->arch
.fwnmi_enabled
= true;
2009 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2016 r
= kvmppc_sanity_check(vcpu
);
2021 bool kvm_arch_intc_initialized(struct kvm
*kvm
)
2023 #ifdef CONFIG_KVM_MPIC
2027 #ifdef CONFIG_KVM_XICS
2028 if (kvm
->arch
.xics
|| kvm
->arch
.xive
)
2034 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu
*vcpu
,
2035 struct kvm_mp_state
*mp_state
)
2040 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu
*vcpu
,
2041 struct kvm_mp_state
*mp_state
)
2046 long kvm_arch_vcpu_async_ioctl(struct file
*filp
,
2047 unsigned int ioctl
, unsigned long arg
)
2049 struct kvm_vcpu
*vcpu
= filp
->private_data
;
2050 void __user
*argp
= (void __user
*)arg
;
2052 if (ioctl
== KVM_INTERRUPT
) {
2053 struct kvm_interrupt irq
;
2054 if (copy_from_user(&irq
, argp
, sizeof(irq
)))
2056 return kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
2058 return -ENOIOCTLCMD
;
2061 long kvm_arch_vcpu_ioctl(struct file
*filp
,
2062 unsigned int ioctl
, unsigned long arg
)
2064 struct kvm_vcpu
*vcpu
= filp
->private_data
;
2065 void __user
*argp
= (void __user
*)arg
;
2069 case KVM_ENABLE_CAP
:
2071 struct kvm_enable_cap cap
;
2073 if (copy_from_user(&cap
, argp
, sizeof(cap
)))
2076 r
= kvm_vcpu_ioctl_enable_cap(vcpu
, &cap
);
2081 case KVM_SET_ONE_REG
:
2082 case KVM_GET_ONE_REG
:
2084 struct kvm_one_reg reg
;
2086 if (copy_from_user(®
, argp
, sizeof(reg
)))
2088 if (ioctl
== KVM_SET_ONE_REG
)
2089 r
= kvm_vcpu_ioctl_set_one_reg(vcpu
, ®
);
2091 r
= kvm_vcpu_ioctl_get_one_reg(vcpu
, ®
);
2095 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2096 case KVM_DIRTY_TLB
: {
2097 struct kvm_dirty_tlb dirty
;
2099 if (copy_from_user(&dirty
, argp
, sizeof(dirty
)))
2102 r
= kvm_vcpu_ioctl_dirty_tlb(vcpu
, &dirty
);
2115 vm_fault_t
kvm_arch_vcpu_fault(struct kvm_vcpu
*vcpu
, struct vm_fault
*vmf
)
2117 return VM_FAULT_SIGBUS
;
2120 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo
*pvinfo
)
2122 u32 inst_nop
= 0x60000000;
2123 #ifdef CONFIG_KVM_BOOKE_HV
2124 u32 inst_sc1
= 0x44000022;
2125 pvinfo
->hcall
[0] = cpu_to_be32(inst_sc1
);
2126 pvinfo
->hcall
[1] = cpu_to_be32(inst_nop
);
2127 pvinfo
->hcall
[2] = cpu_to_be32(inst_nop
);
2128 pvinfo
->hcall
[3] = cpu_to_be32(inst_nop
);
2130 u32 inst_lis
= 0x3c000000;
2131 u32 inst_ori
= 0x60000000;
2132 u32 inst_sc
= 0x44000002;
2133 u32 inst_imm_mask
= 0xffff;
2136 * The hypercall to get into KVM from within guest context is as
2139 * lis r0, r0, KVM_SC_MAGIC_R0@h
2140 * ori r0, KVM_SC_MAGIC_R0@l
2144 pvinfo
->hcall
[0] = cpu_to_be32(inst_lis
| ((KVM_SC_MAGIC_R0
>> 16) & inst_imm_mask
));
2145 pvinfo
->hcall
[1] = cpu_to_be32(inst_ori
| (KVM_SC_MAGIC_R0
& inst_imm_mask
));
2146 pvinfo
->hcall
[2] = cpu_to_be32(inst_sc
);
2147 pvinfo
->hcall
[3] = cpu_to_be32(inst_nop
);
2150 pvinfo
->flags
= KVM_PPC_PVINFO_FLAGS_EV_IDLE
;
2155 bool kvm_arch_irqchip_in_kernel(struct kvm
*kvm
)
2159 #ifdef CONFIG_KVM_MPIC
2160 ret
= ret
|| (kvm
->arch
.mpic
!= NULL
);
2162 #ifdef CONFIG_KVM_XICS
2163 ret
= ret
|| (kvm
->arch
.xics
!= NULL
);
2164 ret
= ret
|| (kvm
->arch
.xive
!= NULL
);
2170 int kvm_vm_ioctl_irq_line(struct kvm
*kvm
, struct kvm_irq_level
*irq_event
,
2173 if (!kvm_arch_irqchip_in_kernel(kvm
))
2176 irq_event
->status
= kvm_set_irq(kvm
, KVM_USERSPACE_IRQ_SOURCE_ID
,
2177 irq_event
->irq
, irq_event
->level
,
2183 int kvm_vm_ioctl_enable_cap(struct kvm
*kvm
,
2184 struct kvm_enable_cap
*cap
)
2192 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2193 case KVM_CAP_PPC_ENABLE_HCALL
: {
2194 unsigned long hcall
= cap
->args
[0];
2197 if (hcall
> MAX_HCALL_OPCODE
|| (hcall
& 3) ||
2200 if (!kvmppc_book3s_hcall_implemented(kvm
, hcall
))
2203 set_bit(hcall
/ 4, kvm
->arch
.enabled_hcalls
);
2205 clear_bit(hcall
/ 4, kvm
->arch
.enabled_hcalls
);
2209 case KVM_CAP_PPC_SMT
: {
2210 unsigned long mode
= cap
->args
[0];
2211 unsigned long flags
= cap
->args
[1];
2214 if (kvm
->arch
.kvm_ops
->set_smt_mode
)
2215 r
= kvm
->arch
.kvm_ops
->set_smt_mode(kvm
, mode
, flags
);
2219 case KVM_CAP_PPC_NESTED_HV
:
2221 if (!is_kvmppc_hv_enabled(kvm
) ||
2222 !kvm
->arch
.kvm_ops
->enable_nested
)
2224 r
= kvm
->arch
.kvm_ops
->enable_nested(kvm
);
2227 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2228 case KVM_CAP_PPC_SECURE_GUEST
:
2230 if (!is_kvmppc_hv_enabled(kvm
) || !kvm
->arch
.kvm_ops
->enable_svm
)
2232 r
= kvm
->arch
.kvm_ops
->enable_svm(kvm
);
2234 case KVM_CAP_PPC_DAWR1
:
2236 if (!is_kvmppc_hv_enabled(kvm
) || !kvm
->arch
.kvm_ops
->enable_dawr1
)
2238 r
= kvm
->arch
.kvm_ops
->enable_dawr1(kvm
);
2249 #ifdef CONFIG_PPC_BOOK3S_64
2251 * These functions check whether the underlying hardware is safe
2252 * against attacks based on observing the effects of speculatively
2253 * executed instructions, and whether it supplies instructions for
2254 * use in workarounds. The information comes from firmware, either
2255 * via the device tree on powernv platforms or from an hcall on
2256 * pseries platforms.
2258 #ifdef CONFIG_PPC_PSERIES
2259 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char
*cp
)
2261 struct h_cpu_char_result c
;
2264 if (!machine_is(pseries
))
2267 rc
= plpar_get_cpu_characteristics(&c
);
2268 if (rc
== H_SUCCESS
) {
2269 cp
->character
= c
.character
;
2270 cp
->behaviour
= c
.behaviour
;
2271 cp
->character_mask
= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31
|
2272 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED
|
2273 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30
|
2274 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2
|
2275 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV
|
2276 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED
|
2277 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF
|
2278 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS
|
2279 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST
;
2280 cp
->behaviour_mask
= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY
|
2281 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR
|
2282 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR
|
2283 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE
;
2288 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char
*cp
)
2294 static inline bool have_fw_feat(struct device_node
*fw_features
,
2295 const char *state
, const char *name
)
2297 struct device_node
*np
;
2300 np
= of_get_child_by_name(fw_features
, name
);
2302 r
= of_property_read_bool(np
, state
);
2308 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char
*cp
)
2310 struct device_node
*np
, *fw_features
;
2313 memset(cp
, 0, sizeof(*cp
));
2314 r
= pseries_get_cpu_char(cp
);
2318 np
= of_find_node_by_name(NULL
, "ibm,opal");
2320 fw_features
= of_get_child_by_name(np
, "fw-features");
2324 if (have_fw_feat(fw_features
, "enabled",
2325 "inst-spec-barrier-ori31,31,0"))
2326 cp
->character
|= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31
;
2327 if (have_fw_feat(fw_features
, "enabled",
2328 "fw-bcctrl-serialized"))
2329 cp
->character
|= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED
;
2330 if (have_fw_feat(fw_features
, "enabled",
2331 "inst-l1d-flush-ori30,30,0"))
2332 cp
->character
|= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30
;
2333 if (have_fw_feat(fw_features
, "enabled",
2334 "inst-l1d-flush-trig2"))
2335 cp
->character
|= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2
;
2336 if (have_fw_feat(fw_features
, "enabled",
2337 "fw-l1d-thread-split"))
2338 cp
->character
|= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV
;
2339 if (have_fw_feat(fw_features
, "enabled",
2340 "fw-count-cache-disabled"))
2341 cp
->character
|= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS
;
2342 if (have_fw_feat(fw_features
, "enabled",
2343 "fw-count-cache-flush-bcctr2,0,0"))
2344 cp
->character
|= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST
;
2345 cp
->character_mask
= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31
|
2346 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED
|
2347 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30
|
2348 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2
|
2349 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV
|
2350 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS
|
2351 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST
;
2353 if (have_fw_feat(fw_features
, "enabled",
2354 "speculation-policy-favor-security"))
2355 cp
->behaviour
|= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY
;
2356 if (!have_fw_feat(fw_features
, "disabled",
2357 "needs-l1d-flush-msr-pr-0-to-1"))
2358 cp
->behaviour
|= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR
;
2359 if (!have_fw_feat(fw_features
, "disabled",
2360 "needs-spec-barrier-for-bound-checks"))
2361 cp
->behaviour
|= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR
;
2362 if (have_fw_feat(fw_features
, "enabled",
2363 "needs-count-cache-flush-on-context-switch"))
2364 cp
->behaviour
|= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE
;
2365 cp
->behaviour_mask
= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY
|
2366 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR
|
2367 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR
|
2368 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE
;
2370 of_node_put(fw_features
);
2377 int kvm_arch_vm_ioctl(struct file
*filp
, unsigned int ioctl
, unsigned long arg
)
2379 struct kvm
*kvm __maybe_unused
= filp
->private_data
;
2380 void __user
*argp
= (void __user
*)arg
;
2384 case KVM_PPC_GET_PVINFO
: {
2385 struct kvm_ppc_pvinfo pvinfo
;
2386 memset(&pvinfo
, 0, sizeof(pvinfo
));
2387 r
= kvm_vm_ioctl_get_pvinfo(&pvinfo
);
2388 if (copy_to_user(argp
, &pvinfo
, sizeof(pvinfo
))) {
2395 #ifdef CONFIG_SPAPR_TCE_IOMMU
2396 case KVM_CREATE_SPAPR_TCE_64
: {
2397 struct kvm_create_spapr_tce_64 create_tce_64
;
2400 if (copy_from_user(&create_tce_64
, argp
, sizeof(create_tce_64
)))
2402 if (create_tce_64
.flags
) {
2406 r
= kvm_vm_ioctl_create_spapr_tce(kvm
, &create_tce_64
);
2409 case KVM_CREATE_SPAPR_TCE
: {
2410 struct kvm_create_spapr_tce create_tce
;
2411 struct kvm_create_spapr_tce_64 create_tce_64
;
2414 if (copy_from_user(&create_tce
, argp
, sizeof(create_tce
)))
2417 create_tce_64
.liobn
= create_tce
.liobn
;
2418 create_tce_64
.page_shift
= IOMMU_PAGE_SHIFT_4K
;
2419 create_tce_64
.offset
= 0;
2420 create_tce_64
.size
= create_tce
.window_size
>>
2421 IOMMU_PAGE_SHIFT_4K
;
2422 create_tce_64
.flags
= 0;
2423 r
= kvm_vm_ioctl_create_spapr_tce(kvm
, &create_tce_64
);
2427 #ifdef CONFIG_PPC_BOOK3S_64
2428 case KVM_PPC_GET_SMMU_INFO
: {
2429 struct kvm_ppc_smmu_info info
;
2430 struct kvm
*kvm
= filp
->private_data
;
2432 memset(&info
, 0, sizeof(info
));
2433 r
= kvm
->arch
.kvm_ops
->get_smmu_info(kvm
, &info
);
2434 if (r
>= 0 && copy_to_user(argp
, &info
, sizeof(info
)))
2438 case KVM_PPC_RTAS_DEFINE_TOKEN
: {
2439 struct kvm
*kvm
= filp
->private_data
;
2441 r
= kvm_vm_ioctl_rtas_define_token(kvm
, argp
);
2444 case KVM_PPC_CONFIGURE_V3_MMU
: {
2445 struct kvm
*kvm
= filp
->private_data
;
2446 struct kvm_ppc_mmuv3_cfg cfg
;
2449 if (!kvm
->arch
.kvm_ops
->configure_mmu
)
2452 if (copy_from_user(&cfg
, argp
, sizeof(cfg
)))
2454 r
= kvm
->arch
.kvm_ops
->configure_mmu(kvm
, &cfg
);
2457 case KVM_PPC_GET_RMMU_INFO
: {
2458 struct kvm
*kvm
= filp
->private_data
;
2459 struct kvm_ppc_rmmu_info info
;
2462 if (!kvm
->arch
.kvm_ops
->get_rmmu_info
)
2464 r
= kvm
->arch
.kvm_ops
->get_rmmu_info(kvm
, &info
);
2465 if (r
>= 0 && copy_to_user(argp
, &info
, sizeof(info
)))
2469 case KVM_PPC_GET_CPU_CHAR
: {
2470 struct kvm_ppc_cpu_char cpuchar
;
2472 r
= kvmppc_get_cpu_char(&cpuchar
);
2473 if (r
>= 0 && copy_to_user(argp
, &cpuchar
, sizeof(cpuchar
)))
2477 case KVM_PPC_SVM_OFF
: {
2478 struct kvm
*kvm
= filp
->private_data
;
2481 if (!kvm
->arch
.kvm_ops
->svm_off
)
2484 r
= kvm
->arch
.kvm_ops
->svm_off(kvm
);
2488 struct kvm
*kvm
= filp
->private_data
;
2489 r
= kvm
->arch
.kvm_ops
->arch_vm_ioctl(filp
, ioctl
, arg
);
2491 #else /* CONFIG_PPC_BOOK3S_64 */
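/*
 * Logical partition IDs: LPID 0 is reserved for the host, guest LPIDs are
 * handed out from an IDA sized by the platform code via kvmppc_init_lpid().
 */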
static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}