// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present, kvm_arm_initialised;

static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

bool is_kvm_arm_initialised(void)
{
	return kvm_arm_initialised;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;
	u64 new_cap;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			&kvm->arch.flags);
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (!system_supports_mte() || kvm->created_vcpus) {
			r = -EINVAL;
		} else {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		new_cap = cap->args[0];

		mutex_lock(&kvm->slots_lock);
		/*
		 * To keep things simple, allow changing the chunk
		 * size only when no memory slots have been created.
		 */
		if (!kvm_are_all_memslots_empty(kvm)) {
			r = -EINVAL;
		} else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
			r = -EINVAL;
		} else {
			r = 0;
			kvm->arch.mmu.split_page_chunk_size = new_cap;
		}
		mutex_unlock(&kvm->slots_lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
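
/*
 * Illustrative userspace sketch (not part of this file): a VMM would
 * enable the eager page-split chunk size with KVM_ENABLE_CAP on the VM
 * file descriptor before creating any memslot; "vm_fd" is hypothetical,
 * and the chunk size must be one of the supported block sizes:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
 *		.args = { 1UL << 21 },	// e.g. 2MiB
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */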

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	ret = pkvm_init_host_vm(kvm);
	if (ret)
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	kvm_vgic_early_init(kvm);

	kvm_timer_init_vm(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kfree(kvm->arch.mpidr_data);
	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);

	kvm_arm_teardown_hypercalls(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_CPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		if (kvm)
			r = kvm->arch.mmu.split_page_chunk_size;
		else
			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
		break;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
		break;
	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
		r = BIT(0);
		break;
	default:
		r = 0;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/*
	 * Default value for the FP state, will be overloaded at load
	 * time if we support FP (pretty likely)
	 */
	vcpu->arch.fp_state = FP_STATE_FREE;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return kvm_share_hyp(vcpu, vcpu + 1);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
	 * previously run on the same physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_idx) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_idx;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
	kvm_arch_vcpu_load_debug_state_flags(vcpu);

	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_debug_state_flags(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}

static void kvm_init_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data = NULL;
	unsigned long c, mask, nr_entries;
	u64 aff_set = 0, aff_clr = ~0UL;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		aff_set |= aff;
		aff_clr &= aff;
	}

	/*
	 * A significant bit can be either 0 or 1, and will only appear in
	 * aff_set. Use aff_clr to weed out the useless stuff.
	 */
	mask = aff_set ^ aff_clr;
	nr_entries = BIT_ULL(hweight_long(mask));

	/*
	 * Don't let userspace fool us. If we need more than a single page
	 * to describe the compressed MPIDR array, just fall back to the
	 * iterative method. Single vcpu VMs do not need this either.
	 */
	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
			       GFP_KERNEL_ACCOUNT);

	if (!data)
		goto out;

	data->mpidr_mask = mask;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		u16 index = kvm_mpidr_index(data, aff);

		data->cmpidr_to_idx[index] = c;
	}

	kvm->arch.mpidr_data = data;
out:
	mutex_unlock(&kvm->arch.config_lock);
}
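
/*
 * Worked example (added for illustration): with four vCPUs whose MPIDR
 * affinities are 0x000, 0x001, 0x100 and 0x101, aff_set ends up as 0x101
 * and aff_clr as 0x000, so mask = 0x101 and nr_entries = BIT(2) = 4;
 * each vCPU index is then stored at the slot obtained by compressing its
 * affinity bits through the mask via kvm_mpidr_index().
 */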

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_init_mpidr_data(kvm);

	kvm_arm_vcpu_init_debug(vcpu);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	if (vcpu_has_nv(vcpu)) {
		ret = kvm_init_nv_sysregs(vcpu->kvm);
		if (ret)
			return ret;
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);
	if (ret)
		return ret;

	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;
	}

	if (!irqchip_in_kernel(kvm)) {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	/*
	 * Initialize traps for protected VMs.
	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
	 * the code is in place for first run initialization at EL2.
	 */
	if (kvm_vm_is_protected(kvm))
		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vcpu_set_flag(vcpu, IN_WFI);
	vgic_v4_put(vcpu);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	vcpu_clear_flag(vcpu, IN_WFI);
	vgic_v4_load(vcpu);
	preempt_enable();
}
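
/*
 * Note (added for orientation): kvm_vcpu_wfi() is the common blocking
 * path, used both when a guest's WFI instruction traps and by
 * kvm_vcpu_suspend() below while a vCPU sits in the suspended state.
 */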

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event accordingly.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_vcpu_reload_pmu(vcpu);

		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
			kvm_vcpu_pmu_restore_guest(vcpu);

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	if (vcpu_has_nv(vcpu))
		return true;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();
		*ret = 0;
		return true;
	}

	return kvm_request_pending(vcpu) ||
			xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	guest_state_exit_irqoff();

	return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	vcpu_load(vcpu);

	if (run->immediate_exit) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		/*
		 * The VMID allocator only tracks active VMIDs per
		 * physical CPU, and therefore the VMID allocated may not be
		 * preserved on VMID roll-over if the task was preempted,
		 * making a thread's VMID inactive. So we need to call
		 * kvm_arm_vmid_update() in non-preemptible context.
		 */
		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
		    has_vhe())
			__load_stage2(vcpu->arch.hw_mmu,
				      vcpu->arch.hw_mmu->arch);

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);
		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}
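
/*
 * For orientation (illustrative userspace sketch, not kernel code): a VMM
 * drives the function above in a loop through the KVM_RUN ioctl on a vCPU
 * file descriptor; "handle_mmio" is a hypothetical helper:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			handle_mmio(run);
 *	}
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 */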

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_id, irq_num;
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
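
	/*
	 * For reference (added note): the decoding above follows the
	 * KVM_IRQ_LINE ABI, per the KVM_ARM_IRQ_* shifts/masks in the uapi
	 * headers: bits [31:28] carry the high part of the vcpu index,
	 * [27:24] the irq type, [23:16] the low part of the vcpu index,
	 * and [15:0] the interrupt number.
	 */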

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
	}

	return -EINVAL;
}

static unsigned long system_supported_vcpu_features(void)
{
	unsigned long features = KVM_VCPU_VALID_FEATURES;

	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);

	if (!kvm_arm_support_pmu_v3())
		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (!system_supports_sve())
		clear_bit(KVM_ARM_VCPU_SVE, &features);

	if (!system_has_full_ptr_auth()) {
		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
	}

	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);

	return features;
}

static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
					const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	int i;

	if (features & ~KVM_VCPU_VALID_FEATURES)
		return -ENOENT;

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])
			return -ENOENT;
	}

	if (features & ~system_supported_vcpu_features())
		return -EINVAL;

	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together.
	 */
	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
		return -EINVAL;

	/* Disallow NV+SVE for the time being */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
	    test_bit(KVM_ARM_VCPU_SVE, &features))
		return -EINVAL;

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
		return 0;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm))
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
		return -EINVAL;

	return 0;
}

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
				  const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
			     KVM_VCPU_MAX_FEATURES);
}

static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	/*
	 * When the vCPU has a PMU, but no PMU is set for the guest
	 * yet, set the default one.
	 */
	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
		ret = kvm_arm_set_default_pmu(kvm);

	return ret;
}

static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
				 const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	struct kvm *kvm = vcpu->kvm;
	int ret = -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
	    kvm_vcpu_init_changed(vcpu, init))
		goto out_unlock;

	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);

	ret = kvm_setup_vcpu(vcpu);
	if (ret)
		goto out_unlock;

	/* Now we know what it is, we can reset it. */
	kvm_reset_vcpu(vcpu);

	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
	ret = 0;
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	int ret;

	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
	    init->target != kvm_target_cpu())
		return -EINVAL;

	ret = kvm_vcpu_init_check_features(vcpu, init);
	if (ret)
		return ret;

	if (!kvm_vcpu_initialized(vcpu))
		return __kvm_vcpu_set_target(vcpu, init);

	if (kvm_vcpu_init_changed(vcpu, init))
		return -EINVAL;

	kvm_reset_vcpu(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	bool power_off = false;
	int ret;

	/*
	 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
	 * reflecting it in the finalized feature set, thus limiting its scope
	 * to a single KVM_ARM_VCPU_INIT call.
	 */
	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
		power_off = true;
	}

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu_has_run_once(vcpu)) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			icache_inval_all_pou();
	}

	vcpu_reset_hcr(vcpu);
	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	spin_lock(&vcpu->arch.mp_state_lock);

	if (power_off)
		__kvm_arm_vcpu_power_off(vcpu);
	else
		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);

	spin_unlock(&vcpu->arch.mp_state_lock);

	return 0;
}
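
/*
 * Illustrative userspace flow (sketch; "vm_fd"/"vcpu_fd" are hypothetical):
 * a VMM typically queries KVM_ARM_PREFERRED_TARGET on the VM and feeds the
 * result, plus any feature bits, to KVM_ARM_VCPU_INIT on each vCPU:
 *
 *	struct kvm_vcpu_init init;
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	init.features[0] |= 1UL << KVM_ARM_VCPU_PSCI_0_2;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */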

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}
*vcpu
,
1491 struct kvm_vcpu_events
*events
)
1493 memset(events
, 0, sizeof(*events
));
1495 return __kvm_arm_vcpu_get_events(vcpu
, events
);
1498 static int kvm_arm_vcpu_set_events(struct kvm_vcpu
*vcpu
,
1499 struct kvm_vcpu_events
*events
)
1503 /* check whether the reserved field is zero */
1504 for (i
= 0; i
< ARRAY_SIZE(events
->reserved
); i
++)
1505 if (events
->reserved
[i
])
1508 /* check whether the pad field is zero */
1509 for (i
= 0; i
< ARRAY_SIZE(events
->exception
.pad
); i
++)
1510 if (events
->exception
.pad
[i
])
1513 return __kvm_arm_vcpu_set_events(vcpu
, events
);

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		/*
		 * We could owe a reset due to PSCI. Handle the pending reset
		 * here to ensure userspace register accesses are ordered after
		 * the reset.
		 */
		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
	default:
		return -ENODEV;
	}
}

static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
		return kvm_vm_smccc_has_attr(kvm, attr);
	default:
		return -ENXIO;
	}
}

static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
		return kvm_vm_smccc_set_attr(kvm, attr);
	default:
		return -ENXIO;
	}
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		struct kvm_vcpu_init init = {
			.target = KVM_ARM_TARGET_GENERIC_V8,
		};

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	case KVM_ARM_MTE_COPY_TAGS: {
		struct kvm_arm_copy_mte_tags copy_tags;

		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
			return -EFAULT;
		return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
	}
	case KVM_ARM_SET_COUNTER_OFFSET: {
		struct kvm_arm_counter_offset offset;

		if (copy_from_user(&offset, argp, sizeof(offset)))
			return -EFAULT;
		return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
	}
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;

		return kvm_vm_has_attr(kvm, &attr);
	}
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;

		return kvm_vm_set_attr(kvm, &attr);
	}
	case KVM_ARM_GET_REG_WRITABLE_MASKS: {
		struct reg_mask_range range;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;
		return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
	}
	default:
		return -EINVAL;
	}
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

void unlock_all_vcpus(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);

	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	unsigned long c;

	lockdep_assert_held(&kvm->lock);

	/*
	 * Any time a vcpu is in an ioctl (including running), the
	 * core KVM code tries to grab the vcpu->mutex.
	 *
	 * By grabbing the vcpu->mutex of all VCPUs we ensure that no
	 * other VCPUs can fiddle with the state while we access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
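
/*
 * Typical calling pattern (sketch, mirroring how in-kernel users such as
 * the vGIC code use these helpers): take kvm->lock, try to lock every
 * vCPU, and back off if any of them is busy in an ioctl:
 *
 *	mutex_lock(&kvm->lock);
 *	if (!lock_all_vcpus(kvm)) {
 *		mutex_unlock(&kvm->lock);
 *		return -EBUSY;
 *	}
 *	// ... mutate VM-wide state safely ...
 *	unlock_all_vcpus(kvm);
 *	mutex_unlock(&kvm->lock);
 */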

static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];

static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
{
	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
}

static int kvm_init_vector_slots(void)
{
	int err;
	void *base;

	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);

	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);

	if (kvm_system_needs_idmapped_vectors() &&
	    !is_protected_kvm_enabled()) {
		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
					       __BP_HARDEN_HYP_VECS_SZ, &base);
		if (err)
			return err;
	}

	kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
	return 0;
}

static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	unsigned long tcr;

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 * Also drop the KASAN tag which gets in the way...
	 */
	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	params->mair_el2 = read_sysreg(mair_el1);

	tcr = read_sysreg(tcr_el1);
	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		tcr |= TCR_EPD1_MASK;
	} else {
		tcr &= TCR_EL2_MASK;
		tcr |= TCR_EL2_RES1;
	}
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= TCR_T0SZ(hyp_va_bits);
	tcr &= ~TCR_EL2_PS_MASK;
	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
	if (kvm_lpa2_is_enabled())
		tcr |= TCR_EL2_DS;
	params->tcr_el2 = tcr;

	params->pgd_pa = kvm_mmu_get_httbr();
	if (is_protected_kvm_enabled())
		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
	else
		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
	if (cpus_have_final_cap(ARM64_KVM_HVHE))
		params->hcr_el2 |= HCR_E2H;
	params->vttbr = params->vtcr = 0;

	/*
	 * Flush the init params from the data cache because the struct will
	 * be read while the MMU is off.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));
}

static void hyp_install_host_vector(void)
{
	struct kvm_nvhe_init_params *params;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_*_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
}

static void cpu_init_hyp_mode(void)
{
	hyp_install_host_vector();

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
static void cpu_set_hyp_vector(void)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
	void *vector = hyp_spectre_vector_selector[data->slot];

	if (!is_protected_kvm_enabled())
		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
	else
		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
}

static void cpu_hyp_init_context(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	if (!is_kernel_in_hyp_mode())
		cpu_init_hyp_mode();
}

static void cpu_hyp_init_features(void)
{
	cpu_set_hyp_vector();
	kvm_arm_init_debug();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void cpu_hyp_reinit(void)
{
	cpu_hyp_reset();
	cpu_hyp_init_context();
	cpu_hyp_init_features();
}

static void cpu_hyp_init(void *discard)
{
	if (!__this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_hyp_initialized, 1);
	}
}

static void cpu_hyp_uninit(void *discard)
{
	if (__this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_hyp_initialized, 0);
	}
}

int kvm_arch_hardware_enable(void)
{
	/*
	 * Most calls to this function are made with migration
	 * disabled, but not with preemption disabled. The former is
	 * enough to ensure correctness, but most of the helpers
	 * expect the latter and will throw a tantrum otherwise.
	 */
	preempt_disable();

	cpu_hyp_init(NULL);

	kvm_vgic_cpu_up();
	kvm_timer_cpu_up();

	preempt_enable();

	return 0;
}

void kvm_arch_hardware_disable(void)
{
	kvm_timer_cpu_down();
	kvm_vgic_cpu_down();

	if (!is_protected_kvm_enabled())
		cpu_hyp_uninit(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_hyp_initialized is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_hyp_initialized))
			/*
			 * don't update kvm_hyp_initialized here
			 * so that the hyp will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_hyp_initialized))
			/* The hyp was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void __init hyp_cpu_pm_init(void)
{
}
static inline void __init hyp_cpu_pm_exit(void)
{
}
#endif

static void __init init_cpu_logical_map(void)
{
	unsigned int cpu;

	/*
	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
	 * Only copy the set of online CPUs whose features have been checked
	 * against the finalized system capabilities. The hypervisor will not
	 * allow any other CPUs from the `possible` set to boot.
	 */
	for_each_online_cpu(cpu)
		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}

#define init_psci_0_1_impl_state(config, what)	\
	config.psci_0_1_ ## what ## _implemented = psci_ops.what

static bool __init init_psci_relay(void)
{
	/*
	 * If PSCI has not been initialized, protected KVM cannot install
	 * itself on newly booted CPUs.
	 */
	if (!psci_ops.get_version) {
		kvm_err("Cannot initialize protected mode without PSCI\n");
		return false;
	}

	kvm_host_psci_config.version = psci_ops.get_version();
	kvm_host_psci_config.smccc_version = arm_smccc_get_version();

	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
	}
	return true;
}
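
/*
 * For clarity (added note): the token-pasting macro above expands, e.g.,
 * init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on) into
 * kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on,
 * i.e. it records whether the firmware provides each PSCI 0.1 callback.
 */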

static int __init init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(cpu_hyp_init, NULL, 1);

	/*
	 * Register CPU lower-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_register_perf_callbacks(NULL);

out:
	if (err)
		hyp_cpu_pm_exit();

	if (err || !is_protected_kvm_enabled())
		on_each_cpu(cpu_hyp_uninit, NULL, 1);

	return err;
}

static void __init teardown_subsystems(void)
{
	kvm_unregister_perf_callbacks();
	hyp_cpu_pm_exit();
}

static void __init teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
	}
}

static int __init do_pkvm_init(u32 hyp_va_bits)
{
	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
	int ret;

	preempt_disable();
	cpu_hyp_init_context();
	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
				num_possible_cpus(), kern_hyp_va(per_cpu_base),
				hyp_va_bits);
	cpu_hyp_init_features();

	/*
	 * The stub hypercalls are now disabled, so set our local flag to
	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
	 */
	__this_cpu_write(kvm_hyp_initialized, 1);
	preempt_enable();

	return ret;
}

static u64 get_hyp_id_aa64pfr0_el1(void)
{
	/*
	 * Track whether the system isn't affected by spectre/meltdown in the
	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
	 * Although this is per-CPU, we make it global for simplicity, e.g., not
	 * to have to worry about vcpu migration.
	 *
	 * Unlike for non-protected VMs, userspace cannot override this for
	 * protected VMs.
	 */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));

	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);

	return val;
}

static void kvm_hyp_init_symbols(void)
{
	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
	kvm_nvhe_sym(__icache_flags) = __icache_flags;
	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
}

static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
{
	void *addr = phys_to_virt(hyp_mem_base);
	int ret;

	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
	if (ret)
		return ret;

	ret = do_pkvm_init(hyp_va_bits);
	if (ret)
		return ret;

	free_hyp_pgds();

	return 0;
}

static void pkvm_hyp_init_ptrauth(void)
{
	struct kvm_cpu_context *hyp_ctxt;
	int cpu;

	for_each_possible_cpu(cpu) {
		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
	}
}

/* Inits Hyp-mode on all online CPUs */
static int __init init_hyp_mode(void)
{
	u32 hyp_va_bits;
	int cpu;
	int err = -ENOMEM;

	/*
	 * The protected Hyp-mode cannot be initialized if the memory pool
	 * allocation has failed.
	 */
	if (is_protected_kvm_enabled() && !hyp_mem_base)
		goto out_err;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init(&hyp_va_bits);
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map .hyp.rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	/*
	 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
	 * section thanks to an assertion in the linker script. Map it RW and
	 * the rest of .bss RO.
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
	if (err) {
		kvm_err("Cannot map hyp bss section: %d\n", err);
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}

		/*
		 * Save the stack PA in nvhe_init_params. This will be needed
		 * to recreate the stack mapping in protected nVHE mode.
		 * __hyp_pa() won't do the right thing there, since the stack
		 * has been mapped in the flexible private VA space.
		 */
		params->stack_pa = __pa(stack_page);
	}

	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		/* Map Hyp percpu pages */
		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}

		/* Prepare the CPU initialization parameters */
		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
	}

	kvm_hyp_init_symbols();

	if (is_protected_kvm_enabled()) {
		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
			pkvm_hyp_init_ptrauth();

		init_cpu_logical_map();

		if (!init_psci_relay()) {
			err = -ENODEV;
			goto out_err;
		}

		err = kvm_hyp_init_protection(hyp_va_bits);
		if (err) {
			kvm_err("Failed to init hyp memory protection\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mpidr &= MPIDR_HWID_BITMASK;

	if (kvm->arch.mpidr_data) {
		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);

		vcpu = kvm_get_vcpu(kvm,
				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
			vcpu = NULL;

		return vcpu;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/* Initialize Hyp-mode and memory mappings on all CPUs */
static __init int kvm_arm_init(void)
{
	int err;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_get_mode() == KVM_MODE_NONE) {
		kvm_info("KVM disabled from command line\n");
		return -ENODEV;
	}

	err = kvm_sys_reg_table_init();
	if (err) {
		kvm_info("Error initializing system register tables");
		return err;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	err = kvm_set_ipa_limit();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	err = kvm_arm_vmid_alloc_init();
	if (err) {
		kvm_err("Failed to initialize VMID allocator.\n");
		return err;
	}

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = kvm_init_vector_slots();
	if (err) {
		kvm_err("Cannot initialise vector slots\n");
		goto out_hyp;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (is_protected_kvm_enabled()) {
		kvm_info("Protected nVHE mode initialized successfully\n");
	} else if (in_hyp_mode) {
		kvm_info("VHE mode initialized successfully\n");
	} else {
		kvm_info("Hyp mode initialized successfully\n");
	}

	/*
	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
	 * hypervisor protection is finalized.
	 */
	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (err)
		goto out_subs;

	kvm_arm_initialised = true;

	return 0;

out_subs:
	teardown_subsystems();
out_hyp:
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	kvm_arm_vmid_alloc_free();
	return err;
}

static int __init early_kvm_mode_cfg(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "none") == 0) {
		kvm_mode = KVM_MODE_NONE;
		return 0;
	}

	if (!is_hyp_mode_available()) {
		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
		return 0;
	}

	if (strcmp(arg, "protected") == 0) {
		if (!is_kernel_in_hyp_mode())
			kvm_mode = KVM_MODE_PROTECTED;
		else
			pr_warn_once("Protected KVM not available with VHE\n");

		return 0;
	}

	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_DEFAULT;
		return 0;
	}

	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_NV;
		return 0;
	}

	return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);

enum kvm_mode kvm_get_mode(void)
{
	return kvm_mode;
}

module_init(kvm_arm_init);