// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
	       off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
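
/*
 * Illustration (sketch, not used by the code in this file): a core register
 * ID is the register's 32-bit-word offset into struct kvm_regs, OR-ed with
 * the KVM_REG_ARM64 | KVM_REG_ARM_CORE base and a size field. For X0:
 *
 *	u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *		 KVM_REG_ARM_CORE_REG(regs.regs[0]);
 *
 * core_reg_offset_from_id(id) strips everything except the offset, giving
 * back KVM_REG_ARM_CORE_REG(regs.regs[0]), i.e. the index of X0 when
 * struct kvm_regs is viewed as an array of __u32.
 */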
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}
static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
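
/*
 * Example usage from userspace (sketch; assumes a vcpu fd from
 * KVM_CREATE_VCPU, error handling omitted): reading X2 through the path
 * above via the KVM_GET_ONE_REG ioctl.
 *
 *	__u64 x2;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.regs[2]),
 *		.addr = (__u64)&x2,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */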
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;

		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i;

		for (i = 0; i < 16; i++)
			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
	}
out:
	return err;
}
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
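
/*
 * Illustration (sketch): KVM_REG_ARM64_SVE_VLS is a bitmap with one bit per
 * vector quadword count (vq). A 2048-bit (256-byte) vector length is
 * vq = 16, which with SVE_VQ_MIN == 1 lands in word vq_word(16) == 0 as
 * bit vq_mask(16) == (1ULL << 15); vq_present(vqs, 16) tests that bit.
 */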
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;		/* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
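
/*
 * Example usage from userspace (sketch; assumes KVM_ARM_VCPU_SVE was
 * requested in KVM_ARM_VCPU_INIT, error handling omitted). The VLS
 * pseudo-register must be written before the vcpu is finalized; afterwards
 * set_sve_vls() above returns -EPERM.
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)&vqs,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);	// query the host's set
 *	// ...optionally clear the topmost bits to lower the maximum VL...
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */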
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1
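
/*
 * Illustration (sketch): an SVE register ID packs the slice index into
 * bits [4:0] and the register number into bits [9:5]. For
 * KVM_REG_ARM64_SVE_ZREG(3, 0):
 *
 *	(id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT == 3	(Z3)
 *	(id & SVE_REG_SLICE_MASK)                  == 0	(the only slice, above)
 */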
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};
/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}
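
/*
 * Example usage from userspace (sketch; assumes the vcpu was finalized with
 * KVM_ARM_VCPU_FINALIZE, error handling omitted): reading guest Z0. The
 * buffer is always KVM_SVE_ZREG_SIZE (2048 bits); bytes beyond the vcpu's
 * maximum vector length read back as zero via the clear_user() of
 * region.upad in get_sve_reg() above.
 *
 *	__u8 z0[0x100];
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
 *		.addr = (__u64)&z0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */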
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
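
/*
 * Illustration (sketch): the indices emitted above carry the access size
 * that userspace must use, derived from the width of the kvm_regs field:
 *
 *	X0..X30, SP, PC, PSTATE, SP_EL1, ELR_EL1, SPSR[*]  -> KVM_REG_SIZE_U64
 *	fp_regs.vregs[0..31]                               -> KVM_REG_SIZE_U128
 *	fp_regs.fpsr, fp_regs.fpcr                         -> KVM_REG_SIZE_U32
 *
 * On an SVE-enabled vcpu, core_reg_size_from_offset() returns -EINVAL for
 * the V-register offsets, so those entries are skipped and never listed.
 */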
static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}
/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}
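
/*
 * Worked example (sketch): with the single slice currently supported, an
 * SVE-enabled vcpu therefore reports
 *	1 * (16 P-regs + 32 Z-regs + 1 FFR) + 1 VLS pseudo-reg = 50
 * SVE register indices.
 */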
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	num_regs++;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
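
/*
 * Example usage from userspace (sketch, error handling omitted): the usual
 * way to consume the enumeration above is to ask KVM_GET_REG_LIST for the
 * count first (the call fails with E2BIG and fills in n), then fetch the
 * IDs in one go:
 *
 *	struct kvm_reg_list n = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &n);
 *	list = malloc(sizeof(*list) + n.n * sizeof(__u64));
 *	list->n = n.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	// fills list->reg[]
 */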
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}
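
/*
 * Example usage from userspace (sketch, error handling omitted): injecting
 * an external data abort, e.g. after a failed MMIO access, goes through
 * KVM_SET_VCPU_EVENTS and ends up in the kvm_inject_dabt() call above; the
 * guest observes the abort on its next KVM_RUN.
 *
 *	struct kvm_vcpu_events events = { 0 };
 *
 *	events.exception.ext_dabt_pending = 1;
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
 */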
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
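
/*
 * Example usage from userspace (sketch, error handling omitted):
 * single-stepping the guest. KVM_RUN then exits with KVM_EXIT_DEBUG after
 * each instruction until the flags are cleared by another call.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */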
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}