// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	{ NULL }
};

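/*
 * Note (illustrative, not part of the original source): each entry above
 * is surfaced by the generic KVM debugfs code as a file under the usual
 * debugfs mount point, e.g. /sys/kernel/debug/kvm/wfi_exit_stat; the
 * exact path depends on where debugfs is mounted on a given system.
 */
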
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

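/*
 * Worked example (illustrative): the ID userspace passes for X2 is
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 * KVM_REG_ARM_CORE_REG(regs.regs[2]), where KVM_REG_ARM_CORE_REG(name)
 * is offsetof(struct kvm_regs, name) / sizeof(__u32). Masking off the
 * architecture, size and coprocessor fields above leaves just that
 * 32-bit-word offset into struct kvm_regs.
 */
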
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

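/*
 * Illustrative note on the alignment check above: off counts in 32-bit
 * words, so a __u64 register spans two words and its offset must be a
 * multiple of 2, while a 128-bit V-register's offset must be a multiple
 * of 4. A misaligned offset cannot name a real register and is rejected.
 */
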
static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

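/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * compiled here): reading X0 through KVM_GET_ONE_REG on a vcpu fd.
 *
 *	struct kvm_one_reg one_reg;
 *	__u64 x0;
 *
 *	one_reg.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *		       KVM_REG_ARM_CORE |
 *		       KVM_REG_ARM_CORE_REG(regs.regs[0]);
 *	one_reg.addr = (__u64)(unsigned long)&x0;
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */
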
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;

		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

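/*
 * Worked example (illustrative): the vector-quadword numbers are packed
 * into a bitmap of u64 words, biased by SVE_VQ_MIN (1). So vq == 1 lives
 * in word 0, bit 0, and vq == 65 would live in word 1, bit 0;
 * vq_present() simply tests that bit.
 */
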
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

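/*
 * Illustrative userspace flow (an assumption for documentation, not
 * compiled here): the VLS pseudo-register must be written before the
 * vcpu's SVE configuration is finalized.
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0x1 }; (just VL 128 bits)
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)(unsigned long)vqs,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */
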
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

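/*
 * Worked example of the ID layout above: in the low bits of an SVE
 * register ID, bits [4:0] select the slice and bits [9:5] the register
 * number, so KVM_REG_ARM64_SVE_ZREG(2, 0) encodes Z2, slice 0. Since
 * only slice 0 exists for now, any nonzero slice field is rejected by
 * sve_reg_to_region() below.
 */
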
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

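/*
 * Worked example (illustrative): with a 512-bit vector length (vq == 4),
 * a Z-register occupies SVE_SIG_ZREG_SIZE(4) == 64 bytes in the kernel's
 * sve_state, while the UAPI slice KVM_SVE_ZREG_SIZE is a fixed 256 bytes
 * (sized for the architectural maximum). The region then gets klen == 64
 * and upad == 192, and get_sve_reg() zero-fills those trailing 192 bytes
 * in userspace.
 */
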
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	/* A NULL buffer means "count only": no indices are copied out. */
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

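/*
 * Worked arithmetic (illustrative): with the single slice that exists
 * today this is 1 * (16 P-regs + 32 Z-regs + 1 FFR) + 1 VLS
 * pseudo-register == 50 entries reported for an SVE-enabled vcpu.
 */
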
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

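/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * compiled here): querying pending SError state via the events API.
 *
 *	struct kvm_vcpu_events ev;
 *
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);
 *	if (ev.exception.serror_pending && ev.exception.serror_has_esr)
 *		handle_esr(ev.exception.serror_esr); (hypothetical helper)
 */
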
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

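/*
 * Illustrative userspace flow (an assumption for documentation, not
 * compiled here): the preferred target is queried on the VM fd and fed
 * back into vcpu initialisation.
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */
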
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vcpu pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

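/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * compiled here): enabling software breakpoints on a vcpu.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */
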
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}