// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}
#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
	PURE_EL2_SYSREG(  VPIDR_EL2	);
	PURE_EL2_SYSREG(  VMPIDR_EL2	);
	PURE_EL2_SYSREG(  ACTLR_EL2	);
	PURE_EL2_SYSREG(  HCR_EL2	);
	PURE_EL2_SYSREG(  MDCR_EL2	);
	PURE_EL2_SYSREG(  HSTR_EL2	);
	PURE_EL2_SYSREG(  HACR_EL2	);
	PURE_EL2_SYSREG(  VTTBR_EL2	);
	PURE_EL2_SYSREG(  VTCR_EL2	);
	PURE_EL2_SYSREG(  RVBAR_EL2	);
	PURE_EL2_SYSREG(  TPIDR_EL2	);
	PURE_EL2_SYSREG(  HPFAR_EL2	);
	PURE_EL2_SYSREG(  CNTHCTL_EL2	);
	MAPPED_EL2_SYSREG(SCTLR_EL2,  SCTLR_EL1,
			  translate_sctlr_el2_to_sctlr_el1);
	MAPPED_EL2_SYSREG(CPTR_EL2,   CPACR_EL1,
			  translate_cptr_el2_to_cpacr_el1);
	MAPPED_EL2_SYSREG(TTBR0_EL2,  TTBR0_EL1,
			  translate_ttbr0_el2_to_ttbr0_el1);
	MAPPED_EL2_SYSREG(TTBR1_EL2,  TTBR1_EL1,  NULL);
	MAPPED_EL2_SYSREG(TCR_EL2,    TCR_EL1,
			  translate_tcr_el2_to_tcr_el1);
	MAPPED_EL2_SYSREG(VBAR_EL2,   VBAR_EL1,   NULL);
	MAPPED_EL2_SYSREG(AFSR0_EL2,  AFSR0_EL1,  NULL);
	MAPPED_EL2_SYSREG(AFSR1_EL2,  AFSR1_EL1,  NULL);
	MAPPED_EL2_SYSREG(ESR_EL2,    ESR_EL1,    NULL);
	MAPPED_EL2_SYSREG(FAR_EL2,    FAR_EL1,    NULL);
	MAPPED_EL2_SYSREG(MAIR_EL2,   MAIR_EL1,   NULL);
	MAPPED_EL2_SYSREG(AMAIR_EL2,  AMAIR_EL1,  NULL);
	MAPPED_EL2_SYSREG(ELR_EL2,    ELR_EL1,    NULL);
	MAPPED_EL2_SYSREG(SPSR_EL2,   SPSR_EL1,   NULL);
	default:
		return false;
	}
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory
		 * copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_sys_reg(vcpu, reg) = val;

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
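/*
 * Worked example of the arithmetic above (illustrative numbers, not
 * mandated by the architecture): a CTR_EL0.DminLine value of 4 means
 * Log2(words) == 4, i.e. 16 words == 64 bytes per line, so the
 * function returns 4 + 2 == 6 == Log2(64).
 */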
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of such traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
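/*
 * Example of the fabricated value, assuming a 64-byte minimum line size:
 * get_min_cache_line_size() returns 6, so CCSIDR_EL1.LineSize is set to
 * 6 - 4 == 2, matching the architectural encoding of
 * Log2(words per line) - 2 (Log2(16) - 2 == 2). Every other field stays
 * zero: 1 set, 1 way, as documented above.
 */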
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
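/*
 * Note the lazy allocation above: vcpu->arch.ccsidr stays NULL (and
 * get_ccsidr() keeps fabricating values) until userspace writes a
 * CCSIDR value that actually differs from the fabricated one.
 */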
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
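/*
 * Example: for a register tagged AA32_HI, an AArch32 access only sees
 * bits [63:32] of the 64-bit register, shifted down by 32; the default
 * case passes the whole 64-bit value through unshifted.
 */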
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
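/*
 * E.g. for an AA32_LO (AArch32) write, mask == GENMASK_ULL(31, 0) and
 * shift == 0: only the bottom 32 bits of *dbg_reg are replaced and the
 * top bits are preserved, exactly as described above.
 */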
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}
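/*
 * Worked example (illustrative vcpu_id): vcpu_id == 20 == 0b10100 maps
 * to Aff0 == 4 and Aff1 == 1, so with the RES1 bit (bit 31) set,
 * MPIDR_EL1 reads back as 0x80000104.
 */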
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}
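/*
 * E.g. pmu_access_el0_disabled() below passes ARMV8_PMU_USERENR_EN: an
 * EL0 access is allowed if PMUSERENR_EL0.EN is set or the vCPU is in a
 * privileged mode; otherwise an UNDEF is injected and the trap handler
 * bails out.
 */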
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
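/*
 * The sysreg encoding packs the counter index into CRm/Op2. Example:
 * PMEVCNTR11_EL0 has CRm == 9 and Op2 == 3, giving
 * idx == ((9 & 3) << 3) | (3 & 7) == 11.
 */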
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	bool set;

	val &= kvm_pmu_valid_counter_mask(vcpu);

	switch (r->reg) {
	case PMOVSSET_EL0:
		/* CRm[1] being set indicates a SET register, and CLR otherwise */
		set = r->CRm & 2;
		break;
	default:
		/* Op2[0] being set indicates a SET register, and CLR otherwise */
		set = r->Op2 & 1;
		break;
	}

	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

	return 0;
}
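/*
 * E.g. PMCNTENSET_EL0 (Op2 == 1) and PMCNTENCLR_EL0 (Op2 == 2) are
 * backed by the same PMCNTENSET_EL0 storage; the encoding alone decides
 * whether the masked bits are ORed in or cleared.
 */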
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}
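/*
 * Concretely: a userspace write that advertises, say, a higher PMUVer
 * than the sanitised limit makes the checks above return -E2BIG, which
 * set_id_reg() further down translates into the -EINVAL that userspace
 * actually observes.
 */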
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
 */
static inline bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}
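/*
 * E.g. ID_AA64PFR0_EL1 is encoded as (3, 0, 0, 4, 0) and matches, while
 * the CRm == 0 space (MIDR_EL1 and friends) deliberately does not.
 */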
static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}

#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			       \
({									       \
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		       \
	(val) &= ~reg##_##field##_MASK;					       \
	(val) |= FIELD_PREP(reg##_##field##_MASK,			       \
			    min(__f_val,				       \
				(u64)SYS_FIELD_VALUE(reg, field, limit)));     \
	(val);								       \
})
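/*
 * Example use, as in read_sanitised_id_aa64dfr0_el1() below:
 *
 *	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
 *
 * replaces the DebugVer field with min(DebugVer, V8P8) and leaves every
 * other field untouched.
 */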
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}
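/*
 * Userspace reaches this path via KVM_SET_ONE_REG. A minimal sketch
 * (illustrative only: error handling elided, vcpu_fd assumed to be an
 * open vCPU file descriptor):
 *
 *	__u64 val = ...;				  // desired value
 *	struct kvm_one_reg reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 4, 0),	  // ID_AA64PFR0_EL1
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);		  // -EINVAL/-EBUSY as above
 */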
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	/* This is what we mean by invariant: you can't change it. */
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
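/*
 * Worked example (illustrative): with CTR_EL0.IDC == 0 and
 * CTR_EL0.DIC == 0, the else branch plus the CACHE_TYPE_INST OR-in
 * yield separate L1 I and D caches with LoUU == LoUIS == LoC == 1,
 * i.e. a single-level hierarchy.
 */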
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * VNCR page, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of VNCR-backed register");
}

static bool bad_redir_trap(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * corresponding EL1, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of EL2 register redirected to EL1");
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

#define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
					   const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = hidden_user_visibility,	\
}

/*
 * Since the reset() callback and the field val are not used for idregs, they
 * are repurposed for idreg-specific duties:
 * reset() returns the KVM sanitised register value; the value is the same as
 * the host kernel sanitised value if there is no KVM sanitisation.
 * val is used as a mask indicating the writable fields for the idreg; only
 * bits with 1 are writable from userspace. This mask might not be necessary
 * in the future whenever all ID registers are enabled as writable from
 * userspace.
 */

#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg			\

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = mask,				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 val = r->val;

	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
		val |= HCR_E2H;

	return __vcpu_sys_reg(vcpu, r->reg) = val;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility,
	  .reset = read_sanitised_id_dfr0_el1,
	  .val = ID_DFR0_EL1_PerfMon_MASK |
		 ID_DFR0_EL1_CopDbg_MASK, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_reg,
	  .reset = read_sanitised_id_aa64pfr0_el1,
	  .val = ~(ID_AA64PFR0_EL1_AMU |
		   ID_AA64PFR0_EL1_MPAM |
		   ID_AA64PFR0_EL1_SVE |
		   ID_AA64PFR0_EL1_RAS |
		   ID_AA64PFR0_EL1_GIC |
		   ID_AA64PFR0_EL1_AdvSIMD |
		   ID_AA64PFR0_EL1_FP), },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64dfr0_el1,
	  .reset = read_sanitised_id_aa64dfr0_el1,
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK |
		 ID_AA64DFR0_EL1_DebugVer_MASK, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
					ID_AA64ISAR1_EL1_GPA |
					ID_AA64ISAR1_EL1_API |
					ID_AA64ISAR1_EL1_APA)),
	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
					ID_AA64ISAR2_EL1_APA3 |
					ID_AA64ISAR2_EL1_GPA3)),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
2335 ID_WRITABLE(ID_AA64MMFR0_EL1
, ~(ID_AA64MMFR0_EL1_RES0
|
2336 ID_AA64MMFR0_EL1_TGRAN4_2
|
2337 ID_AA64MMFR0_EL1_TGRAN64_2
|
2338 ID_AA64MMFR0_EL1_TGRAN16_2
)),
	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
					ID_AA64MMFR1_EL1_HCX |
					ID_AA64MMFR1_EL1_XNX |
					ID_AA64MMFR1_EL1_TWED |
					ID_AA64MMFR1_EL1_VH |
					ID_AA64MMFR1_EL1_VMIDBits)),
	ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
					ID_AA64MMFR2_EL1_EVT |
					ID_AA64MMFR2_EL1_FWB |
					ID_AA64MMFR2_EL1_IDS |
					ID_AA64MMFR2_EL1_NV |
					ID_AA64MMFR2_EL1_CCIDX)),
	ID_SANITISED(ID_AA64MMFR3_EL1),
	ID_SANITISED(ID_AA64MMFR4_EL1),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
	{ SYS_DESC(SYS_ELR_EL1), access_elr },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
	{ PMU_SYS_REG(PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	/*
	 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr },
	{ PMU_SYS_REG(PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0,
	  .get_user = get_pmreg, .set_user = set_pmreg },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
	EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
	EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
	EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
	EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
	EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
	EL2_REG_VNCR(HACR_EL2, reset_val, 0),

	EL2_REG_VNCR(HCRX_EL2, reset_val, 0),

	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
	EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
	EL2_REG_VNCR(VTCR_EL2, reset_val, 0),

	{ SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
	EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
	EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
	EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
	EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
	EL2_REG_REDIR(ELR_EL2, reset_val, 0),
	{ SYS_DESC(SYS_SP_EL1), access_sp_el1 },

	/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
	{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },

	{ SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
	EL2_REG_REDIR(ESR_EL2, reset_val, 0),
	{ SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },

	EL2_REG_REDIR(FAR_EL2, reset_val, 0),
	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),

	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),

	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_RMR_EL2), trap_undef },

	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),

	EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),

	EL12_REG(CNTKCTL, access_rw, reset_val, 0),

	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};

static struct sys_reg_desc sys_insn_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
};

static const struct sys_reg_desc *first_idreg;
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);

		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
			     (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
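
/*
 * Note (editorial addition, not in the original source): the ID fields
 * above can be copied into DBGDIDR unmodified because both DBGDIDR and
 * ID_AA64DFR0_EL1 encode WRPs/BRPs/CTX_CMPs as "count minus one", and
 * the DebugVer encodings line up as well. Bit 15 is RES1 in DBGDIDR,
 * and bits 14/12 reflect EL3 presence.
 */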
/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
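
/*
 * Illustrative expansion (editorial addition): DBG_BCR_BVR_WCR_WVR(1)
 * emits the four entries for DBGBVR1/DBGBCR1/DBGWVR1/DBGWCR1 at CRm=c1,
 * Op2=4..7, while DBGBXVR(1) maps DBGBXVR1 (CRn=c1, Op2=1) onto the
 * upper half of DBGBVR1_EL1 via the AA32(HI) annotation, as described
 * in the comment above.
 */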
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
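
/*
 * Worked example (editorial addition): PMU_PMEVCNTR(21) yields CRn=c14,
 * CRm = 0b1000 | ((21 >> 3) & 0x3) = 0b1010 (c10) and Op2 = 21 & 0x7 = 5,
 * which is the architected cp15 encoding of PMEVCNTR21.
 */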
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch Timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },

	/* CCSIDR2 */
	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },

	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *		 call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: &struct sys_reg_desc
 * @nr_global: size of the @global array
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
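
/*
 * Example (editorial addition): for a guest "mcrr p15, 2, r2, r3, c14"
 * (a write to the 64bit CNTP_CVAL), Rt=r2 supplies bits [31:0] and
 * Rt2=r3 bits [63:32] of params.regval, which the shared 64bit backend
 * then consumes directly; the read path splits the value back up.
 */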
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * to them.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}
/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from
	 * AArch32. Avoid conflicting with future expansion of AArch64
	 * feature registers and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: &struct sys_reg_params
 * @global: &struct sys_reg_desc
 * @nr_global: size of the @global array
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	print_sys_reg_msg(params,
			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);

	return false;
}
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;
	u8 *iter;

	mutex_lock(&kvm->arch.config_lock);

	iter = &kvm->arch.idreg_debugfs_iter;
	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
	    *iter == (u8)~0) {
		*iter = *pos;
		if (*iter >= KVM_ARM_ID_REG_NUM)
			iter = NULL;
	} else {
		iter = ERR_PTR(-EBUSY);
	}

	mutex_unlock(&kvm->arch.config_lock);

	return iter;
}

static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kvm *kvm = s->private;

	(*pos)++;

	if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
		kvm->arch.idreg_debugfs_iter++;

		return &kvm->arch.idreg_debugfs_iter;
	}

	return NULL;
}

static void idregs_debug_stop(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;

	if (IS_ERR(v))
		return;

	mutex_lock(&kvm->arch.config_lock);

	kvm->arch.idreg_debugfs_iter = ~0;

	mutex_unlock(&kvm->arch.config_lock);
}

static int idregs_debug_show(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;
	const struct sys_reg_desc *desc;

	desc = first_idreg + kvm->arch.idreg_debugfs_iter;

	if (!desc->name)
		return 0;

	seq_printf(s, "%20s:\t%016llx\n",
		   desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));

	return 0;
}

static const struct seq_operations idregs_debug_sops = {
	.start	= idregs_debug_start,
	.next	= idregs_debug_next,
	.stop	= idregs_debug_stop,
	.show	= idregs_debug_show,
};

DEFINE_SEQ_ATTRIBUTE(idregs_debug);
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
{
	kvm->arch.idreg_debugfs_iter = ~0;

	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
			    &idregs_debug_fops);
}
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *idreg = first_idreg;
	u32 id = reg_to_encoding(idreg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Initialize all idregs */
	while (is_id_reg(id)) {
		IDREG(kvm, id) = idreg->reset(vcpu, idreg);

		idreg++;
		id = reg_to_encoding(idreg);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	kvm_reset_id_regs(vcpu);

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (is_id_reg(reg_to_encoding(r)))
			continue;

		if (r->reset)
			r->reset(vcpu, r);
	}
}
/**
 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
 *			 trap on a guest execution
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *desc = NULL;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int sr_idx;

	trace_kvm_handle_sys_reg(esr);

	if (triage_sysreg_trap(vcpu, &sr_idx))
		return 1;

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	/* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
	if (params.Op0 == 2 || params.Op0 == 3)
		desc = &sys_reg_descs[sr_idx];
	else
		desc = &sys_insn_descs[sr_idx];

	perform_access(vcpu, &params, desc);

	/* Read from system register? */
	if (!params.is_write &&
	    (params.Op0 == 2 || params.Op0 == 3))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static u64 get_##reg(struct kvm_vcpu *v,			\
			     const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
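
/*
 * Layout note (editorial addition): the demux indices decoded above are
 * composed as KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>, matching what
 * write_demux_regids() below hands out to userspace.
 */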
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}
*vcpu
, const struct kvm_one_reg
*reg
)
3807 void __user
*uaddr
= (void __user
*)(unsigned long)reg
->addr
;
3810 if ((reg
->id
& KVM_REG_ARM_COPROC_MASK
) == KVM_REG_ARM_DEMUX
)
3811 return demux_c15_get(vcpu
, reg
->id
, uaddr
);
3813 err
= get_invariant_sys_reg(reg
->id
, uaddr
);
3817 return kvm_sys_reg_get_user(vcpu
, reg
,
3818 sys_reg_descs
, ARRAY_SIZE(sys_reg_descs
));
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
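
/*
 * Example (editorial addition): with the uapi field layout that
 * index_to_params() decodes, SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0) yields the index KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 * KVM_REG_ARM64_SYSREG | (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 * (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT), i.e. the encoding and the
 * userspace register id round-trip through these two helpers.
 */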
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
3930 static int walk_sys_regs(struct kvm_vcpu
*vcpu
, u64 __user
*uind
)
3932 const struct sys_reg_desc
*i2
, *end2
;
3933 unsigned int total
= 0;
3937 end2
= sys_reg_descs
+ ARRAY_SIZE(sys_reg_descs
);
3939 while (i2
!= end2
) {
3940 err
= walk_one_sys_reg(vcpu
, i2
++, &uind
, &total
);
3947 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu
*vcpu
)
3949 return ARRAY_SIZE(invariant_sys_regs
)
3951 + walk_sys_regs(vcpu
, (u64 __user
*)NULL
);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
				     sys_reg_Op1(r),		\
				     sys_reg_CRn(r),		\
				     sys_reg_CRm(r),		\
				     sys_reg_Op2(r))

static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		/*
		 * For ID registers, we return the writable mask. Other feature
		 * registers return a full 64bit mask. That's not necessarily
		 * compliant with a given revision of the architecture, but the
		 * RES0/RES1 definitions allow us to do that.
		 */
		if (is_id_reg(encoding)) {
			if (!reg->val ||
			    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
				continue;
			val = reg->val;
		} else {
			val = GENMASK_ULL(63, 0);
		}

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}
*vcpu
)
4035 struct kvm
*kvm
= vcpu
->kvm
;
4037 mutex_lock(&kvm
->arch
.config_lock
);
4040 * In the absence of FGT, we cannot independently trap TLBI
4041 * Range instructions. This isn't great, but trapping all
4042 * TLBIs would be far worse. Live with it...
4044 if (!kvm_has_feat(kvm
, ID_AA64ISAR0_EL1
, TLB
, OS
))
4045 vcpu
->arch
.hcr_el2
|= HCR_TTLBOS
;
4047 if (cpus_have_final_cap(ARM64_HAS_HCX
)) {
4048 vcpu
->arch
.hcrx_el2
= HCRX_GUEST_FLAGS
;
4050 if (kvm_has_feat(kvm
, ID_AA64ISAR2_EL1
, MOPS
, IMP
))
4051 vcpu
->arch
.hcrx_el2
|= (HCRX_EL2_MSCEn
| HCRX_EL2_MCE2
);
4054 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED
, &kvm
->arch
.flags
))
4057 kvm
->arch
.fgu
[HFGxTR_GROUP
] = (HFGxTR_EL2_nAMAIR2_EL1
|
4058 HFGxTR_EL2_nMAIR2_EL1
|
4059 HFGxTR_EL2_nS2POR_EL1
|
4060 HFGxTR_EL2_nPOR_EL1
|
4061 HFGxTR_EL2_nPOR_EL0
|
4062 HFGxTR_EL2_nACCDATA_EL1
|
4063 HFGxTR_EL2_nSMPRI_EL1_MASK
|
4064 HFGxTR_EL2_nTPIDR2_EL0_MASK
);
4066 if (!kvm_has_feat(kvm
, ID_AA64ISAR0_EL1
, TLB
, OS
))
4067 kvm
->arch
.fgu
[HFGITR_GROUP
] |= (HFGITR_EL2_TLBIRVAALE1OS
|
4068 HFGITR_EL2_TLBIRVALE1OS
|
4069 HFGITR_EL2_TLBIRVAAE1OS
|
4070 HFGITR_EL2_TLBIRVAE1OS
|
4071 HFGITR_EL2_TLBIVAALE1OS
|
4072 HFGITR_EL2_TLBIVALE1OS
|
4073 HFGITR_EL2_TLBIVAAE1OS
|
4074 HFGITR_EL2_TLBIASIDE1OS
|
4075 HFGITR_EL2_TLBIVAE1OS
|
4076 HFGITR_EL2_TLBIVMALLE1OS
);
4078 if (!kvm_has_feat(kvm
, ID_AA64ISAR0_EL1
, TLB
, RANGE
))
4079 kvm
->arch
.fgu
[HFGITR_GROUP
] |= (HFGITR_EL2_TLBIRVAALE1
|
4080 HFGITR_EL2_TLBIRVALE1
|
4081 HFGITR_EL2_TLBIRVAAE1
|
4082 HFGITR_EL2_TLBIRVAE1
|
4083 HFGITR_EL2_TLBIRVAALE1IS
|
4084 HFGITR_EL2_TLBIRVALE1IS
|
4085 HFGITR_EL2_TLBIRVAAE1IS
|
4086 HFGITR_EL2_TLBIRVAE1IS
|
4087 HFGITR_EL2_TLBIRVAALE1OS
|
4088 HFGITR_EL2_TLBIRVALE1OS
|
4089 HFGITR_EL2_TLBIRVAAE1OS
|
4090 HFGITR_EL2_TLBIRVAE1OS
);
4092 if (!kvm_has_feat(kvm
, ID_AA64MMFR3_EL1
, S1PIE
, IMP
))
4093 kvm
->arch
.fgu
[HFGxTR_GROUP
] |= (HFGxTR_EL2_nPIRE0_EL1
|
4094 HFGxTR_EL2_nPIR_EL1
);
4096 if (!kvm_has_feat(kvm
, ID_AA64PFR0_EL1
, AMU
, IMP
))
4097 kvm
->arch
.fgu
[HAFGRTR_GROUP
] |= ~(HAFGRTR_EL2_RES0
|
4100 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED
, &kvm
->arch
.flags
);
4102 mutex_unlock(&kvm
->arch
.config_lock
);
int __init kvm_sys_reg_table_init(void)
{
	struct sys_reg_params params;
	bool valid = true;
	unsigned int i;
	int ret = 0;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
	params = encoding_to_params(SYS_ID_PFR0_EL1);
	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (!first_idreg)
		return -EINVAL;

	ret = populate_nv_trap_config();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
		ret = populate_sysreg_config(sys_reg_descs + i, i);

	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
		ret = populate_sysreg_config(sys_insn_descs + i, i);

	return ret;
}