// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>
#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14
/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
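/*
 * Worked example of the conversion above: a CTR_EL0.DminLine value of
 * 4 means Log2(words) = 4, so get_min_cache_line_size() returns
 * 4 + 2 = 6, i.e. a minimum data cache line size of 2^6 = 64 bytes.
 */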
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
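/*
 * For illustration: with a 64-byte minimum line size,
 * get_min_cache_line_size() returns 6 and the fabricated value encodes
 * CCSIDR_EL1.LineSize = 6 - 4 = 2 (LineSize holds Log2(bytes) - 4),
 * with NumSets and Associativity left at 0, i.e. 1 set and 1 way.
 */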
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follow:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leave top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
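/*
 * Example: for an AArch32 access mapped with AA32_LO, mask is
 * GENMASK_ULL(31, 0) and shift is 0, so a 32-bit guest write replaces
 * bits [31:0] of the 64-bit shadow register and leaves bits [63:32]
 * untouched, as the comment above requires.
 */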
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);

	return amair;
}
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);

	return actlr;
}
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}
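/*
 * Example of the packing above: vcpu_id 0x1234 yields Aff0 = 0x4
 * (0x1234 & 0xf), Aff1 = 0x23 ((0x1234 >> 4) & 0xff) and Aff2 = 0x1
 * ((0x1234 >> 12) & 0xff), with bit 31 (RES1 in MPIDR_EL1) set.
 */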
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}
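/*
 * For example, on a host PMU with PMCR_EL0.N = 6 event counters, the
 * reset mask is BIT(31) | GENMASK(5, 0): the cycle counter bit
 * (ARMV8_PMU_CYCLE_IDX == 31) plus one bit per implemented counter.
 */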
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
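/*
 * Decode example: PMEVCNTR10_EL0 has CRm = 0b1001 and Op2 = 0b010, so
 * idx = ((0b1001 & 3) << 3) | (0b010 & 7) = (1 << 3) | 2 = 10.
 */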
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
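/*
 * For reference, PMU_PMEVCNTR_EL0(2) expands to a sys_reg_desc for
 * PMEVCNTR2_EL0 whose .reg is PMEVCNTR0_EL0 + 2, i.e. each expansion
 * targets the matching slot in the vcpu's shadow register file, and
 * DBG_BCR_BVR_WCR_WVR_EL1(n) expands to four descriptors at once.
 */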
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}
/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not in
 * the writable mask, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}
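/*
 * Concrete case: for ID_AA64DFR0_EL1 the table below sets
 * rd->val = ID_AA64DFR0_EL1_PMUVer_MASK, so only the PMUVer field is
 * checked field-by-field here; every other field must match the
 * sanitised limit exactly or the write is rejected with -E2BIG.
 */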
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}
/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1 <= crm < 8, 0 <= op2 < 8.
 */
static inline bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}
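/*
 * For instance, ID_AA64PFR0_EL1 encodes as (Op0=3, Op1=0, CRn=0, CRm=4,
 * Op2=0) and is therefore matched by is_id_reg(), while CTR_EL0
 * (Op0=3, Op1=3, CRn=0, CRm=0, Op2=1) is not.
 */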
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}
/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}
/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	/* Limit debug to ARMv8.0 */
	val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
	val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	return set_id_reg(vcpu, rd, val);
}
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	return val;
}
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}
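/*
 * Net effect for userspace: a KVM_SET_ONE_REG on an ID register after
 * the first KVM_RUN only succeeds if it writes back the current value
 * (-EBUSY otherwise), and an over-featured value before that point is
 * reported as -EINVAL rather than -E2BIG.
 */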
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);

	return true;
}
/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, let the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
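/*
 * Two example outcomes: with CTR_EL0.IDC = 1 and DIC = 1, the fabricated
 * hierarchy is a single unified L1 cache marked as LoC. With IDC = 0 and
 * DIC = 0, L1 gets separate data and instruction caches, and
 * LoUU/LoUIS/LoC all point at level 1.
 */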
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}
static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}
/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}
/*
 * Since reset() callback and field val are not used for idregs, they will be
 * used for specific purposes for idregs.
 * The reset() would return KVM sanitised register value. The value would be the
 * same as the host kernel sanitised value if there is no KVM sanitisation.
 * The val would be used as a mask indicating writable fields for the idreg.
 * Only bits with 1 are writable from userspace. This mask might not be
 * necessary in the future whenever all ID registers are enabled as writable
 * from userspace.
 */

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility,
	  .reset = read_sanitised_id_dfr0_el1,
	  .val = ID_DFR0_EL1_PerfMon_MASK, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),
	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_reg,
	  .reset = read_sanitised_id_aa64pfr0_el1,
	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64dfr0_el1,
	  .reset = read_sanitised_id_aa64dfr0_el1,
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_SANITISED(ID_AA64MMFR3_EL1),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
	{ SYS_DESC(SYS_ELR_EL1), access_elr },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */
	{ PMU_SYS_REG(PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
	{ SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },
	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr },
	{ PMU_SYS_REG(PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
2206 { SYS_DESC(SYS_TPIDR_EL0
), NULL
, reset_unknown
, TPIDR_EL0
},
2207 { SYS_DESC(SYS_TPIDRRO_EL0
), NULL
, reset_unknown
, TPIDRRO_EL0
},
2208 { SYS_DESC(SYS_TPIDR2_EL0
), undef_access
},
2210 { SYS_DESC(SYS_SCXTNUM_EL0
), undef_access
},
2212 { SYS_DESC(SYS_AMCR_EL0
), undef_access
},
2213 { SYS_DESC(SYS_AMCFGR_EL0
), undef_access
},
2214 { SYS_DESC(SYS_AMCGCR_EL0
), undef_access
},
2215 { SYS_DESC(SYS_AMUSERENR_EL0
), undef_access
},
2216 { SYS_DESC(SYS_AMCNTENCLR0_EL0
), undef_access
},
2217 { SYS_DESC(SYS_AMCNTENSET0_EL0
), undef_access
},
2218 { SYS_DESC(SYS_AMCNTENCLR1_EL0
), undef_access
},
2219 { SYS_DESC(SYS_AMCNTENSET1_EL0
), undef_access
},
2220 AMU_AMEVCNTR0_EL0(0),
2221 AMU_AMEVCNTR0_EL0(1),
2222 AMU_AMEVCNTR0_EL0(2),
2223 AMU_AMEVCNTR0_EL0(3),
2224 AMU_AMEVCNTR0_EL0(4),
2225 AMU_AMEVCNTR0_EL0(5),
2226 AMU_AMEVCNTR0_EL0(6),
2227 AMU_AMEVCNTR0_EL0(7),
2228 AMU_AMEVCNTR0_EL0(8),
2229 AMU_AMEVCNTR0_EL0(9),
2230 AMU_AMEVCNTR0_EL0(10),
2231 AMU_AMEVCNTR0_EL0(11),
2232 AMU_AMEVCNTR0_EL0(12),
2233 AMU_AMEVCNTR0_EL0(13),
2234 AMU_AMEVCNTR0_EL0(14),
2235 AMU_AMEVCNTR0_EL0(15),
2236 AMU_AMEVTYPER0_EL0(0),
2237 AMU_AMEVTYPER0_EL0(1),
2238 AMU_AMEVTYPER0_EL0(2),
2239 AMU_AMEVTYPER0_EL0(3),
2240 AMU_AMEVTYPER0_EL0(4),
2241 AMU_AMEVTYPER0_EL0(5),
2242 AMU_AMEVTYPER0_EL0(6),
2243 AMU_AMEVTYPER0_EL0(7),
2244 AMU_AMEVTYPER0_EL0(8),
2245 AMU_AMEVTYPER0_EL0(9),
2246 AMU_AMEVTYPER0_EL0(10),
2247 AMU_AMEVTYPER0_EL0(11),
2248 AMU_AMEVTYPER0_EL0(12),
2249 AMU_AMEVTYPER0_EL0(13),
2250 AMU_AMEVTYPER0_EL0(14),
2251 AMU_AMEVTYPER0_EL0(15),
2252 AMU_AMEVCNTR1_EL0(0),
2253 AMU_AMEVCNTR1_EL0(1),
2254 AMU_AMEVCNTR1_EL0(2),
2255 AMU_AMEVCNTR1_EL0(3),
2256 AMU_AMEVCNTR1_EL0(4),
2257 AMU_AMEVCNTR1_EL0(5),
2258 AMU_AMEVCNTR1_EL0(6),
2259 AMU_AMEVCNTR1_EL0(7),
2260 AMU_AMEVCNTR1_EL0(8),
2261 AMU_AMEVCNTR1_EL0(9),
2262 AMU_AMEVCNTR1_EL0(10),
2263 AMU_AMEVCNTR1_EL0(11),
2264 AMU_AMEVCNTR1_EL0(12),
2265 AMU_AMEVCNTR1_EL0(13),
2266 AMU_AMEVCNTR1_EL0(14),
2267 AMU_AMEVCNTR1_EL0(15),
2268 AMU_AMEVTYPER1_EL0(0),
2269 AMU_AMEVTYPER1_EL0(1),
2270 AMU_AMEVTYPER1_EL0(2),
2271 AMU_AMEVTYPER1_EL0(3),
2272 AMU_AMEVTYPER1_EL0(4),
2273 AMU_AMEVTYPER1_EL0(5),
2274 AMU_AMEVTYPER1_EL0(6),
2275 AMU_AMEVTYPER1_EL0(7),
2276 AMU_AMEVTYPER1_EL0(8),
2277 AMU_AMEVTYPER1_EL0(9),
2278 AMU_AMEVTYPER1_EL0(10),
2279 AMU_AMEVTYPER1_EL0(11),
2280 AMU_AMEVTYPER1_EL0(12),
2281 AMU_AMEVTYPER1_EL0(13),
2282 AMU_AMEVTYPER1_EL0(14),
2283 AMU_AMEVTYPER1_EL0(15),
2285 { SYS_DESC(SYS_CNTPCT_EL0
), access_arch_timer
},
2286 { SYS_DESC(SYS_CNTPCTSS_EL0
), access_arch_timer
},
2287 { SYS_DESC(SYS_CNTP_TVAL_EL0
), access_arch_timer
},
2288 { SYS_DESC(SYS_CNTP_CTL_EL0
), access_arch_timer
},
2289 { SYS_DESC(SYS_CNTP_CVAL_EL0
), access_arch_timer
},
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
	EL2_REG(HACR_EL2, access_rw, reset_val, 0),

	EL2_REG(HCRX_EL2, access_rw, reset_val, 0),

	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_SP_EL1), access_sp_el1 },

	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },

	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),

	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),

	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_RMR_EL2), trap_undef },

	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),

	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),

	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
	EL12_REG(CPACR, access_rw, reset_val, 0),
	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(TCR, access_vm_reg, reset_val, 0),
	{ SYS_DESC(SYS_SPSR_EL12), access_spsr },
	{ SYS_DESC(SYS_ELR_EL12), access_elr },
	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
	EL12_REG(VBAR, access_rw, reset_val, 0),
	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
	EL12_REG(CNTKCTL, access_rw, reset_val, 0),

	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};

static const struct sys_reg_desc *first_idreg;
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
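/*
 * Worked example (editor's sketch, not in the original source): for n = 2,
 * DBG_BCR_BVR_WCR_WVR(2) emits an AA32(LO) DBGBVR2 entry, so an AArch32
 * read of DBGBVR2 returns DBGBVR2_EL1[31:0], while DBGBXVR(2) emits an
 * AA32(HI) entry for DBGBXVR2 that returns DBGBVR2_EL1[63:32]. Both 32-bit
 * views are therefore served by the single 64-bit shadow register, with
 * trap_bvr handling either half based on the AA32() annotation.
 */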
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
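/*
 * Worked example (editor's note, not in the original source): for n = 10,
 * PMU_PMEVTYPER(10) computes CRm = 0b1100 | ((10 >> 3) & 0x3) = 0b1101
 * (c13) and Op2 = 10 & 0x7 = 2, with CRn fixed at 0b1110 (c14). That is
 * the architected PMEVTYPER10 encoding, following the ARM ARM rule
 * CRm = 0b11:n[4:3], opc2 = n[2:0].
 */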
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	/* IFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	/* CCSIDR2 */
	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *		 calls the corresponding trap handler.
 *
 * @vcpu: pointer to the vcpu performing the access
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of registers to use for dispatch
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
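/*
 * Worked example (editor's note, not in the original source): a guest
 * "mcrr p15, 0, r0, r1, c12" (an ICC_SGI1R write, see cp15_64_regs below)
 * traps with Rt = r0 and Rt2 = r1; architecturally Rt carries bits [31:0]
 * and Rt2 carries bits [63:32], so the code above assembles
 * params.regval = (r1 << 32) | r0 before dispatching to the shared
 * 64-bit trap handler.
 */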
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for
 * accesses to them.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;
	params->Op2 = 0;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0b000;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 0b001;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 0b010;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}
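/*
 * Worked example (editor's note, not in the original source, and assuming
 * the switch restored above): a guest "vmrs r0, mvfr1" traps with
 * reg_id = 0b0110; the function rewrites that as the AArch64 encoding
 * op0=3, op1=0, CRn=0, CRm=3, op2=1, i.e. MVFR1_EL1, so the generic
 * sys_reg_descs lookup can service the read.
 */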
/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
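/*
 * Worked example (editor's note, not in the original source): a guest
 * "mrc p15, 0, r0, c0, c1, 0" (ID_PFR0) arrives with op1=0, CRn=0, CRm=1,
 * op2=0; setting params->Op0 = 3 turns that directly into the AArch64
 * ID_PFR0_EL1 encoding (3, 0, 0, 1, 0), which the AArch64 table does
 * enumerate.
 */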
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: The system register access parameters
 * @global: The table of registers to use for dispatch
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
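/*
 * Editor's note (not in the original source): (CRn & 0b1011) == 0b1011
 * accepts exactly CRn == 0b1011 (11) and CRn == 0b1111 (15), since only
 * bit 2 is left free by the mask; those two CRn values form the
 * implementation-defined encoding space for Op0 == 3.
 */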
/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}

	return false;
}
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *idreg = first_idreg;
	u32 id = reg_to_encoding(idreg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Initialize all idregs */
	while (is_id_reg(id)) {
		IDREG(kvm, id) = idreg->reset(vcpu, idreg);

		idreg++;
		id = reg_to_encoding(idreg);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	kvm_reset_id_regs(vcpu);

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (is_id_reg(reg_to_encoding(r)))
			continue;

		if (r->reset)
			r->reset(vcpu, r);
	}
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	if (__check_nv_sr_forward(vcpu))
		return 1;

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
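/*
 * Worked example (editor's note, not in the original source): the index for
 * SCTLR_EL1 (op0=3, op1=0, CRn=1, CRm=0, op2=0) packs as
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 * (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT);
 * index_to_params() simply reverses those shifts, and rejects any id with
 * bits set outside the masks listed above.
 */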
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static u64 get_##reg(struct kvm_vcpu *v,			\
			     const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}
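/*
 * Editor's note (not in the original source): a demux id is laid out as
 * KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>, so the low bits select
 * which CCSIDR to read. Anything with an unknown demux ID, a size other
 * than 32 bits, or a CSSELR value >= CSSELR_MAX is rejected with -ENOENT.
 */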
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
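/*
 * Editor's note (not in the original source): sys_reg_to_index() is the
 * inverse of index_to_params() above; for any entry in sys_reg_descs,
 * decoding the index it produces recovers the original Op0/Op1/CRn/CRm/Op2
 * tuple. That round trip is what makes the indices returned by
 * KVM_GET_REG_LIST usable with KVM_GET_ONE_REG and KVM_SET_ONE_REG.
 */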
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
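/*
 * Editor's sketch of the userspace flow (not in the original source):
 * KVM_GET_REG_LIST lands in kvm_arm_copy_sys_reg_indices(), which emits
 * the invariant registers first, then every visible sys_reg_descs entry
 * via walk_sys_regs(), and finally the demuxed CCSIDR ids. Each returned
 * index can then be fed to KVM_GET_ONE_REG or KVM_SET_ONE_REG, which
 * route through kvm_arm_sys_reg_get_reg()/kvm_arm_sys_reg_set_reg() above.
 */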
int __init kvm_sys_reg_table_init(void)
{
	struct sys_reg_params params;
	bool valid = true;
	unsigned int i;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
	params = encoding_to_params(SYS_ID_PFR0_EL1);
	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (!first_idreg)
		return -EINVAL;

	if (kvm_get_mode() == KVM_MODE_NV)
		return populate_nv_trap_config();

	return 0;
}