DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
-static void __activate_traps(struct kvm_vcpu *vcpu)
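+/* True when the guest's FP/SIMD state is live in the CPU registers */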
+static bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
-	u64 val;
+	return vcpu->arch.flags & KVM_ARM64_FP_ENABLED;
+}
+
-	___activate_traps(vcpu);
-	__activate_traps_common(vcpu);
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
-	val = vcpu->arch.cptr_el2;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
-	if (!update_fp_enabled(vcpu)) {
-		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
-		__activate_traps_fpsimd32(vcpu);
+	/* Only the upstream !hVHE case applies here, hence the unconditional block */
+	if (1) {
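+		/* Trap trace register accesses (TTA) and keep the nVHE RES1 bits set */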
+		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
+
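+		/* Trap SVE (TZ) unless an SVE-capable guest currently owns the FP regs */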
+		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu))
+			val |= CPTR_EL2_TZ;
+
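+		/* Trap FP/SIMD (TFP) while the host owns the FP regs */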
+		if (!guest_owns_fp_regs(vcpu))
+			val |= CPTR_EL2_TFP;
	}
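+
+	/* With TFP set, ensure 32-bit guests' FP accesses trap to EL2 rather than EL1 */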
+	if (!guest_owns_fp_regs(vcpu))
+		__activate_traps_fpsimd32(vcpu);
+
	write_sysreg(val, cptr_el2);
+}
+
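+/* cptr_el2 is now programmed by __activate_cptr_traps() each time traps are activated */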
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+	___activate_traps(vcpu);
+	__activate_traps_common(vcpu);
+	__activate_cptr_traps(vcpu);
+
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {