KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
KVM_NVHE_ALIAS(alt_cb_patch_nops);
+KVM_NVHE_ALIAS(kvm_compute_ich_hcr_trap_bits);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include "../../vgic/vgic.h"
+
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
return val;
}
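+/*
+ * Merge the guest's view of ICH_HCR_EL2 with the trap bits this
+ * system requires (patched in at boot by the alternative callback).
+ */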
+static u64 compute_ich_hcr(struct vgic_v3_cpu_if *cpu_if)
+{
+ return cpu_if->vgic_hcr | vgic_ich_hcr_trap_bits();
+}
+
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
u64 used_lrs = cpu_if->used_lrs;
elrsr = read_gicreg(ICH_ELRSR_EL2);
- write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EL2_En, ICH_HCR_EL2);
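+	/*
+	 * No need to preserve the trap bits here: the vcpu isn't
+	 * running at this point, and they get recomputed on the
+	 * restore/activate path.
+	 */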
+ write_gicreg(0, ICH_HCR_EL2);
for (i = 0; i < used_lrs; i++) {
if (elrsr & (1 << i))
int i;
if (used_lrs || cpu_if->its_vpe.its_vm) {
- write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+ write_gicreg(compute_ich_hcr(cpu_if), ICH_HCR_EL2);
for (i = 0; i < used_lrs; i++)
__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
}
/*
- * If we need to trap system registers, we must write
- * ICH_HCR_EL2 anyway, even if no interrupts are being
- * injected. Note that this also applies if we don't expect
- * any system register access (no vgic at all).
+ * If we need to trap system registers, we must write ICH_HCR_EL2
+ * anyway, even if no interrupts are being injected. Note that this
+ * also applies if we don't expect any system register access (no
+ * vgic at all). In any case, there is no need to provide a
+ * maintenance interrupt (MI) configuration.
*/
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
- write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+ write_gicreg(vgic_ich_hcr_trap_bits() | ICH_HCR_EL2_En, ICH_HCR_EL2);
}
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
struct vgic_v3_cpu_if *s_cpu_if)
{
struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
- u64 val = 0;
int i;
- /*
- * If we're on a system with a broken vgic that requires
- * trapping, propagate the trapping requirements.
- *
- * Ah, the smell of rotten fruits...
- */
- if (static_branch_unlikely(&vgic_v3_cpuif_trap))
- val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
- ICH_HCR_EL2_TC | ICH_HCR_EL2_TDIR);
- s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
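+	/*
+	 * No need to fold the host trapping requirements in anymore:
+	 * the trap bits are now ORed in whenever ICH_HCR_EL2 is
+	 * written, courtesy of compute_ich_hcr().
+	 */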
+ s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
s_cpu_if->vgic_sre = host_if->vgic_sre;
return;
/* Hide GICv3 sysreg if necessary */
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
ICH_HCR_EL2_TC);
- return;
- }
-
- if (group0_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL0;
- if (group1_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL1;
- if (common_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TC;
- if (dir_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TDIR;
}
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
static bool vgic_v3_broken_seis(void)
{
- return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_SEIS) &&
- is_midr_in_range_list(broken_seis));
+ return (is_kernel_in_hyp_mode() &&
+ is_midr_in_range_list(broken_seis) &&
+ (read_sysreg_s(SYS_ICH_VTR_EL2) & ICH_VTR_EL2_SEIS));
+}
+
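+/*
+ * Boot-time alternative callback: work out which ICH_HCR_EL2 trap bits
+ * this system requires and patch them in as the immediate of the movz
+ * instruction that vgic_ich_hcr_trap_bits() expands to.
+ */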
+void noinstr kvm_compute_ich_hcr_trap_bits(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+{
+ u32 insn, oinsn, rd;
+ u64 hcr = 0;
+
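+	/* Cavium erratum 30115 mandates trapping all group-0/1 accesses */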
+ if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
+ group0_trap = true;
+ group1_trap = true;
+ }
+
+ if (vgic_v3_broken_seis()) {
+ /* We know that these machines have ICH_HCR_EL2.TDIR */
+ group0_trap = true;
+ group1_trap = true;
+ dir_trap = true;
+ }
+
+ if (group0_trap)
+ hcr |= ICH_HCR_EL2_TALL0;
+ if (group1_trap)
+ hcr |= ICH_HCR_EL2_TALL1;
+ if (common_trap)
+ hcr |= ICH_HCR_EL2_TC;
+ if (dir_trap)
+ hcr |= ICH_HCR_EL2_TDIR;
+
+ /* Compute target register */
+ oinsn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+ /* movz rd, #(hcr & 0xffff) */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)hcr,
+ 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr = cpu_to_le32(insn);
}
/**
{
u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
bool has_v2;
+ u64 traps;
int ret;
has_v2 = ich_vtr_el2 >> 63;
if (has_v2)
static_branch_enable(&vgic_v3_has_v2_compat);
- if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
- group0_trap = true;
- group1_trap = true;
- }
-
if (vgic_v3_broken_seis()) {
kvm_info("GICv3 with broken locally generated SEI\n");
-
kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_EL2_SEIS;
- group0_trap = true;
- group1_trap = true;
- if (ich_vtr_el2 & ICH_VTR_EL2_TDS)
- dir_trap = true;
- else
- common_trap = true;
}
- if (group0_trap || group1_trap || common_trap | dir_trap) {
+ traps = vgic_ich_hcr_trap_bits();
+ if (traps) {
kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
- group0_trap ? "G0" : "",
- group1_trap ? "G1" : "",
- common_trap ? "C" : "",
- dir_trap ? "D" : "");
+ (traps & ICH_HCR_EL2_TALL0) ? "G0" : "",
+ (traps & ICH_HCR_EL2_TALL1) ? "G1" : "",
+ (traps & ICH_HCR_EL2_TC) ? "C" : "",
+ (traps & ICH_HCR_EL2_TDIR) ? "D" : "");
static_branch_enable(&vgic_v3_cpuif_trap);
}
return ret;
}
+void kvm_compute_ich_hcr_trap_bits(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+
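+/*
+ * Return the ICH_HCR_EL2 trap bits required on this system. The movz
+ * below is rewritten at boot by kvm_compute_ich_hcr_trap_bits(), so
+ * this compiles down to a constant and is safe to call from hyp code.
+ */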
+static inline u64 vgic_ich_hcr_trap_bits(void)
+{
+ u64 hcr;
+
+ /* All the trap bits live in the bottom 16 bits of ICH_HCR_EL2 */
+ asm volatile(ALTERNATIVE_CB("movz %0, #0\n",
+ ARM64_ALWAYS_SYSTEM,
+ kvm_compute_ich_hcr_trap_bits)
+ : "=r" (hcr));
+
+ return hcr;
+}
+
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC