*vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
}

+static void __compute_ich_hfgrtr(struct kvm_vcpu *vcpu)
+{
+ __compute_fgt(vcpu, ICH_HFGRTR_EL2);
+
+ /*
+ * ICC_IAFFIDR_EL1 *always* needs to be trapped when running a guest:
+ * a cleared bit in ICH_HFGRTR_EL2 traps the read, letting KVM present
+ * the guest's own IAFFID instead of the host value.
+ */
+ *vcpu_fgt(vcpu, ICH_HFGRTR_EL2) &= ~ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1;
+}
+
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
{
if (!cpus_have_final_cap(ARM64_HAS_FGT))
	return;
}
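+ /* The GICv5 CPU interface brings its own set of ICH_* fine-grained traps */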
if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
- __compute_fgt(vcpu, ICH_HFGRTR_EL2);
+ __compute_ich_hfgrtr(vcpu);
__compute_fgt(vcpu, ICH_HFGWTR_EL2);
__compute_fgt(vcpu, ICH_HFGITR_EL2);
}
return true;
}

+static bool access_gicv5_iaffid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
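+ /* ICC_IAFFIDR_EL1 is read-only; writes are treated as UNDEFINED */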
+ if (p->is_write)
+ return undef_access(vcpu, p, r);
+
+ /*
+ * For GICv5 VMs, the IAFFID value is the same as the VPE ID, which in
+ * turn matches the vCPU ID.
+ */
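+ /* e.g. a guest reading ICC_IAFFIDR_EL1 on vCPU 3 sees IAFFID == 3 */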
+ p->regval = FIELD_PREP(ICC_IAFFIDR_EL1_IAFFID, vcpu->vcpu_id);
+
+ return true;
+}
+
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_IAFFIDR_EL1), access_gicv5_iaffid },
{ SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP);
}

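+/* FEAT_GCIE, the GICv5 CPU interface, is advertised via ID_AA64PFR2_EL1.GCIE */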
+static inline bool kvm_has_gicv5(struct kvm *kvm)
+{
+ return kvm_has_feat(kvm, ID_AA64PFR2_EL1, GCIE, IMP);
+}
+
void vgic_v3_flush_nested(struct kvm_vcpu *vcpu);
void vgic_v3_sync_nested(struct kvm_vcpu *vcpu);
void vgic_v3_load_nested(struct kvm_vcpu *vcpu);