/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
+ if (vgic_is_v5(kern_hyp_va(vcpu->kvm))) {
+ __vgic_v5_save_state(&vcpu->arch.vgic_cpu.vgic_v5);
+ __vgic_v5_save_ppi_state(&vcpu->arch.vgic_cpu.vgic_v5);
+ return;
+ }
+
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
+ if (vgic_is_v5(kern_hyp_va(vcpu->kvm))) {
+ __vgic_v5_restore_state(&vcpu->arch.vgic_cpu.vgic_v5);
+ __vgic_v5_restore_ppi_state(&vcpu->arch.vgic_cpu.vgic_v5);
+ return;
+ }
+
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_set_vmcr(vcpu, vmcr);
- else
+ const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ /*
+ * Dispatch on the per-VM vgic model rather than the host GIC type:
+ * once GICv5 is supported, the host GIC alone no longer determines
+ * the backend (see the GICv3-on-GICv5 note in vgic_restore_state()).
+ */
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V5:
+ vgic_v5_set_vmcr(vcpu, vmcr);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
 vgic_v3_set_vmcr(vcpu, vmcr);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ /* A GICv2 guest backed by a GICv3 cpuif goes via the v3 code */
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ vgic_v3_set_vmcr(vcpu, vmcr);
+ else
+ vgic_v2_set_vmcr(vcpu, vmcr);
+ break;
+ default:
+ /* Any other model is a vgic init bug */
+ BUG();
+ }
}
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_get_vmcr(vcpu, vmcr);
- else
+ const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ /*
+ * Mirror of vgic_set_vmcr(): select the backend from the per-VM vgic
+ * model, not the host GIC type.
+ */
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V5:
+ vgic_v5_get_vmcr(vcpu, vmcr);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
 vgic_v3_get_vmcr(vcpu, vmcr);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ /* A GICv2 guest backed by a GICv3 cpuif goes via the v3 code */
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ vgic_v3_get_vmcr(vcpu, vmcr);
+ else
+ vgic_v2_get_vmcr(vcpu, vmcr);
+ break;
+ default:
+ /* Any other model is a vgic init bug */
+ BUG();
+ }
}
/*
return 0;
}
+
+/*
+ * Make the vcpu's GICv5 vPE resident on this physical CPU by restoring
+ * the virtual CPU interface config (VMCR) and active priorities (APRs)
+ * at EL2. Paired with vgic_v5_put().
+ */
+void vgic_v5_load(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+ /*
+ * On the WFI path, vgic_load is called a second time. The first is when
+ * scheduling in the vcpu thread again, and the second is when leaving
+ * WFI. Skip the second instance as it serves no purpose and just
+ * restores the same state again.
+ */
+ if (cpu_if->gicv5_vpe.resident)
+ return;
+
+ kvm_call_hyp(__vgic_v5_restore_vmcr_apr, cpu_if);
+
+ cpu_if->gicv5_vpe.resident = true;
+}
+
+/*
+ * Take the vcpu's GICv5 vPE non-resident, syncing the active priorities
+ * back into memory. Note the asymmetry with vgic_v5_load(): only the
+ * APRs are saved here — presumably VMCR is owned by the in-memory copy
+ * updated via vgic_v5_set_vmcr() (NOTE(review): confirm).
+ */
+void vgic_v5_put(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+ /*
+ * Do nothing if we're not resident. This can happen on the WFI path,
+ * where vgic_put is called once when entering WFI and again later when
+ * descheduling the thread. We risk losing VMCR state if we sync it
+ * twice, so instead return early in this case.
+ */
+ if (!cpu_if->gicv5_vpe.resident)
+ return;
+
+ kvm_call_hyp(__vgic_v5_save_apr, cpu_if);
+
+ cpu_if->gicv5_vpe.resident = false;
+}
+
+/*
+ * Unpack the generic vgic_vmcr view (enable bit and priority mask) from
+ * the shadow ICH_VMCR_EL2 value held in the cpu interface struct.
+ */
+void vgic_v5_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+ u64 vmcr = cpu_if->vgic_vmcr;
+
+ vmcrp->en = FIELD_GET(FEAT_GCIE_ICH_VMCR_EL2_EN, vmcr);
+ vmcrp->pmr = FIELD_GET(FEAT_GCIE_ICH_VMCR_EL2_VPMR, vmcr);
+}
+
+/*
+ * Pack the generic vgic_vmcr view (enable bit and priority mask) into
+ * the shadow ICH_VMCR_EL2 value. The hardware register itself is only
+ * written back when the vPE is made resident (vgic_v5_load()).
+ */
+void vgic_v5_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+ u64 vmcr;
+
+ vmcr = FIELD_PREP(FEAT_GCIE_ICH_VMCR_EL2_VPMR, vmcrp->pmr) |
+ FIELD_PREP(FEAT_GCIE_ICH_VMCR_EL2_EN, vmcrp->en);
+
+ cpu_if->vgic_vmcr = vmcr;
+}
+
+/* Restore GICv5 CPU interface and PPI state before entering the guest */
+void vgic_v5_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+ __vgic_v5_restore_state(cpu_if);
+ __vgic_v5_restore_ppi_state(cpu_if);
+ /* Full barrier to ensure the restore has completed before we proceed */
+ dsb(sy);
+}
+
+/* Save GICv5 CPU interface and PPI state after a guest exit */
+void vgic_v5_save_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+ __vgic_v5_save_state(cpu_if);
+ __vgic_v5_save_ppi_state(cpu_if);
+ /* Full barrier to ensure the save has completed before we proceed */
+ dsb(sy);
+}
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ /* No switch statement here. See comment in vgic_restore_state() */
+ if (vgic_is_v5(vcpu->kvm))
+ vgic_v5_save_state(vcpu);
+ else if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- /* If nesting, emulate the HW effect from L0 to L1 */
- if (vgic_state_is_nested(vcpu)) {
- vgic_v3_sync_nested(vcpu);
- return;
- }
+ if (vgic_is_v3(vcpu->kvm)) {
+ /* If nesting, emulate the HW effect from L0 to L1 */
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_sync_nested(vcpu);
+ return;
+ }
- if (vcpu_has_nv(vcpu))
- vgic_v3_nested_update_mi(vcpu);
+ if (vcpu_has_nv(vcpu))
+ vgic_v3_nested_update_mi(vcpu);
+ }
if (can_access_vgic_from_kernel())
vgic_save_state(vcpu);
static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ /*
+ * As nice as it would be to restructure this code into a switch
+ * statement as can be found elsewhere, the logic quickly gets ugly.
+ *
+ * __vgic_v3_restore_state() is doing a lot of heavy lifting here. It is
+ * required for GICv3-on-GICv3, GICv2-on-GICv3, GICv3-on-GICv5, and the
+ * no-in-kernel-irqchip case on GICv3 hardware. Hence, adding a switch
+ * here results in much more complex code.
+ */
+ if (vgic_is_v5(vcpu->kvm))
+ vgic_v5_restore_state(vcpu);
+ else if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
+ const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ /*
+ * No (initialised) in-kernel irqchip: a VHE host with a GICv3
+ * cpuif still needs its EL2 traps activated.
+ */
 if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
 return;
 }
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
- vgic_v2_load(vcpu);
- else
+ /* Pick the backend from the per-VM model; see vgic_set_vmcr() */
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V5:
+ vgic_v5_load(vcpu);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
 vgic_v3_load(vcpu);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ /* A GICv2 guest backed by a GICv3 cpuif goes via the v3 code */
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ vgic_v3_load(vcpu);
+ else
+ vgic_v2_load(vcpu);
+ break;
+ default:
+ BUG();
+ }
}
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
+ const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ /*
+ * No (initialised) in-kernel irqchip: undo the trap activation
+ * performed by kvm_vgic_load() on VHE GICv3 hosts.
+ */
 if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
 return;
 }
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
- vgic_v2_put(vcpu);
- else
+ /* Pick the backend from the per-VM model; see vgic_set_vmcr() */
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V5:
+ vgic_v5_put(vcpu);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
 vgic_v3_put(vcpu);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ /* A GICv2 guest backed by a GICv3 cpuif goes via the v3 code */
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ vgic_v3_put(vcpu);
+ else
+ vgic_v2_put(vcpu);
+ break;
+ default:
+ BUG();
+ }
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
* registers regardless of the hardware backed GIC used.
*/
struct vgic_vmcr {
+ u32 en; /* GICv5-specific */
u32 grpen0;
u32 grpen1;
void vgic_debug_destroy(struct kvm *kvm);
int vgic_v5_probe(const struct gic_kvm_info *info);
+void vgic_v5_load(struct kvm_vcpu *vcpu);
+void vgic_v5_put(struct kvm_vcpu *vcpu);
+void vgic_v5_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v5_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v5_restore_state(struct kvm_vcpu *vcpu);
+void vgic_v5_save_state(struct kvm_vcpu *vcpu);
static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
* it is the hyp's responsibility to keep the state constistent.
*/
u64 vgic_icsr;
+
+ struct gicv5_vpe gicv5_vpe;
};
/* What PPI capabilities does a GICv5 host have */
int gicv5_irs_iste_alloc(u32 lpi);
void gicv5_irs_syncr(void);
+/* Embedded in kvm.arch */
+struct gicv5_vpe {
+ /*
+ * true while this vPE's state is loaded on the current physical CPU;
+ * used by vgic_v5_load()/vgic_v5_put() to skip the redundant second
+ * load/put that occurs on the WFI path.
+ */
+ bool resident;
+};
+
struct gicv5_its_devtab_cfg {
union {
struct {