ICH_HFGRTR_EL2_ICC_IDRn_EL1);
}
+/*
+ * Derive this vCPU's fine-grained write-trap configuration
+ * (ICH_HFGWTR_EL2), then adjust the ICC_PPI_ENABLERn_EL1 bit so that
+ * guest writes to the PPI enable registers take the trap path (see
+ * access_gicv5_ppi_enabler) while reads remain untrapped.
+ */
+static void __compute_ich_hfgwtr(struct kvm_vcpu *vcpu)
+{
+ __compute_fgt(vcpu, ICH_HFGWTR_EL2);
+
+ /*
+ * We present a different subset of PPIs to the guest from what
+ * exists in real hardware. We only trap writes, not reads.
+ */
+ *vcpu_fgt(vcpu, ICH_HFGWTR_EL2) &= ~(ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1);
+}
+
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
{
if (!cpus_have_final_cap(ARM64_HAS_FGT))
/*
 * NOTE(review): the guard above has no visible `return;` -- as shown
 * here, the following `if` would be its body, meaning the GICv5 FGT
 * computation only runs when FGT is *absent*. This is presumably
 * elided diff context (an early `return;` plus intervening
 * statements) -- confirm against the full file before applying.
 */
if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
__compute_ich_hfgrtr(vcpu);
- __compute_fgt(vcpu, ICH_HFGWTR_EL2);
+ __compute_ich_hfgwtr(vcpu);
__compute_fgt(vcpu, ICH_HFGITR_EL2);
}
}
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->activer_exit,
read_sysreg_s(SYS_ICH_PPI_ACTIVER0_EL2), 0, 64);
- bitmap_write(host_data_ptr(vgic_v5_ppi_state)->enabler_exit,
- read_sysreg_s(SYS_ICH_PPI_ENABLER0_EL2), 0, 64);
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr_exit,
read_sysreg_s(SYS_ICH_PPI_PENDR0_EL2), 0, 64);
if (VGIC_V5_NR_PRIVATE_IRQS == 128) {
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->activer_exit,
read_sysreg_s(SYS_ICH_PPI_ACTIVER1_EL2), 64, 64);
- bitmap_write(host_data_ptr(vgic_v5_ppi_state)->enabler_exit,
- read_sysreg_s(SYS_ICH_PPI_ENABLER1_EL2), 64, 64);
bitmap_write(host_data_ptr(vgic_v5_ppi_state)->pendr_exit,
read_sysreg_s(SYS_ICH_PPI_PENDR1_EL2), 64, 64);
return true;
}
+/*
+ * Trap handler for guest writes to ICC_PPI_ENABLER{0,1}_EL1: fold the
+ * written enable bits -- masked down to the PPIs we actually expose --
+ * into the vCPU's shadow enable bitmap and propagate each bit to the
+ * corresponding vgic_irq. Reads are not trapped (see
+ * __compute_ich_hfgwtr), so a read landing here is a bug.
+ */
+static bool access_gicv5_ppi_enabler(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long *mask = vcpu->kvm->arch.vgic.gicv5_vm.vgic_ppi_mask;
+ struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+ int i;
+
+ /* We never expect to get here with a read! */
+ if (WARN_ON_ONCE(!p->is_write))
+ return undef_access(vcpu, p, r);
+
+ /*
+ * If we're only handling architected PPIs and the guest writes to the
+ * enable for the non-architected PPIs, we just return as there's
+ * nothing to do at all. We don't even allocate the storage for them in
+ * this case.
+ */
+ if (VGIC_V5_NR_PRIVATE_IRQS == 64 && p->Op2 % 2)
+ return true;
+
+ /*
+ * Merge the raw guest write into our bitmap at an offset of either 0
+ * or 64 (ENABLER0 vs ENABLER1, distinguished by Op2 parity), then AND
+ * it with our PPI mask so only exposed PPIs can be enabled.
+ */
+ bitmap_write(cpu_if->vgic_ppi_enabler, p->regval, 64 * (p->Op2 % 2), 64);
+ bitmap_and(cpu_if->vgic_ppi_enabler, cpu_if->vgic_ppi_enabler, mask,
+ VGIC_V5_NR_PRIVATE_IRQS);
+
+ /*
+ * Sync the change in enable state to the vgic_irqs for every PPI we
+ * expose (i.e. every bit set in the mask); we don't expose many to
+ * the guest, so walking them all on each write is cheap.
+ */
+ for_each_set_bit(i, mask, VGIC_V5_NR_PRIVATE_IRQS) {
+ u32 intid = vgic_v5_make_ppi(i);
+ struct vgic_irq *irq;
+
+ irq = vgic_get_vcpu_irq(vcpu, intid);
+
+ scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
+ irq->enabled = test_bit(i, cpu_if->vgic_ppi_enabler);
+
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+
+ return true;
+}
+
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
{ SYS_DESC(SYS_ICC_IDR0_EL1), access_gicv5_idr0 },
{ SYS_DESC(SYS_ICC_IAFFIDR_EL1), access_gicv5_iaffid },
+ { SYS_DESC(SYS_ICC_PPI_ENABLER0_EL1), access_gicv5_ppi_enabler },
+ { SYS_DESC(SYS_ICC_PPI_ENABLER1_EL1), access_gicv5_ppi_enabler },
{ SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },