]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: gic-v5: Implement GICv5 load/put and save/restore
author: Sascha Bischoff <Sascha.Bischoff@arm.com>
Thu, 19 Mar 2026 15:54:08 +0000 (15:54 +0000)
committer: Marc Zyngier <maz@kernel.org>
Thu, 19 Mar 2026 18:21:28 +0000 (18:21 +0000)
This change introduces GICv5 load/put. Additionally, it plumbs in
save/restore for:

* PPIs (ICH_PPI_x_EL2 regs)
* ICH_VMCR_EL2
* ICH_APR_EL2
* ICC_ICSR_EL1

A GICv5-specific enable bit is added to struct vgic_vmcr as this
differs from previous GICs. On GICv5-native systems, the VMCR only
contains the enable bit (driven by the guest via ICC_CR0_EL1.EN) and
the priority mask (PCR).

A struct gicv5_vpe is also introduced. It currently contains a single
field - bool resident - which tracks whether a VPE is currently
running, and is used to avoid a double load or double put on the WFI
path for a vCPU. This struct will be extended as additional GICv5
support is merged, specifically for VPE doorbells.

Co-authored-by: Timothy Hayes <timothy.hayes@arm.com>
Signed-off-by: Timothy Hayes <timothy.hayes@arm.com>
Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20260319154937.3619520-18-sascha.bischoff@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/arm64/kvm/vgic/vgic-v5.c
arch/arm64/kvm/vgic/vgic.c
arch/arm64/kvm/vgic/vgic.h
include/kvm/arm_vgic.h
include/linux/irqchip/arm-gic-v5.h

index b41485ce295abd6e6397d9ead9af4a2e42f313c4..a88da302b6d08c736c5ac707e497271b17a34ba2 100644 (file)
@@ -113,6 +113,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 /* Save VGICv3 state on non-VHE systems */
 static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
+       if (vgic_is_v5(kern_hyp_va(vcpu->kvm))) {
+               __vgic_v5_save_state(&vcpu->arch.vgic_cpu.vgic_v5);
+               __vgic_v5_save_ppi_state(&vcpu->arch.vgic_cpu.vgic_v5);
+               return;
+       }
+
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
                __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -122,6 +128,12 @@ static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 /* Restore VGICv3 state on non-VHE systems */
 static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
+       if (vgic_is_v5(kern_hyp_va(vcpu->kvm))) {
+               __vgic_v5_restore_state(&vcpu->arch.vgic_cpu.vgic_v5);
+               __vgic_v5_restore_ppi_state(&vcpu->arch.vgic_cpu.vgic_v5);
+               return;
+       }
+
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
index a573b1f0c6cbeee347aa50426d37ddb018d741c2..74d76dec97304b42324523bb2b2b9e3652e0b508 100644 (file)
@@ -842,18 +842,46 @@ vgic_find_mmio_region(const struct vgic_register_region *regions,
 
 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 {
-       if (kvm_vgic_global_state.type == VGIC_V2)
-               vgic_v2_set_vmcr(vcpu, vmcr);
-       else
+       const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       switch (dist->vgic_model) {
+       case KVM_DEV_TYPE_ARM_VGIC_V5:
+               vgic_v5_set_vmcr(vcpu, vmcr);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_set_vmcr(vcpu, vmcr);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V2:
+               if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                       vgic_v3_set_vmcr(vcpu, vmcr);
+               else
+                       vgic_v2_set_vmcr(vcpu, vmcr);
+               break;
+       default:
+               BUG();
+       }
 }
 
 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 {
-       if (kvm_vgic_global_state.type == VGIC_V2)
-               vgic_v2_get_vmcr(vcpu, vmcr);
-       else
+       const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+       switch (dist->vgic_model) {
+       case KVM_DEV_TYPE_ARM_VGIC_V5:
+               vgic_v5_get_vmcr(vcpu, vmcr);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_get_vmcr(vcpu, vmcr);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V2:
+               if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                       vgic_v3_get_vmcr(vcpu, vmcr);
+               else
+                       vgic_v2_get_vmcr(vcpu, vmcr);
+               break;
+       default:
+               BUG();
+       }
 }
 
 /*
index cf8382a954bbcf9bc4c2f2d3e3a552595b6374dd..41317e1d94a2f8fd2db1d6a7d7a032fa41df1781 100644 (file)
@@ -86,3 +86,77 @@ int vgic_v5_probe(const struct gic_kvm_info *info)
 
        return 0;
 }
+
+void vgic_v5_load(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+       /*
+        * On the WFI path, vgic_load is called a second time. The first is when
+        * scheduling in the vcpu thread again, and the second is when leaving
+        * WFI. Skip the second instance as it serves no purpose and just
+        * restores the same state again.
+        */
+       if (cpu_if->gicv5_vpe.resident)
+               return;
+
+       kvm_call_hyp(__vgic_v5_restore_vmcr_apr, cpu_if);
+
+       cpu_if->gicv5_vpe.resident = true;
+}
+
+void vgic_v5_put(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+       /*
+        * Do nothing if we're not resident. This can happen in the WFI path
+        * where we do a vgic_put in the WFI path and again later when
+        * descheduling the thread. We risk losing VMCR state if we sync it
+        * twice, so instead return early in this case.
+        */
+       if (!cpu_if->gicv5_vpe.resident)
+               return;
+
+       kvm_call_hyp(__vgic_v5_save_apr, cpu_if);
+
+       cpu_if->gicv5_vpe.resident = false;
+}
+
+void vgic_v5_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+       u64 vmcr = cpu_if->vgic_vmcr;
+
+       vmcrp->en = FIELD_GET(FEAT_GCIE_ICH_VMCR_EL2_EN, vmcr);
+       vmcrp->pmr = FIELD_GET(FEAT_GCIE_ICH_VMCR_EL2_VPMR, vmcr);
+}
+
+void vgic_v5_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+       u64 vmcr;
+
+       vmcr = FIELD_PREP(FEAT_GCIE_ICH_VMCR_EL2_VPMR, vmcrp->pmr) |
+              FIELD_PREP(FEAT_GCIE_ICH_VMCR_EL2_EN, vmcrp->en);
+
+       cpu_if->vgic_vmcr = vmcr;
+}
+
+void vgic_v5_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+       __vgic_v5_restore_state(cpu_if);
+       __vgic_v5_restore_ppi_state(cpu_if);
+       dsb(sy);
+}
+
+void vgic_v5_save_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
+
+       __vgic_v5_save_state(cpu_if);
+       __vgic_v5_save_ppi_state(cpu_if);
+       dsb(sy);
+}
index 2f3f892cbddc24a70e7da7c268e81a21eec61554..84199d2df80af2512455d5d832f9b73a64a26d42 100644 (file)
@@ -1017,7 +1017,10 @@ static inline bool can_access_vgic_from_kernel(void)
 
 static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 {
-       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+       /* No switch statement here. See comment in vgic_restore_state() */
+       if (vgic_is_v5(vcpu->kvm))
+               vgic_v5_save_state(vcpu);
+       else if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_save_state(vcpu);
        else
                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -1026,14 +1029,16 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-       /* If nesting, emulate the HW effect from L0 to L1 */
-       if (vgic_state_is_nested(vcpu)) {
-               vgic_v3_sync_nested(vcpu);
-               return;
-       }
+       if (vgic_is_v3(vcpu->kvm)) {
+               /* If nesting, emulate the HW effect from L0 to L1 */
+               if (vgic_state_is_nested(vcpu)) {
+                       vgic_v3_sync_nested(vcpu);
+                       return;
+               }
 
-       if (vcpu_has_nv(vcpu))
-               vgic_v3_nested_update_mi(vcpu);
+               if (vcpu_has_nv(vcpu))
+                       vgic_v3_nested_update_mi(vcpu);
+       }
 
        if (can_access_vgic_from_kernel())
                vgic_save_state(vcpu);
@@ -1055,7 +1060,18 @@ void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu)
 
 static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+       /*
+        * As nice as it would be to restructure this code into a switch
+        * statement as can be found elsewhere, the logic quickly gets ugly.
+        *
+        * __vgic_v3_restore_state() is doing a lot of heavy lifting here. It is
+        * required for GICv3-on-GICv3, GICv2-on-GICv3, GICv3-on-GICv5, and the
+        * no-in-kernel-irqchip case on GICv3 hardware. Hence, adding a switch
+        * here results in much more complex code.
+        */
+       if (vgic_is_v5(vcpu->kvm))
+               vgic_v5_restore_state(vcpu);
+       else if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_restore_state(vcpu);
        else
                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -1109,30 +1125,58 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu)
 {
+       const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
        if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
                if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                        __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                return;
        }
 
-       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
-               vgic_v2_load(vcpu);
-       else
+       switch (dist->vgic_model) {
+       case KVM_DEV_TYPE_ARM_VGIC_V5:
+               vgic_v5_load(vcpu);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_load(vcpu);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V2:
+               if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                       vgic_v3_load(vcpu);
+               else
+                       vgic_v2_load(vcpu);
+               break;
+       default:
+               BUG();
+       }
 }
 
 void kvm_vgic_put(struct kvm_vcpu *vcpu)
 {
+       const struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
        if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
                if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                        __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                return;
        }
 
-       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
-               vgic_v2_put(vcpu);
-       else
+       switch (dist->vgic_model) {
+       case KVM_DEV_TYPE_ARM_VGIC_V5:
+               vgic_v5_put(vcpu);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_put(vcpu);
+               break;
+       case KVM_DEV_TYPE_ARM_VGIC_V2:
+               if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                       vgic_v3_put(vcpu);
+               else
+                       vgic_v2_put(vcpu);
+               break;
+       default:
+               BUG();
+       }
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
index 7b7eed69d7973060d983331312fd5fcf1cf4c754..cc487a69d038989c12ccfc089066d8d5d2dc576e 100644 (file)
@@ -187,6 +187,7 @@ static inline u64 vgic_ich_hcr_trap_bits(void)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
+       u32     en; /* GICv5-specific */
        u32     grpen0;
        u32     grpen1;
 
@@ -363,6 +364,12 @@ void vgic_debug_init(struct kvm *kvm);
 void vgic_debug_destroy(struct kvm *kvm);
 
 int vgic_v5_probe(const struct gic_kvm_info *info);
+void vgic_v5_load(struct kvm_vcpu *vcpu);
+void vgic_v5_put(struct kvm_vcpu *vcpu);
+void vgic_v5_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v5_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v5_restore_state(struct kvm_vcpu *vcpu);
+void vgic_v5_save_state(struct kvm_vcpu *vcpu);
 
 static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
 {
index 07e394690dccb03a2921c66ed8ed44e3b28477f8..b27bfc463a31111a14d3fe02a0e5fd509d09bac4 100644 (file)
@@ -447,6 +447,8 @@ struct vgic_v5_cpu_if {
         * it is the hyp's responsibility to keep the state constistent.
         */
        u64     vgic_icsr;
+
+       struct gicv5_vpe gicv5_vpe;
 };
 
 /* What PPI capabilities does a GICv5 host have */
index b1566a7c93ecb4294732da9380718014f48a79e2..40d2fce682940ab2adb2bc547e4765243888b8b5 100644 (file)
@@ -387,6 +387,11 @@ int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
 int gicv5_irs_iste_alloc(u32 lpi);
 void gicv5_irs_syncr(void);
 
+/* Embedded in kvm.arch */
+struct gicv5_vpe {
+       bool                    resident;
+};
+
 struct gicv5_its_devtab_cfg {
        union {
                struct {