KVM: arm64: Eagerly save VMCR on exit
author	Marc Zyngier <maz@kernel.org>
	Thu, 20 Nov 2025 17:25:10 +0000 (17:25 +0000)
committer	Oliver Upton <oupton@kernel.org>
	Mon, 24 Nov 2025 22:29:13 +0000 (14:29 -0800)
We currently save/restore the VMCR register in a pretty lazy way
(on load/put, consistent with what we do for the APRs).

However, we are going to need the group-enable bits that are backed
by VMCR on each entry (so that we can avoid injecting interrupts for
disabled groups).

Move the synchronisation from put to sync. This results in some minor
churn in the nVHE hypercalls, which are simplified in the process.
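For illustration only (not part of this patch): a minimal, self-contained
sketch of how the eagerly saved VMCR could be used to skip injection for a
disabled group. The struct, helper and mask names below are invented for the
example; only the bit positions (ICH_VMCR_EL2.VENG0 is bit 0, VENG1 is bit 1)
follow the architecture, and the actual consumers are added by later patches.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* ICH_VMCR_EL2 virtual group-enable bits (architectural layout) */
	#define VMCR_VENG0	(1u << 0)	/* virtual Group 0 enable */
	#define VMCR_VENG1	(1u << 1)	/* virtual Group 1 enable */

	/* Illustrative stand-in for the per-CPU interface state */
	struct fake_vgic_v3_cpu_if {
		uint32_t vgic_vmcr;	/* snapshot taken on each exit */
	};

	/* Hypothetical helper: is this interrupt group enabled in the guest? */
	static bool group_enabled(const struct fake_vgic_v3_cpu_if *cpu_if,
				  int group)
	{
		uint32_t mask = group ? VMCR_VENG1 : VMCR_VENG0;

		return cpu_if->vgic_vmcr & mask;
	}

	int main(void)
	{
		struct fake_vgic_v3_cpu_if cpu_if = { .vgic_vmcr = VMCR_VENG1 };

		/* Only Group 1 is enabled in this snapshot */
		printf("Group 0 enabled: %d\n", group_enabled(&cpu_if, 0));
		printf("Group 1 enabled: %d\n", group_enabled(&cpu_if, 1));
		return 0;
	}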

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-21-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/vgic/vgic-v2.c
arch/arm64/kvm/vgic/vgic-v3-nested.c
arch/arm64/kvm/vgic/vgic-v3.c

index 9da54d4ee49ee7b4f98baf9dd779f8409223b602..f8adbd535b4aefeacbe3216e82f82381b3d79c40 100644 (file)
@@ -79,7 +79,7 @@ enum __kvm_host_smccc_func {
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
        __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
-       __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+       __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
        __KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
        __KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
index e6be1f5d0967f6d17c6fb3a69e6fe79af3f1534f..dbf16a9f67728a30b6fbcf05aa4f3d39cb2fe20c 100644 (file)
@@ -82,7 +82,7 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
index 870953b4a8a74f0b51ffb444920604ee29f8b6ff..733195ef183e1e8e29480f22a1f52a9fd469cb98 100644 (file)
@@ -659,8 +659,7 @@ nommu:
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        if (is_protected_kvm_enabled()) {
-               kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
-                            &vcpu->arch.vgic_cpu.vgic_v3);
+               kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
                kvm_call_hyp_nvhe(__pkvm_vcpu_put);
        }
 
index 29430c031095a3103cc8d7763deb8cc67d7a5da2..a7c689152f68694fb96aadcc181ba4be8bfe9349 100644 (file)
@@ -157,6 +157,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
        host_vcpu->arch.iflags          = hyp_vcpu->vcpu.arch.iflags;
 
        host_cpu_if->vgic_hcr           = hyp_cpu_if->vgic_hcr;
+       host_cpu_if->vgic_vmcr          = hyp_cpu_if->vgic_vmcr;
        for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
                host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }
@@ -464,11 +465,11 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
        __vgic_v3_init_lrs();
 }
 
-static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-       __vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
+       __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
 }
 
 static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
@@ -616,7 +617,7 @@ static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
        HANDLE_FUNC(__kvm_flush_cpu_context),
        HANDLE_FUNC(__kvm_timer_set_cntvoff),
-       HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+       HANDLE_FUNC(__vgic_v3_save_aprs),
        HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
        HANDLE_FUNC(__pkvm_reserve_vm),
        HANDLE_FUNC(__pkvm_unreserve_vm),
index 2509b52bbd6290ff9f75b986c2b0eca7240fd502..cafbb41b4c334e4c114aba13548c66198cb36f4e 100644 (file)
@@ -235,6 +235,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
                }
        }
 
+       cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+
        if (cpu_if->vgic_hcr & ICH_HCR_EL2_LRENPIE) {
                u64 val = read_gicreg(ICH_HCR_EL2);
                cpu_if->vgic_hcr &= ~ICH_HCR_EL2_EOIcount;
@@ -332,10 +334,6 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
        u64 val;
 
-       if (!cpu_if->vgic_sre) {
-               cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-       }
-
        /* Only restore SRE if the host implements the GICv2 interface */
        if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
                val = read_gicreg(ICC_SRE_EL2);
@@ -357,7 +355,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
                write_gicreg(0, ICH_HCR_EL2);
 }
 
-static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
        u64 val;
        u32 nr_pre_bits;
@@ -518,13 +516,6 @@ static void __vgic_v3_write_vmcr(u32 vmcr)
        write_gicreg(vmcr, ICH_VMCR_EL2);
 }
 
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
-{
-       __vgic_v3_save_aprs(cpu_if);
-       if (cpu_if->vgic_sre)
-               cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
-}
-
 void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
        __vgic_v3_compat_mode_enable();
index 5a2165a8d22c031b2b2f79344791f189eede83ac..07e93acafd04d793ff9daab75fc474e356c2cc4e 100644 (file)
@@ -451,6 +451,7 @@ void vgic_v2_save_state(struct kvm_vcpu *vcpu)
        if (!base)
                return;
 
+       cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
 
        if (used_lrs)
                save_lrs(vcpu, base);
@@ -495,6 +496,5 @@ void vgic_v2_put(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
-       cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
        cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
index 1fc9e0780abe6c9fef5af22cf61e3b4869e60bd9..1531e4907c652d19bba76bd7789bf0a282d421b6 100644 (file)
@@ -340,7 +340,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
        u64 val;
        int i;
 
-       __vgic_v3_save_vmcr_aprs(s_cpu_if);
+       __vgic_v3_save_aprs(s_cpu_if);
        __vgic_v3_deactivate_traps(s_cpu_if);
        __vgic_v3_save_state(s_cpu_if);
 
index bcce7f35a6d678fdbba402a122f17b6daabb56d2..5b276e303aab23477f5dc7435c86c359cc5ff6e2 100644 (file)
@@ -815,7 +815,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
        }
 
        if (likely(!is_protected_kvm_enabled()))
-               kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+               kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
        WARN_ON(vgic_v4_put(vcpu));
 
        if (has_vhe())