KVM: arm64: Eagerly switch ZCR_EL{1,2}
author     Mark Rutland <mark.rutland@arm.com>
           Tue, 8 Apr 2025 18:10:06 +0000 (19:10 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 2 May 2025 05:44:17 +0000 (07:44 +0200)
[ Upstream commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59 ]

In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:

* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
  by the guest hypervisor, which may be less than or equal to that
  guest's maximum VL.

  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.

* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
  which may be less than or equal to that guest's maximum VL.

  Note: in this case hyp code traps host SVE usage and lazily restores
  ZCR_EL2 to the host's maximum VL, which may be greater than the
  guest's maximum VL.
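
As background for the ZCR programming below: the ZCR_ELx.LEN field
encodes the vector length in 128-bit quadwords (VQ), minus one, which
is why the patch writes "vq - 1" values to ZCR. A minimal illustrative
sketch (not kernel code; the helper name is hypothetical):

| /* Illustrative only: map a VL in bytes to a ZCR_ELx.LEN value. */
| static inline unsigned int vl_to_zcr_len(unsigned int vl_bytes)
| {
|         unsigned int vq = vl_bytes / 16; /* 1 VQ == 128 bits == 16 bytes */
|
|         return vq - 1;  /* e.g. VL 256 bytes -> VQ 16 -> LEN 15 */
| }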

Such a mismatch can arise in the window between exiting a guest and
kvm_arch_vcpu_put_fp().
If a softirq is taken during this period and the softirq handler tries
to use kernel-mode NEON, then the kernel will fail to save the guest's
FPSIMD/SVE state, and will pend a SIGKILL for the current thread.

This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
FPSIMD/SVE state with the guest's maximum SVE VL, and
fpsimd_save_user_state() verifies that the live SVE VL is as expected
before attempting to save the register state:

| if (WARN_ON(sve_get_vl() != vl)) {
|         force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
|         return;
| }
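
Concretely, the failing sequence is as follows (an illustrative sketch
of the call chain, not verbatim kernel code):

| // After guest exit, before kvm_arch_vcpu_put_fp():
| //   <softirq taken>
| //     handler starts kernel-mode NEON
| //       fpsimd_save_user_state()
| //         live SVE VL (read back from ZCR) != the guest's maximum VL
| //         that the state was bound with, so the WARN_ON() above fires
| //         and a SIGKILL is pended for the current thread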

Fix this and make this a bit easier to reason about by always eagerly
switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
happening, there's no need to trap host SVE usage, and the nVHE/hVHE
__deactivate_cptr_traps() logic can be simplified to enable host access
to all present FPSIMD/SVE/SME features.
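
In outline, the eager switch looks like this (a simplified sketch of
the hyp/include/hyp/switch.h hunk below; "host_max_vq" stands in for
the host's maximum VQ, and the VHE/nVHE differences on the exit path
are elided):

| /* Sketch: entry, when the guest owns the FP regs and has SVE */
| write_sysreg_el2(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR);
| write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
|
| /* Sketch: exit */
| __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
| write_sysreg_el2(host_max_vq - 1, SYS_ZCR); /* unconstrain the host */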

In protected nVHE/hVHE modes, the host's state is always saved/restored
by hyp, and the guest's state is saved prior to exit to the host, so
from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
the host's ZCR_EL1 is never clobbered by hyp.

Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
[ v6.6 lacks pKVM saving of host SVE state, pull in discovery of maximum
  host VL separately -- broonie ]
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/reset.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2e0952134e2e1d191912958f131eb59388fdf718..6d7b6b5d076d4b09a276a3841f31ef06b5cc49f5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -64,6 +64,7 @@ enum kvm_mode kvm_get_mode(void);
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 extern unsigned int kvm_sve_max_vl;
+extern unsigned int kvm_host_sve_max_vl;
 int kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 657d0c94cf828964cf7e9a8c9b0940f2f629f76a..308df86f9a4b33b9778207d4f57febb273ada865 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -117,5 +117,12 @@ void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
+
+static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.flags & KVM_ARM64_FP_ENABLED;
+}
+
 
 #endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 1360ddd4137bf221fa7d36a96e06ea0e6bd05dd5..cfda503c8b3ffba214f161817d5ba77765761afe 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -129,15 +129,16 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
        local_irq_save(flags);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-               if (vcpu_has_sve(vcpu)) {
-                       __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
-
-                       /* Restore the VL that was saved when bound to the CPU */
-                       if (!has_vhe())
-                               sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
-                                                      SYS_ZCR_EL1);
-               }
-
+               /*
+                * Flush (save and invalidate) the fpsimd/sve state so that if
+                * the host tries to use fpsimd/sve, it's not using stale data
+                * from the guest.
+                *
+                * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
+                * context unconditionally, in both nVHE and VHE. This allows
+                * the kernel to restore the fpsimd/sve state, including ZCR_EL1
+                * when needed.
+                */
                fpsimd_save_and_flush_cpu_state();
        }
 
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 435346ea1504e158f7877499514118d39400f247..d8c94c45cb2f2f815d0f5e9e58f9fd4e6eb572f2 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
 alternative_else_nop_endif
        mrs     x1, isr_el1
        cbz     x1,  1f
+
+       // Ensure that __guest_enter() always provides a context
+       // synchronization event so that callers don't need ISBs for anything
+       // that would usually be synchronized by the ERET.
+       isb
        mov     x0, #ARM_EXCEPTION_IRQ
        ret
 
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index cc102e46b0e2563b35e6a912d8bb8f23a753b9e3..797544662a955d67a9da1f2213abcdd66edc4ab2 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -215,6 +215,61 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
        write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
 }
 
+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       u64 zcr_el1, zcr_el2;
+
+       if (!guest_owns_fp_regs(vcpu))
+               return;
+
+       if (vcpu_has_sve(vcpu)) {
+               zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+
+               write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+               zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1);
+               write_sysreg_el1(zcr_el1, SYS_ZCR);
+       }
+}
+
+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+{
+       u64 zcr_el1, zcr_el2;
+
+       if (!guest_owns_fp_regs(vcpu))
+               return;
+
+       /*
+        * When the guest owns the FP regs, we know that guest+hyp traps for
+        * any FPSIMD/SVE/SME features exposed to the guest have been disabled
+        * by either fpsimd_lazy_switch_to_guest() or __hyp_handle_fpsimd()
+        * prior to __guest_enter(). As __guest_enter() guarantees a context
+        * synchronization event, we don't need an ISB here to avoid taking
+        * traps for anything that was exposed to the guest.
+        */
+       if (vcpu_has_sve(vcpu)) {
+               zcr_el1 = read_sysreg_el1(SYS_ZCR);
+               __vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1;
+
+               /*
+                * The guest's state is always saved using the guest's max VL.
+                * Ensure that the host has the guest's max VL active such that
+                * the host can save the guest's state lazily, but don't
+                * artificially restrict the host to the guest's max VL.
+                */
+               if (has_vhe()) {
+                       zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+                       write_sysreg_el2(zcr_el2, SYS_ZCR);
+               } else {
+                       zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
+                       write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+                       zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
+                       write_sysreg_el1(zcr_el1, SYS_ZCR);
+               }
+       }
+}
+
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2da6aa8da8680d1e6c17415e0ee16d13fd26d3bc..a446883d5b9aafc4daf5ca6d2a9f92d600fe7256 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
 
 DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
+unsigned int kvm_host_sve_max_vl;
+
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
 
+       fpsimd_lazy_switch_to_guest(kern_hyp_va(vcpu));
        cpu_reg(host_ctxt, 1) =  __kvm_vcpu_run(kern_hyp_va(vcpu));
+       fpsimd_lazy_switch_to_host(kern_hyp_va(vcpu));
 }
 
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
@@ -237,11 +241,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
        case ESR_ELx_EC_SMC64:
                handle_host_smc(host_ctxt);
                break;
-       case ESR_ELx_EC_SVE:
-               sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
-               isb();
-               sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
-               break;
        case ESR_ELx_EC_IABT_LOW:
        case ESR_ELx_EC_DABT_LOW:
                handle_host_mem_abort(host_ctxt);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c0885197f2a55a8cc692a404627a0a46c2cf9316..fff7491d8351f2216352dc29372ba6a2012b132b 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -34,15 +34,13 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
-static bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.flags & KVM_ARM64_FP_ENABLED;
-}
-
 static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 {
        u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
 
+       if (!guest_owns_fp_regs(vcpu))
+               __activate_traps_fpsimd32(vcpu);
+
        /* !hVHE case upstream */
        if (1) {
                val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
@@ -52,12 +50,22 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 
                if (!guest_owns_fp_regs(vcpu))
                        val |= CPTR_EL2_TFP;
+
+               write_sysreg(val, cptr_el2);
        }
+}
 
-       if (!guest_owns_fp_regs(vcpu))
-               __activate_traps_fpsimd32(vcpu);
+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+       /* !hVHE case upstream */
+       if (1) {
+               u64 val = CPTR_NVHE_EL2_RES1;
 
-       write_sysreg(val, cptr_el2);
+               if (!cpus_have_final_cap(ARM64_SVE))
+                       val |= CPTR_EL2_TZ;
+
+               write_sysreg(val, cptr_el2);
+       }
 }
 
 static void __activate_traps(struct kvm_vcpu *vcpu)
@@ -86,7 +94,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
        extern char __kvm_hyp_host_vector[];
-       u64 cptr;
 
        ___deactivate_traps(vcpu);
 
@@ -111,11 +118,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
        write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-       cptr = CPTR_EL2_DEFAULT;
-       if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
-               cptr |= CPTR_EL2_TZ;
-
-       write_sysreg(cptr, cptr_el2);
+       __deactivate_cptr_traps(vcpu);
        write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
 
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 813e6e2178c162f106839d4a239094a6aec145fc..d8a8628a9d70f551c38eb08b6e75e76bf5098dda 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -114,6 +114,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        sysreg_save_host_state_vhe(host_ctxt);
 
+       fpsimd_lazy_switch_to_guest(vcpu);
+
        /*
         * ARM erratum 1165522 requires us to configure both stage 1 and
         * stage 2 translation for the guest context before we clear
@@ -144,6 +146,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        __deactivate_traps(vcpu);
 
+       fpsimd_lazy_switch_to_host(vcpu);
+
        sysreg_restore_host_state_vhe(host_ctxt);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5ce36b0a3343963924e28317acf6b865801dfa9f..deb2056382797b271f7a203da49cf88f0a0723de 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -42,11 +42,14 @@ static u32 kvm_ipa_limit;
                                 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
 
 unsigned int kvm_sve_max_vl;
+unsigned int kvm_host_sve_max_vl;
 
 int kvm_arm_init_sve(void)
 {
        if (system_supports_sve()) {
                kvm_sve_max_vl = sve_max_virtualisable_vl;
+               kvm_host_sve_max_vl = sve_max_vl;
+               kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
 
                /*
                 * The get_sve_reg()/set_sve_reg() ioctl interface will need