From 978c0ccf80ceaa764b5f7c6bbabf2c8ca9b20711 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 8 Apr 2025 19:10:01 +0100
Subject: [PATCH] arm64/fpsimd: Stop using TIF_SVE to manage register saving
 in KVM

[ Upstream commit 62021cc36add7b2c015b837f7893f2fb4b8c2586 ]

Now that we are explicitly telling the host FP code which register
state it needs to save we can remove the manipulation of TIF_SVE
from the KVM code, simplifying it and allowing us to optimise our
handling of normal tasks.

Remove the manipulation of TIF_SVE from KVM and instead rely on
to_save to ensure we save the correct data for it.

There should be no functional or performance impact from this change.

Signed-off-by: Mark Brown
Reviewed-by: Catalin Marinas
Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20221115094640.112848-5-broonie@kernel.org
Signed-off-by: Will Deacon
[ Mark: trivial backport ]
Signed-off-by: Mark Rutland
Signed-off-by: Mark Brown
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm64/kernel/fpsimd.c | 40 ++++++++++++++++------------------------
 arch/arm64/kvm/fpsimd.c    |  3 ---
 2 files changed, 16 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 105b8aa0c0383..e8f10daaa0d7a 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -318,7 +318,13 @@ static void task_fpsimd_load(void)
 
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
- * date with respect to the CPU registers.
+ * date with respect to the CPU registers. Note carefully that the
+ * current context is the context last bound to the CPU stored in
+ * last, if KVM is involved this may be the guest VM context rather
+ * than the host thread for the VM pointed to by current. This means
+ * that we must always reference the state storage via last rather
+ * than via current, if we are saving KVM state then it will have
+ * ensured that the type of registers to save is set in last->to_save.
  */
 static void fpsimd_save(void)
 {
@@ -334,9 +340,15 @@ static void fpsimd_save(void)
 	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
 		return;
 
-	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
-	    test_thread_flag(TIF_SVE)) {
-		if (WARN_ON(sve_get_vl() != last->sve_vl)) {
+	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
+	    last->to_save == FP_STATE_SVE) {
+		save_sve_regs = true;
+		vl = last->sve_vl;
+	}
+
+	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
+		/* Get the configured VL from RDVL, will account for SM */
+		if (WARN_ON(sve_get_vl() != vl)) {
 			/*
 			 * Can't save the user regs, so current would
 			 * re-enter user with corrupt state.
@@ -347,26 +359,6 @@ static void fpsimd_save(void)
 		}
 	}
 
-	if (test_thread_flag(TIF_SVE)) {
-		save_sve_regs = true;
-		vl = last->sve_vl;
-	}
-
-	/*
-	 * Validate that an explicitly specified state to save is
-	 * consistent with the task state.
-	 */
-	switch (last->to_save) {
-	case FP_STATE_CURRENT:
-		break;
-	case FP_STATE_FPSIMD:
-		WARN_ON_ONCE(save_sve_regs);
-		break;
-	case FP_STATE_SVE:
-		WARN_ON_ONCE(!save_sve_regs);
-		break;
-	}
-
 	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
 		sve_save_state((char *)last->sve_state +
 			       sve_ffr_offset(last->sve_vl),
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 54a31c97eb7aa..2e0f44f4c470a 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -110,7 +110,6 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 					 &vcpu->arch.fp_type, fp_type);
 
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
-		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
 	}
 }
 
@@ -151,7 +150,5 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}
 
-	update_thread_flag(TIF_SVE, 0);
-
 	local_irq_restore(flags);
 }
-- 
2.47.3
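
Note on the new logic: the sketch below is a stand-alone, user-space C model
of the decision fpsimd_save() makes after this patch, included only to
illustrate how last->to_save takes over the role that KVM's TIF_SVE
manipulation used to play. It is not kernel code: struct last_ctx, the
must_save_sve() helper and the tif_sve parameter are hypothetical stand-ins
for the kernel's per-CPU "last" state and thread-flag handling; only the
FP_STATE_* values, to_save and sve_vl correspond to names in the patch.

#include <stdbool.h>
#include <stdio.h>

enum fp_type {
	FP_STATE_CURRENT,	/* decide from the saved task's own flags */
	FP_STATE_FPSIMD,	/* owner asked for FPSIMD-only state */
	FP_STATE_SVE,		/* owner asked for full SVE state */
};

/* Hypothetical stand-in for the per-CPU "last bound context" record. */
struct last_ctx {
	enum fp_type to_save;	/* what the owner of the registers asked for */
	unsigned int sve_vl;	/* SVE vector length bound to the CPU */
};

/*
 * Model of the post-patch check: save SVE either because the owner said so
 * explicitly (KVM guest state) or because a normal task has TIF_SVE set.
 */
static bool must_save_sve(const struct last_ctx *last, bool tif_sve,
			  unsigned int *vl)
{
	if ((last->to_save == FP_STATE_CURRENT && tif_sve) ||
	    last->to_save == FP_STATE_SVE) {
		*vl = last->sve_vl;
		return true;
	}
	return false;
}

int main(void)
{
	struct last_ctx guest = { .to_save = FP_STATE_SVE, .sve_vl = 512 };
	struct last_ctx host  = { .to_save = FP_STATE_CURRENT, .sve_vl = 256 };
	unsigned int vl = 0;
	bool save;

	/* A guest context states explicitly that SVE must be saved... */
	save = must_save_sve(&guest, false, &vl);
	printf("guest: save SVE? %d (vl=%u)\n", save, vl);

	/* ...while a host task is judged purely by its own TIF_SVE flag. */
	save = must_save_sve(&host, false, &vl);
	printf("host, TIF_SVE clear: save SVE? %d\n", save);
	save = must_save_sve(&host, true, &vl);
	printf("host, TIF_SVE set: save SVE? %d (vl=%u)\n", save, vl);
	return 0;
}

The model mirrors the commit message: KVM now communicates through to_save
when it binds guest state to the CPU, so it no longer needs to toggle TIF_SVE
on the host thread, and host tasks are handled from their own flags as before.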