--- /dev/null
+From stable+bounces-128302-greg=kroah.com@vger.kernel.org Fri Apr 4 15:28:08 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:36 +0100
+Subject: arm64/fpsimd: Have KVM explicitly say which FP registers to save
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>
+Message-ID: <20250404-stable-sve-6-1-v1-3-cd5c9eb52d49@kernel.org>
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit deeb8f9a80fdae5a62525656d65c7070c28bd3a4 ]
+
+In order to avoid needlessly saving and restoring the guest registers, KVM
+relies on the host FPSIMD code to save the guest registers when we context
+switch away from the guest. This is done by binding the KVM guest state to
+the CPU on top of the task state that was originally there, then carefully
+managing the TIF_SVE flag for the task to cause the host to save the full
+SVE state when needed regardless of the needs of the host task. This works
+well enough but isn't terribly direct about what is going on and makes it
+much more complicated to try to optimise what we're doing with the SVE
+register state.
+
+Let's instead have KVM pass in the register state it wants saving when it
+binds to the CPU. We introduce a new FP_STATE_CURRENT for use during
+normal task binding to indicate that we should base our decisions on the
+current task. This should not be used when actually saving. Ideally we
+might want to use a separate enum for the type to save, but that enum and
+its values would then need to be named, which is difficult to do clearly
+and unambiguously.
+
+In order to ease any future debugging that might be required this patch
+does not actually update any of the decision making about what to save,
+it merely starts tracking the new information and warns if the requested
+state is not what we would otherwise have decided to save.
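+
+As an illustration only (this is not part of the change itself; the
+fprintf() reporting stands in for WARN_ON_ONCE()), the consistency check
+can be modelled in plain userspace C using the enum from the patch:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum fp_type { FP_STATE_CURRENT, FP_STATE_FPSIMD, FP_STATE_SVE };
+
+  /* Warn if an explicitly requested state disagrees with what the
+   * save path would have chosen on its own. */
+  static void check_to_save(enum fp_type to_save, bool save_sve_regs)
+  {
+          switch (to_save) {
+          case FP_STATE_CURRENT:
+                  break;  /* no explicit request, nothing to check */
+          case FP_STATE_FPSIMD:
+                  if (save_sve_regs)
+                          fprintf(stderr, "FPSIMD requested, SVE chosen\n");
+                  break;
+          case FP_STATE_SVE:
+                  if (!save_sve_regs)
+                          fprintf(stderr, "SVE requested, FPSIMD chosen\n");
+                  break;
+          }
+  }
+
+  int main(void)
+  {
+          check_to_save(FP_STATE_SVE, false);  /* would warn */
+          return 0;
+  }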
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221115094640.112848-4-broonie@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fpsimd.h | 3 ++-
+ arch/arm64/include/asm/processor.h | 1 +
+ arch/arm64/kernel/fpsimd.c | 27 ++++++++++++++++++++++++---
+ arch/arm64/kvm/fpsimd.c | 9 ++++++++-
+ 4 files changed, 35 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -61,7 +61,8 @@ extern void fpsimd_kvm_prepare(void);
+ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
+ void *sve_state, unsigned int sve_vl,
+ void *za_state, unsigned int sme_vl,
+- u64 *svcr, enum fp_type *type);
++ u64 *svcr, enum fp_type *type,
++ enum fp_type to_save);
+
+ extern void fpsimd_flush_task_state(struct task_struct *target);
+ extern void fpsimd_save_and_flush_cpu_state(void);
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -123,6 +123,7 @@ enum vec_type {
+ };
+
+ enum fp_type {
++ FP_STATE_CURRENT, /* Save based on current task state. */
+ FP_STATE_FPSIMD,
+ FP_STATE_SVE,
+ };
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -126,6 +126,7 @@ struct fpsimd_last_state_struct {
+ unsigned int sve_vl;
+ unsigned int sme_vl;
+ enum fp_type *fp_type;
++ enum fp_type to_save;
+ };
+
+ static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
+@@ -356,7 +357,8 @@ void task_set_vl_onexec(struct task_stru
+ * but userspace is discouraged from relying on this.
+ *
+ * task->thread.sve_state does not need to be non-NULL, valid or any
+- * particular size: it must not be dereferenced.
++ * particular size: it must not be dereferenced and any data stored
++ * there should be considered stale and not referenced.
+ *
+ * * SVE state - FP_STATE_SVE:
+ *
+@@ -369,7 +371,9 @@ void task_set_vl_onexec(struct task_stru
+ * task->thread.uw.fpsimd_state should be ignored.
+ *
+ * task->thread.sve_state must point to a valid buffer at least
+- * sve_state_size(task) bytes in size.
++ * sve_state_size(task) bytes in size. The data stored in
++ * task->thread.uw.fpsimd_state.vregs should be considered stale
++ * and not referenced.
+ *
+ * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
+ * irrespective of whether TIF_SVE is clear or set, since these are
+@@ -459,6 +463,21 @@ static void fpsimd_save(void)
+ vl = last->sve_vl;
+ }
+
++ /*
++ * Validate that an explicitly specified state to save is
++ * consistent with the task state.
++ */
++ switch (last->to_save) {
++ case FP_STATE_CURRENT:
++ break;
++ case FP_STATE_FPSIMD:
++ WARN_ON_ONCE(save_sve_regs);
++ break;
++ case FP_STATE_SVE:
++ WARN_ON_ONCE(!save_sve_regs);
++ break;
++ }
++
+ if (system_supports_sme()) {
+ u64 *svcr = last->svcr;
+
+@@ -1709,6 +1728,7 @@ static void fpsimd_bind_task_to_cpu(void
+ last->sme_vl = task_get_sme_vl(current);
+ last->svcr = ¤t->thread.svcr;
+ last->fp_type = ¤t->thread.fp_type;
++ last->to_save = FP_STATE_CURRENT;
+ current->thread.fpsimd_cpu = smp_processor_id();
+
+ /*
+@@ -1733,7 +1753,7 @@ static void fpsimd_bind_task_to_cpu(void
+ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl, void *za_state,
+ unsigned int sme_vl, u64 *svcr,
+- enum fp_type *type)
++ enum fp_type *type, enum fp_type to_save)
+ {
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+@@ -1748,6 +1768,7 @@ void fpsimd_bind_state_to_cpu(struct use
+ last->sve_vl = sve_vl;
+ last->sme_vl = sme_vl;
+ last->fp_type = type;
++ last->to_save = to_save;
+ }
+
+ /*
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -130,9 +130,16 @@ void kvm_arch_vcpu_ctxflush_fp(struct kv
+ */
+ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
+ {
++ enum fp_type fp_type;
++
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
++ if (vcpu_has_sve(vcpu))
++ fp_type = FP_STATE_SVE;
++ else
++ fp_type = FP_STATE_FPSIMD;
++
+ /*
+ * Currently we do not support SME guests so SVCR is
+ * always 0 and we just need a variable to point to.
+@@ -141,7 +148,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm
+ vcpu->arch.sve_state,
+ vcpu->arch.sve_max_vl,
+ NULL, 0, &vcpu->arch.svcr,
+- &vcpu->arch.fp_type);
++ &vcpu->arch.fp_type, fp_type);
+
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
--- /dev/null
+From stable+bounces-128303-greg=kroah.com@vger.kernel.org Fri Apr 4 15:28:32 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:37 +0100
+Subject: arm64/fpsimd: Stop using TIF_SVE to manage register saving in KVM
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>
+Message-ID: <20250404-stable-sve-6-1-v1-4-cd5c9eb52d49@kernel.org>
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 62021cc36add7b2c015b837f7893f2fb4b8c2586 ]
+
+Now that we are explicitly telling the host FP code which register state
+it needs to save we can remove the manipulation of TIF_SVE from the KVM
+code, simplifying it and allowing us to optimise our handling of normal
+tasks. Remove the manipulation of TIF_SVE from KVM and instead rely on
+to_save to ensure we save the correct data for it.
+
+There should be no functional or performance impact from this change.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221115094640.112848-5-broonie@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Mark: trivial backport ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/fpsimd.c | 22 ++++------------------
+ arch/arm64/kvm/fpsimd.c | 3 ---
+ 2 files changed, 4 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -439,8 +439,8 @@ static void task_fpsimd_load(void)
+ * last, if KVM is involved this may be the guest VM context rather
+ * than the host thread for the VM pointed to by current. This means
+ * that we must always reference the state storage via last rather
+- * than via current, other than the TIF_ flags which KVM will
+- * carefully maintain for us.
++ * than via current, if we are saving KVM state then it will have
++ * ensured that the type of registers to save is set in last->to_save.
+ */
+ static void fpsimd_save(void)
+ {
+@@ -457,27 +457,13 @@ static void fpsimd_save(void)
+ if (test_thread_flag(TIF_FOREIGN_FPSTATE))
+ return;
+
+- if (test_thread_flag(TIF_SVE)) {
++ if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
++ last->to_save == FP_STATE_SVE) {
+ save_sve_regs = true;
+ save_ffr = true;
+ vl = last->sve_vl;
+ }
+
+- /*
+- * Validate that an explicitly specified state to save is
+- * consistent with the task state.
+- */
+- switch (last->to_save) {
+- case FP_STATE_CURRENT:
+- break;
+- case FP_STATE_FPSIMD:
+- WARN_ON_ONCE(save_sve_regs);
+- break;
+- case FP_STATE_SVE:
+- WARN_ON_ONCE(!save_sve_regs);
+- break;
+- }
+-
+ if (system_supports_sme()) {
+ u64 *svcr = last->svcr;
+
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -151,7 +151,6 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm
+ &vcpu->arch.fp_type, fp_type);
+
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+- update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
+ }
+ }
+
+@@ -208,7 +207,5 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+ sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
+ }
+
+- update_thread_flag(TIF_SVE, 0);
+-
+ local_irq_restore(flags);
+ }
--- /dev/null
+From stable+bounces-128301-greg=kroah.com@vger.kernel.org Fri Apr 4 15:28:10 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:35 +0100
+Subject: arm64/fpsimd: Track the saved FPSIMD state type separately to TIF_SVE
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>
+Message-ID: <20250404-stable-sve-6-1-v1-2-cd5c9eb52d49@kernel.org>
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit baa8515281b30861cff3da7db70662d2a25c6440 ]
+
+When we save the state for the floating point registers this can be done
+in the form visible through either the FPSIMD V registers or the SVE Z and
+P registers. At present we track which format is currently used based on
+TIF_SVE and the SME streaming mode state but particularly in the SVE case
+this limits our options for optimising things, especially around syscalls.
+Introduce a new enum which we place together with saved floating point
+state in both thread_struct and the KVM guest state which explicitly
+states which format is active and keep it up to date when we change it.
+
+At present we do not use this state except to verify that it has the
+expected value when loading the state; future patches will introduce
+functional changes.
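+
+As a minimal userspace sketch of the idea (not kernel code; the struct
+and helpers below are invented for illustration): record which view was
+written at save time and check it before loading that view back:
+
+  #include <assert.h>
+
+  enum fp_type { FP_STATE_FPSIMD, FP_STATE_SVE };
+
+  struct fp_state {
+          enum fp_type fp_type;   /* which view the saved data is in */
+          /* register storage omitted */
+  };
+
+  static void save_fpsimd(struct fp_state *s) { s->fp_type = FP_STATE_FPSIMD; }
+  static void save_sve(struct fp_state *s) { s->fp_type = FP_STATE_SVE; }
+
+  static void load_sve(struct fp_state *s)
+  {
+          /* only valid if the SVE view is what was actually saved */
+          assert(s->fp_type == FP_STATE_SVE);
+  }
+
+  int main(void)
+  {
+          struct fp_state s;
+
+          save_fpsimd(&s);
+          save_sve(&s);
+          load_sve(&s);
+          return 0;
+  }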
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221115094640.112848-3-broonie@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Mark: fix conflicts due to earlier backports ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fpsimd.h | 2 -
+ arch/arm64/include/asm/kvm_host.h | 12 +++++++
+ arch/arm64/include/asm/processor.h | 6 +++
+ arch/arm64/kernel/fpsimd.c | 58 +++++++++++++++++++++++++++----------
+ arch/arm64/kernel/process.c | 2 +
+ arch/arm64/kernel/ptrace.c | 3 +
+ arch/arm64/kernel/signal.c | 7 +++-
+ arch/arm64/kvm/fpsimd.c | 3 +
+ 8 files changed, 74 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void);
+ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
+ void *sve_state, unsigned int sve_vl,
+ void *za_state, unsigned int sme_vl,
+- u64 *svcr);
++ u64 *svcr, enum fp_type *type);
+
+ extern void fpsimd_flush_task_state(struct task_struct *target);
+ extern void fpsimd_save_and_flush_cpu_state(void);
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -309,8 +309,18 @@ struct vcpu_reset_state {
+ struct kvm_vcpu_arch {
+ struct kvm_cpu_context ctxt;
+
+- /* Guest floating point state */
++ /*
++ * Guest floating point state
++ *
++ * The architecture has two main floating point extensions,
++ * the original FPSIMD and SVE. These have overlapping
++ * register views, with the FPSIMD V registers occupying the
++ * low 128 bits of the SVE Z registers. When the core
++ * floating point code saves the register state of a task it
++ * records which view it saved in fp_type.
++ */
+ void *sve_state;
++ enum fp_type fp_type;
+ unsigned int sve_max_vl;
+ u64 svcr;
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -122,6 +122,11 @@ enum vec_type {
+ ARM64_VEC_MAX,
+ };
+
++enum fp_type {
++ FP_STATE_FPSIMD,
++ FP_STATE_SVE,
++};
++
+ struct cpu_context {
+ unsigned long x19;
+ unsigned long x20;
+@@ -152,6 +157,7 @@ struct thread_struct {
+ struct user_fpsimd_state fpsimd_state;
+ } uw;
+
++ enum fp_type fp_type; /* registers FPSIMD or SVE? */
+ unsigned int fpsimd_cpu;
+ void *sve_state; /* SVE registers, if any */
+ void *za_state; /* ZA register, if any */
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -125,6 +125,7 @@ struct fpsimd_last_state_struct {
+ u64 *svcr;
+ unsigned int sve_vl;
+ unsigned int sme_vl;
++ enum fp_type *fp_type;
+ };
+
+ static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
+@@ -330,15 +331,6 @@ void task_set_vl_onexec(struct task_stru
+ * The task can execute SVE instructions while in userspace without
+ * trapping to the kernel.
+ *
+- * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
+- * corresponding Zn), P0-P15 and FFR are encoded in
+- * task->thread.sve_state, formatted appropriately for vector
+- * length task->thread.sve_vl or, if SVCR.SM is set,
+- * task->thread.sme_vl.
+- *
+- * task->thread.sve_state must point to a valid buffer at least
+- * sve_state_size(task) bytes in size.
+- *
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+@@ -348,7 +340,15 @@ void task_set_vl_onexec(struct task_stru
+ * do_sve_acc() to be called, which does some preparation and then
+ * sets TIF_SVE.
+ *
+- * When stored, FPSIMD registers V0-V31 are encoded in
++ * During any syscall, the kernel may optionally clear TIF_SVE and
++ * discard the vector state except for the FPSIMD subset.
++ *
++ * The data will be stored in one of two formats:
++ *
++ * * FPSIMD only - FP_STATE_FPSIMD:
++ *
++ * When the FPSIMD only state stored task->thread.fp_type is set to
++ * FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in
+ * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
+ * logically zero but not stored anywhere; P0-P15 and FFR are not
+ * stored and have unspecified values from userspace's point of
+@@ -358,6 +358,19 @@ void task_set_vl_onexec(struct task_stru
+ * task->thread.sve_state does not need to be non-NULL, valid or any
+ * particular size: it must not be dereferenced.
+ *
++ * * SVE state - FP_STATE_SVE:
++ *
++ * When the full SVE state is stored task->thread.fp_type is set to
++ * FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
++ * corresponding Zn), P0-P15 and FFR are encoded in in
++ * task->thread.sve_state, formatted appropriately for vector
++ * length task->thread.sve_vl or, if SVCR.SM is set,
++ * task->thread.sme_vl. The storage for the vector registers in
++ * task->thread.uw.fpsimd_state should be ignored.
++ *
++ * task->thread.sve_state must point to a valid buffer at least
++ * sve_state_size(task) bytes in size.
++ *
+ * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
+ * irrespective of whether TIF_SVE is clear or set, since these are
+ * not vector length dependent.
+@@ -404,12 +417,15 @@ static void task_fpsimd_load(void)
+ }
+ }
+
+- if (restore_sve_regs)
++ if (restore_sve_regs) {
++ WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
+ sve_load_state(sve_pffr(¤t->thread),
+ ¤t->thread.uw.fpsimd_state.fpsr,
+ restore_ffr);
+- else
++ } else {
++ WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
+ fpsimd_load_state(¤t->thread.uw.fpsimd_state);
++ }
+ }
+
+ /*
+@@ -474,8 +490,10 @@ static void fpsimd_save(void)
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(vl),
+ &last->st->fpsr, save_ffr);
++ *last->fp_type = FP_STATE_SVE;
+ } else {
+ fpsimd_save_state(last->st);
++ *last->fp_type = FP_STATE_FPSIMD;
+ }
+ }
+
+@@ -851,8 +869,10 @@ int vec_set_vector_length(struct task_st
+
+ fpsimd_flush_task_state(task);
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
+- thread_sm_enabled(&task->thread))
++ thread_sm_enabled(&task->thread)) {
+ sve_to_fpsimd(task);
++ task->thread.fp_type = FP_STATE_FPSIMD;
++ }
+
+ if (system_supports_sme()) {
+ if (type == ARM64_VEC_SME ||
+@@ -1383,6 +1403,7 @@ static void sve_init_regs(void)
+ fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_to_sve(current);
++ current->thread.fp_type = FP_STATE_SVE;
+ fpsimd_flush_task_state(current);
+ }
+ }
+@@ -1612,6 +1633,8 @@ void fpsimd_flush_thread(void)
+ current->thread.svcr = 0;
+ }
+
++ current->thread.fp_type = FP_STATE_FPSIMD;
++
+ put_cpu_fpsimd_context();
+ kfree(sve_state);
+ kfree(za_state);
+@@ -1660,8 +1683,10 @@ void fpsimd_kvm_prepare(void)
+ */
+ get_cpu_fpsimd_context();
+
+- if (test_and_clear_thread_flag(TIF_SVE))
++ if (test_and_clear_thread_flag(TIF_SVE)) {
+ sve_to_fpsimd(current);
++ current->thread.fp_type = FP_STATE_FPSIMD;
++ }
+
+ put_cpu_fpsimd_context();
+ }
+@@ -1683,6 +1708,7 @@ static void fpsimd_bind_task_to_cpu(void
+ last->sve_vl = task_get_sve_vl(current);
+ last->sme_vl = task_get_sme_vl(current);
+ last->svcr = ¤t->thread.svcr;
++ last->fp_type = ¤t->thread.fp_type;
+ current->thread.fpsimd_cpu = smp_processor_id();
+
+ /*
+@@ -1706,7 +1732,8 @@ static void fpsimd_bind_task_to_cpu(void
+
+ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl, void *za_state,
+- unsigned int sme_vl, u64 *svcr)
++ unsigned int sme_vl, u64 *svcr,
++ enum fp_type *type)
+ {
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+@@ -1720,6 +1747,7 @@ void fpsimd_bind_state_to_cpu(struct use
+ last->za_state = za_state;
+ last->sve_vl = sve_vl;
+ last->sme_vl = sme_vl;
++ last->fp_type = type;
+ }
+
+ /*
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -331,6 +331,8 @@ int arch_dup_task_struct(struct task_str
+ clear_tsk_thread_flag(dst, TIF_SME);
+ }
+
++ dst->thread.fp_type = FP_STATE_FPSIMD;
++
+ /* clear any pending asynchronous tag fault raised by the parent */
+ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -917,6 +917,7 @@ static int sve_set_common(struct task_st
+ clear_tsk_thread_flag(target, TIF_SVE);
+ if (type == ARM64_VEC_SME)
+ fpsimd_force_sync_to_sve(target);
++ target->thread.fp_type = FP_STATE_FPSIMD;
+ goto out;
+ }
+
+@@ -939,6 +940,7 @@ static int sve_set_common(struct task_st
+ if (!target->thread.sve_state) {
+ ret = -ENOMEM;
+ clear_tsk_thread_flag(target, TIF_SVE);
++ target->thread.fp_type = FP_STATE_FPSIMD;
+ goto out;
+ }
+
+@@ -952,6 +954,7 @@ static int sve_set_common(struct task_st
+ fpsimd_sync_to_sve(target);
+ if (type == ARM64_VEC_SVE)
+ set_tsk_thread_flag(target, TIF_SVE);
++ target->thread.fp_type = FP_STATE_SVE;
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -207,6 +207,7 @@ static int restore_fpsimd_context(struct
+ __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
+
+ clear_thread_flag(TIF_SVE);
++ current->thread.fp_type = FP_STATE_FPSIMD;
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err)
+@@ -297,6 +298,7 @@ static int restore_sve_fpsimd_context(st
+ if (sve.head.size <= sizeof(*user->sve)) {
+ clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SVCR_SM_MASK;
++ current->thread.fp_type = FP_STATE_FPSIMD;
+ goto fpsimd_only;
+ }
+
+@@ -332,6 +334,7 @@ static int restore_sve_fpsimd_context(st
+ current->thread.svcr |= SVCR_SM_MASK;
+ else
+ set_thread_flag(TIF_SVE);
++ current->thread.fp_type = FP_STATE_SVE;
+
+ fpsimd_only:
+ /* copy the FP and status/control registers */
+@@ -937,9 +940,11 @@ static void setup_return(struct pt_regs
+ * FPSIMD register state - flush the saved FPSIMD
+ * register state in case it gets loaded.
+ */
+- if (current->thread.svcr & SVCR_SM_MASK)
++ if (current->thread.svcr & SVCR_SM_MASK) {
+ memset(¤t->thread.uw.fpsimd_state, 0,
+ sizeof(current->thread.uw.fpsimd_state));
++ current->thread.fp_type = FP_STATE_FPSIMD;
++ }
+
+ current->thread.svcr &= ~(SVCR_ZA_MASK |
+ SVCR_SM_MASK);
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -140,7 +140,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm
+ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
+ vcpu->arch.sve_state,
+ vcpu->arch.sve_max_vl,
+- NULL, 0, &vcpu->arch.svcr);
++ NULL, 0, &vcpu->arch.svcr,
++ &vcpu->arch.fp_type);
+
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
--- /dev/null
+From broonie@kernel.org Fri Apr 4 15:28:05 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:44 +0100
+Subject: KVM: arm64: Calculate cptr_el2 traps on activating traps
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Fuad Tabba <tabba@google.com>, James Clark <james.clark@linaro.org>
+Message-ID: <20250404-stable-sve-6-1-v1-11-cd5c9eb52d49@kernel.org>
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
+
+Similar to VHE, calculate the value of cptr_el2 from scratch when
+activating traps. This removes the need to store cptr_el2 in every
+vcpu structure. Moreover, some traps, such as those that depend on
+whether the guest owns the fp registers, need to be set on every
+vcpu run.
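+
+Purely as an illustration (a userspace model, not the kernel code; the
+bit positions mirror the kernel's kvm_arm.h definitions and RES1 bits
+are omitted), recomputing the trap mask from current conditions on
+every run looks like this:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define CPTR_EL2_TAM  (1u << 30)
+  #define CPTR_EL2_TTA  (1u << 20)
+  #define CPTR_EL2_TSM  (1u << 12)
+  #define CPTR_EL2_TFP  (1u << 10)
+  #define CPTR_EL2_TZ   (1u << 8)
+
+  /* Rebuild the value each time rather than caching it per vCPU. */
+  static uint64_t compute_cptr(bool guest_owns_fp, bool guest_has_sve)
+  {
+          uint64_t val = CPTR_EL2_TAM | CPTR_EL2_TTA | CPTR_EL2_TSM;
+
+          if (!guest_has_sve || !guest_owns_fp)
+                  val |= CPTR_EL2_TZ;
+          if (!guest_owns_fp)
+                  val |= CPTR_EL2_TFP;
+          return val;
+  }
+
+  int main(void)
+  {
+          printf("cptr_el2 = %#llx\n",
+                 (unsigned long long)compute_cptr(false, true));
+          return 0;
+  }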
+
+Reported-by: James Clark <james.clark@linaro.org>
+Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 1 -
+ arch/arm64/kvm/arm.c | 1 -
+ arch/arm64/kvm/hyp/nvhe/pkvm.c | 15 ---------------
+ arch/arm64/kvm/hyp/nvhe/switch.c | 38 +++++++++++++++++++++++++++-----------
+ 4 files changed, 27 insertions(+), 28 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -330,7 +330,6 @@ struct kvm_vcpu_arch {
+ /* Values of trap registers for the guest. */
+ u64 hcr_el2;
+ u64 mdcr_el2;
+- u64 cptr_el2;
+
+ /* Values of trap registers for the host before guest entry. */
+ u64 mdcr_el2_host;
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1234,7 +1234,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init
+ }
+
+ vcpu_reset_hcr(vcpu);
+- vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
+
+ /*
+ * Handle the "start in power-off" case.
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -17,7 +17,6 @@ static void pvm_init_traps_aa64pfr0(stru
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+ u64 hcr_set = HCR_RW;
+ u64 hcr_clear = 0;
+- u64 cptr_set = 0;
+
+ /* Protected KVM does not support AArch32 guests. */
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+@@ -44,16 +43,10 @@ static void pvm_init_traps_aa64pfr0(stru
+ /* Trap AMU */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
+ hcr_clear |= HCR_AMVOFFEN;
+- cptr_set |= CPTR_EL2_TAM;
+ }
+
+- /* Trap SVE */
+- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
+- cptr_set |= CPTR_EL2_TZ;
+-
+ vcpu->arch.hcr_el2 |= hcr_set;
+ vcpu->arch.hcr_el2 &= ~hcr_clear;
+- vcpu->arch.cptr_el2 |= cptr_set;
+ }
+
+ /*
+@@ -83,7 +76,6 @@ static void pvm_init_traps_aa64dfr0(stru
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+ u64 mdcr_set = 0;
+ u64 mdcr_clear = 0;
+- u64 cptr_set = 0;
+
+ /* Trap/constrain PMU */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
+@@ -110,13 +102,8 @@ static void pvm_init_traps_aa64dfr0(stru
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
+ mdcr_set |= MDCR_EL2_TTRF;
+
+- /* Trap Trace */
+- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
+- cptr_set |= CPTR_EL2_TTA;
+-
+ vcpu->arch.mdcr_el2 |= mdcr_set;
+ vcpu->arch.mdcr_el2 &= ~mdcr_clear;
+- vcpu->arch.cptr_el2 |= cptr_set;
+ }
+
+ /*
+@@ -167,8 +154,6 @@ static void pvm_init_trap_regs(struct kv
+ /* Clear res0 and set res1 bits to trap potential new features. */
+ vcpu->arch.hcr_el2 &= ~(HCR_RES0);
+ vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
+- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ }
+
+ /*
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -36,23 +36,39 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_ve
+
+ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
+-static void __activate_traps(struct kvm_vcpu *vcpu)
++static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
+ {
+- u64 val;
++ u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
+
+- ___activate_traps(vcpu);
+- __activate_traps_common(vcpu);
++ /* !hVHE case upstream */
++ if (1) {
++ val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
++
++ /*
++ * Always trap SME since it's not supported in KVM.
++ * TSM is RES1 if SME isn't implemented.
++ */
++ val |= CPTR_EL2_TSM;
+
+- val = vcpu->arch.cptr_el2;
+- val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
+- if (!guest_owns_fp_regs(vcpu)) {
+- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+- __activate_traps_fpsimd32(vcpu);
++ if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu))
++ val |= CPTR_EL2_TZ;
++
++ if (!guest_owns_fp_regs(vcpu))
++ val |= CPTR_EL2_TFP;
+ }
+- if (cpus_have_final_cap(ARM64_SME))
+- val |= CPTR_EL2_TSM;
++
++ if (!guest_owns_fp_regs(vcpu))
++ __activate_traps_fpsimd32(vcpu);
+
+ write_sysreg(val, cptr_el2);
++}
++
++static void __activate_traps(struct kvm_vcpu *vcpu)
++{
++ ___activate_traps(vcpu);
++ __activate_traps_common(vcpu);
++ __activate_cptr_traps(vcpu);
++
+ write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
--- /dev/null
+From stable+bounces-128300-greg=kroah.com@vger.kernel.org Fri Apr 4 15:27:49 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:34 +0100
+Subject: KVM: arm64: Discard any SVE state when entering KVM guests
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>
+Message-ID: <20250404-stable-sve-6-1-v1-1-cd5c9eb52d49@kernel.org>
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 93ae6b01bafee8fa385aa25ee7ebdb40057f6abe ]
+
+Since 8383741ab2e773a99 (KVM: arm64: Get rid of host SVE tracking/saving)
+KVM has not tracked the host SVE state, relying on the fact that we
+currently disable SVE whenever we perform a syscall. This may not be true
+in future since performance optimisation may result in us keeping SVE
+enabled in order to avoid needing to take access traps to reenable it.
+Handle this by clearing TIF_SVE and converting the stored task state to
+FPSIMD format when preparing to run the guest. This is done with a new
+call fpsimd_kvm_prepare() to keep the direct state manipulation
+functions internal to fpsimd.c.
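+
+For illustration only (a userspace model, not the kernel's
+sve_to_fpsimd()), converting the stored state to FPSIMD format amounts
+to keeping just the low 128 bits of each Z register as the
+corresponding V register:
+
+  #include <stdint.h>
+  #include <string.h>
+
+  #define NR_VREGS   32
+  #define VREG_BYTES 16   /* a V register is 128 bits */
+
+  /* zregs holds 32 Z registers of sve_vl bytes each, back to back. */
+  static void sve_to_fpsimd_model(uint8_t vregs[NR_VREGS][VREG_BYTES],
+                                  const uint8_t *zregs, unsigned int sve_vl)
+  {
+          for (int i = 0; i < NR_VREGS; i++)
+                  memcpy(vregs[i], zregs + (size_t)i * sve_vl, VREG_BYTES);
+  }
+
+  int main(void)
+  {
+          uint8_t z[NR_VREGS * 32] = { 0 };   /* e.g. a 32-byte (256-bit) VL */
+          uint8_t v[NR_VREGS][VREG_BYTES];
+
+          sve_to_fpsimd_model(v, z, 32);
+          return 0;
+  }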
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221115094640.112848-2-broonie@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Mark: trivial backport to v6.1 ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fpsimd.h | 1 +
+ arch/arm64/kernel/fpsimd.c | 23 +++++++++++++++++++++++
+ arch/arm64/kvm/fpsimd.c | 3 ++-
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -56,6 +56,7 @@ extern void fpsimd_signal_preserve_curre
+ extern void fpsimd_preserve_current_state(void);
+ extern void fpsimd_restore_current_state(void);
+ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
++extern void fpsimd_kvm_prepare(void);
+
+ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
+ void *sve_state, unsigned int sve_vl,
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1644,6 +1644,29 @@ void fpsimd_signal_preserve_current_stat
+ }
+
+ /*
++ * Called by KVM when entering the guest.
++ */
++void fpsimd_kvm_prepare(void)
++{
++ if (!system_supports_sve())
++ return;
++
++ /*
++ * KVM does not save host SVE state since we can only enter
++ * the guest from a syscall so the ABI means that only the
++ * non-saved SVE state needs to be saved. If we have left
++ * SVE enabled for performance reasons then update the task
++ * state to be FPSIMD only.
++ */
++ get_cpu_fpsimd_context();
++
++ if (test_and_clear_thread_flag(TIF_SVE))
++ sve_to_fpsimd(current);
++
++ put_cpu_fpsimd_context();
++}
++
++/*
+ * Associate current's FPSIMD context with this cpu
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -75,11 +75,12 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_
+ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
+ {
+ BUG_ON(!current->mm);
+- BUG_ON(test_thread_flag(TIF_SVE));
+
+ if (!system_supports_fpsimd())
+ return;
+
++ fpsimd_kvm_prepare();
++
+ vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+
+ vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
--- /dev/null
+From broonie@kernel.org Fri Apr 4 15:28:09 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:45 +0100
+Subject: KVM: arm64: Eagerly switch ZCR_EL{1,2}
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-12-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59 ]
+
+In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
+CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:
+
+* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
+ by the guest hypervisor, which may be less than or equal to that
+ guest's maximum VL.
+
+ Note: in this case the value of ZCR_EL1 is immaterial due to E2H.
+
+* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
+ which may be less than or greater than the guest's maximum VL.
+
+ Note: in this case hyp code traps host SVE usage and lazily restores
+ ZCR_EL2 to the host's maximum VL, which may be greater than the
+ guest's maximum VL.
+
+This can be the case between exiting a guest and kvm_arch_vcpu_put_fp().
+If a softirq is taken during this period and the softirq handler tries
+to use kernel-mode NEON, then the kernel will fail to save the guest's
+FPSIMD/SVE state, and will pend a SIGKILL for the current thread.
+
+This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
+FPSIMD/SVE state with the guest's maximum SVE VL, and
+fpsimd_save_user_state() verifies that the live SVE VL is as expected
+before attempting to save the register state:
+
+| if (WARN_ON(sve_get_vl() != vl)) {
+| force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
+| return;
+| }
+
+Fix this and make this a bit easier to reason about by always eagerly
+switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
+happening, there's no need to trap host SVE usage, and the nVHE/hVHE
+__deactivate_cptr_traps() logic can be simplified to enable host access
+to all present FPSIMD/SVE/SME features.
+
+In protected nVHE/hVHE modes, the host's state is always saved/restored
+by hyp, and the guest's state is saved prior to exit to the host, so
+from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
+the host's ZCR_EL1 is never clobbered by hyp.
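+
+For reference, a small sketch of the vector length arithmetic behind
+the ZCR_ELx values discussed above (userspace C, not kernel code):
+the LEN field holds the number of 128-bit quadwords minus one, so a
+guest maximum VL of 64 bytes is programmed as 3 while a host running
+with a 32-byte VL has 1 live, and saving the guest state with the
+wrong live VL is what trips the WARN quoted above:
+
+  #include <stdio.h>
+
+  /* ZCR_ELx.LEN = (vector length in bytes / 16) - 1 */
+  static unsigned int zcr_len_from_vl(unsigned int vl_bytes)
+  {
+          return vl_bytes / 16 - 1;
+  }
+
+  int main(void)
+  {
+          unsigned int guest_max_vl = 64, host_vl = 32;
+
+          printf("guest LEN %u, host LEN %u\n",
+                 zcr_len_from_vl(guest_max_vl), zcr_len_from_vl(host_vl));
+          return 0;
+  }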
+
+Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
+Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[ v6.6 lacks pKVM saving of host SVE state, pull in discovery of maximum
+ host VL separately -- broonie ]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 1
+ arch/arm64/include/asm/kvm_hyp.h | 1
+ arch/arm64/kvm/fpsimd.c | 19 +++++------
+ arch/arm64/kvm/hyp/entry.S | 5 ++
+ arch/arm64/kvm/hyp/include/hyp/switch.h | 55 ++++++++++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/nvhe/hyp-main.c | 8 +---
+ arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +
+ arch/arm64/kvm/hyp/nvhe/switch.c | 30 +++++++++++------
+ arch/arm64/kvm/hyp/vhe/switch.c | 4 ++
+ arch/arm64/kvm/reset.c | 3 +
+ 10 files changed, 103 insertions(+), 25 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -67,6 +67,7 @@ enum kvm_mode kvm_get_mode(void);
+ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
+ extern unsigned int kvm_sve_max_vl;
++extern unsigned int kvm_host_sve_max_vl;
+ int kvm_arm_init_sve(void);
+
+ u32 __attribute_const__ kvm_target_cpu(void);
+--- a/arch/arm64/include/asm/kvm_hyp.h
++++ b/arch/arm64/include/asm/kvm_hyp.h
+@@ -122,5 +122,6 @@ extern u64 kvm_nvhe_sym(id_aa64isar2_el1
+ extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
+ extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+ extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
++extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
+
+ #endif /* __ARM64_KVM_HYP_H__ */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -148,15 +148,16 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+ local_irq_save(flags);
+
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+- if (vcpu_has_sve(vcpu)) {
+- __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+-
+- /* Restore the VL that was saved when bound to the CPU */
+- if (!has_vhe())
+- sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+- SYS_ZCR_EL1);
+- }
+-
++ /*
++ * Flush (save and invalidate) the fpsimd/sve state so that if
++ * the host tries to use fpsimd/sve, it's not using stale data
++ * from the guest.
++ *
++ * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
++ * context unconditionally, in both nVHE and VHE. This allows
++ * the kernel to restore the fpsimd/sve state, including ZCR_EL1
++ * when needed.
++ */
+ fpsimd_save_and_flush_cpu_state();
+ }
+
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
+ alternative_else_nop_endif
+ mrs x1, isr_el1
+ cbz x1, 1f
++
++ // Ensure that __guest_enter() always provides a context
++ // synchronization event so that callers don't need ISBs for anything
++ // that would usually be synchonized by the ERET.
++ isb
+ mov x0, #ARM_EXCEPTION_IRQ
+ ret
+
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -167,6 +167,61 @@ static inline void __hyp_sve_restore_gue
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+ }
+
++static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
++{
++ u64 zcr_el1, zcr_el2;
++
++ if (!guest_owns_fp_regs(vcpu))
++ return;
++
++ if (vcpu_has_sve(vcpu)) {
++ zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++
++ write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++ zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1);
++ write_sysreg_el1(zcr_el1, SYS_ZCR);
++ }
++}
++
++static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
++{
++ u64 zcr_el1, zcr_el2;
++
++ if (!guest_owns_fp_regs(vcpu))
++ return;
++
++ /*
++ * When the guest owns the FP regs, we know that guest+hyp traps for
++ * any FPSIMD/SVE/SME features exposed to the guest have been disabled
++ * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
++ * prior to __guest_entry(). As __guest_entry() guarantees a context
++ * synchronization event, we don't need an ISB here to avoid taking
++ * traps for anything that was exposed to the guest.
++ */
++ if (vcpu_has_sve(vcpu)) {
++ zcr_el1 = read_sysreg_el1(SYS_ZCR);
++ __vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1;
++
++ /*
++ * The guest's state is always saved using the guest's max VL.
++ * Ensure that the host has the guest's max VL active such that
++ * the host can save the guest's state lazily, but don't
++ * artificially restrict the host to the guest's max VL.
++ */
++ if (has_vhe()) {
++ zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++ write_sysreg_el2(zcr_el2, SYS_ZCR);
++ } else {
++ zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
++ write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++ zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
++ write_sysreg_el1(zcr_el1, SYS_ZCR);
++ }
++ }
++}
++
+ /*
+ * We trap the first access to the FP/SIMD to save the host context and
+ * restore the guest context lazily.
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -5,6 +5,7 @@
+ */
+
+ #include <hyp/adjust_pc.h>
++#include <hyp/switch.h>
+
+ #include <asm/pgtable-types.h>
+ #include <asm/kvm_asm.h>
+@@ -25,7 +26,9 @@ static void handle___kvm_vcpu_run(struct
+ {
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
++ fpsimd_lazy_switch_to_guest(kern_hyp_va(vcpu));
+ cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
++ fpsimd_lazy_switch_to_host(kern_hyp_va(vcpu));
+ }
+
+ static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+@@ -285,11 +288,6 @@ void handle_trap(struct kvm_cpu_context
+ case ESR_ELx_EC_SMC64:
+ handle_host_smc(host_ctxt);
+ break;
+- case ESR_ELx_EC_SVE:
+- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+- isb();
+- sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+- break;
+ case ESR_ELx_EC_IABT_LOW:
+ case ESR_ELx_EC_DABT_LOW:
+ handle_host_mem_abort(host_ctxt);
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -9,6 +9,8 @@
+ #include <nvhe/fixed_config.h>
+ #include <nvhe/trap_handler.h>
+
++unsigned int kvm_host_sve_max_vl;
++
+ /*
+ * Set trap register values based on features in ID_AA64PFR0.
+ */
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct
+ {
+ u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
+
++ if (!guest_owns_fp_regs(vcpu))
++ __activate_traps_fpsimd32(vcpu);
++
+ /* !hVHE case upstream */
+ if (1) {
+ val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
+@@ -55,12 +58,24 @@ static void __activate_cptr_traps(struct
+
+ if (!guest_owns_fp_regs(vcpu))
+ val |= CPTR_EL2_TFP;
++
++ write_sysreg(val, cptr_el2);
+ }
++}
+
+- if (!guest_owns_fp_regs(vcpu))
+- __activate_traps_fpsimd32(vcpu);
++static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
++{
++ /* !hVHE case upstream */
++ if (1) {
++ u64 val = CPTR_NVHE_EL2_RES1;
+
+- write_sysreg(val, cptr_el2);
++ if (!cpus_have_final_cap(ARM64_SVE))
++ val |= CPTR_EL2_TZ;
++ if (!cpus_have_final_cap(ARM64_SME))
++ val |= CPTR_EL2_TSM;
++
++ write_sysreg(val, cptr_el2);
++ }
+ }
+
+ static void __activate_traps(struct kvm_vcpu *vcpu)
+@@ -89,7 +104,6 @@ static void __activate_traps(struct kvm_
+ static void __deactivate_traps(struct kvm_vcpu *vcpu)
+ {
+ extern char __kvm_hyp_host_vector[];
+- u64 cptr;
+
+ ___deactivate_traps(vcpu);
+
+@@ -114,13 +128,7 @@ static void __deactivate_traps(struct kv
+
+ write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+
+- cptr = CPTR_EL2_DEFAULT;
+- if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+- cptr |= CPTR_EL2_TZ;
+- if (cpus_have_final_cap(ARM64_SME))
+- cptr &= ~CPTR_EL2_TSM;
+-
+- write_sysreg(cptr, cptr_el2);
++ __deactivate_cptr_traps(vcpu);
+ write_sysreg(__kvm_hyp_host_vector, vbar_el2);
+ }
+
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -134,6 +134,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
+
+ sysreg_save_host_state_vhe(host_ctxt);
+
++ fpsimd_lazy_switch_to_guest(vcpu);
++
+ /*
+ * ARM erratum 1165522 requires us to configure both stage 1 and
+ * stage 2 translation for the guest context before we clear
+@@ -164,6 +166,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
+
+ __deactivate_traps(vcpu);
+
++ fpsimd_lazy_switch_to_host(vcpu);
++
+ sysreg_restore_host_state_vhe(host_ctxt);
+
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -42,11 +42,14 @@ static u32 kvm_ipa_limit;
+ PSR_AA32_I_BIT | PSR_AA32_F_BIT)
+
+ unsigned int kvm_sve_max_vl;
++unsigned int kvm_host_sve_max_vl;
+
+ int kvm_arm_init_sve(void)
+ {
+ if (system_supports_sve()) {
+ kvm_sve_max_vl = sve_max_virtualisable_vl();
++ kvm_host_sve_max_vl = sve_max_vl();
++ kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
+
+ /*
+ * The get_sve_reg()/set_sve_reg() ioctl interface will need
--- /dev/null
+From stable+bounces-128309-greg=kroah.com@vger.kernel.org Fri Apr 4 15:29:58 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:43 +0100
+Subject: KVM: arm64: Mark some header functions as inline
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-10-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit f9dd00de1e53a47763dfad601635d18542c3836d ]
+
+The shared hyp switch header has a number of static functions which
+might not be used by all files that include the header, and when unused
+they will provoke compiler warnings, e.g.
+
+| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:703:13: warning: 'kvm_hyp_handle_dabt_low' defined but not used [-Wunused-function]
+| 703 | static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+| | ^~~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:682:13: warning: 'kvm_hyp_handle_cp15_32' defined but not used [-Wunused-function]
+| 682 | static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+| | ^~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:662:13: warning: 'kvm_hyp_handle_sysreg' defined but not used [-Wunused-function]
+| 662 | static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+| | ^~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:458:13: warning: 'kvm_hyp_handle_fpsimd' defined but not used [-Wunused-function]
+| 458 | static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+| | ^~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:329:13: warning: 'kvm_hyp_handle_mops' defined but not used [-Wunused-function]
+| 329 | static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+| | ^~~~~~~~~~~~~~~~~~~
+
+Mark these functions as 'inline' to suppress this warning. This
+shouldn't result in any functional change.
+
+At the same time, avoid the use of __alias() in the header and alias
+kvm_hyp_handle_iabt_low() and kvm_hyp_handle_watchpt_low() to
+kvm_hyp_handle_memory_fault() using CPP, matching the style in the rest
+of the kernel. For consistency, kvm_hyp_handle_memory_fault() is also
+marked as 'inline'.
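+
+As a generic illustration of the two techniques used here (not the
+kernel code itself; the names are invented): 'static inline' keeps an
+unused header-defined function from triggering -Wunused-function, and
+a CPP define can alias one handler name to another:
+
+  /* as it would appear in a shared header */
+  static inline int handle_memory_fault(int code)
+  {
+          return code != 0;
+  }
+
+  /* CPP aliasing instead of __attribute__((alias(...))) */
+  #define handle_iabt_low    handle_memory_fault
+  #define handle_watchpt_low handle_memory_fault
+
+  int main(void)
+  {
+          return handle_iabt_low(0);  /* resolves to handle_memory_fault() */
+  }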
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-8-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/include/hyp/switch.h | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -173,7 +173,7 @@ static inline void __hyp_sve_restore_gue
+ * If FP/SIMD is not implemented, handle the trap and inject an undefined
+ * instruction exception to the guest. Similarly for trapped SVE accesses.
+ */
+-static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ bool sve_guest;
+ u8 esr_ec;
+@@ -331,7 +331,7 @@ static bool kvm_hyp_handle_ptrauth(struc
+ return true;
+ }
+
+-static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ handle_tx2_tvm(vcpu))
+@@ -347,7 +347,7 @@ static bool kvm_hyp_handle_sysreg(struct
+ return false;
+ }
+
+-static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+ __vgic_v3_perform_cpuif_access(vcpu) == 1)
+@@ -356,19 +356,18 @@ static bool kvm_hyp_handle_cp15_32(struc
+ return false;
+ }
+
+-static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
++ u64 *exit_code)
+ {
+ if (!__populate_fault_info(vcpu))
+ return true;
+
+ return false;
+ }
+-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+- __alias(kvm_hyp_handle_memory_fault);
+-static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+- __alias(kvm_hyp_handle_memory_fault);
++#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
++#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
+
+-static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
+ return true;
--- /dev/null
+From stable+bounces-128308-greg=kroah.com@vger.kernel.org Fri Apr 4 15:29:46 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:42 +0100
+Subject: KVM: arm64: Refactor exit handlers
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-9-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 9b66195063c5a145843547b1d692bd189be85287 ]
+
+The hyp exit handling logic is largely shared between VHE and nVHE/hVHE,
+with common logic in arch/arm64/kvm/hyp/include/hyp/switch.h. The code
+in the header depends on function definitions provided by
+arch/arm64/kvm/hyp/vhe/switch.c and arch/arm64/kvm/hyp/nvhe/switch.c
+when they include the header.
+
+This is an unusual header dependency, and prevents the use of
+arch/arm64/kvm/hyp/include/hyp/switch.h in other files as this would
+result in compiler warnings regarding missing definitions, e.g.
+
+| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:733:31: warning: 'kvm_get_exit_handler_array' used but never defined
+| 733 | static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+| | ^~~~~~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:735:13: warning: 'early_exit_filter' used but never defined
+| 735 | static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+| | ^~~~~~~~~~~~~~~~~
+
+Refactor the logic such that the header doesn't depend on anything from
+the C files. There should be no functional change as a result of this
+patch.
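+
+The shape of the refactor can be illustrated with a generic sketch
+(names invented, not the kernel code): instead of the header calling a
+function that every includer must define, the includer passes its
+handler table into the shared helper:
+
+  typedef int (*exit_handler_fn)(int exit_code);
+
+  /* Shared helper: takes the handler table as an argument rather than
+   * relying on a function each includer has to provide. */
+  static inline int handle_exit(int exit_code, const exit_handler_fn *handlers)
+  {
+          exit_handler_fn fn = handlers[exit_code & 0x3];
+
+          return fn ? fn(exit_code) : 0;
+  }
+
+  /* Each "includer" (here just one file) supplies its own table. */
+  static int handle_irq(int exit_code) { (void)exit_code; return 1; }
+
+  static const exit_handler_fn my_handlers[4] = {
+          [1] = handle_irq,
+  };
+
+  int main(void)
+  {
+          return handle_exit(1, my_handlers) == 1 ? 0 : 1;
+  }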
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-7-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/include/hyp/switch.h | 30 ++++++------------------------
+ arch/arm64/kvm/hyp/nvhe/switch.c | 27 +++++++++++++++------------
+ arch/arm64/kvm/hyp/vhe/switch.c | 8 +++-----
+ 3 files changed, 24 insertions(+), 41 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -398,23 +398,16 @@ static bool kvm_hyp_handle_dabt_low(stru
+
+ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
+
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+-
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+-
+ /*
+ * Allow the hypervisor to handle the exit with an exit handler if it has one.
+ *
+ * Returns true if the hypervisor handled the exit, and control should go back
+ * to the guest, or false if it hasn't.
+ */
+-static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++ const exit_handler_fn *handlers)
+ {
+- const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+- exit_handler_fn fn;
+-
+- fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+-
++ exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+ if (fn)
+ return fn(vcpu, exit_code);
+
+@@ -444,20 +437,9 @@ static inline void synchronize_vcpu_psta
+ * the guest, false when we should restore the host state and return to the
+ * main run loop.
+ */
+-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++ const exit_handler_fn *handlers)
+ {
+- /*
+- * Save PSTATE early so that we can evaluate the vcpu mode
+- * early on.
+- */
+- synchronize_vcpu_pstate(vcpu, exit_code);
+-
+- /*
+- * Check whether we want to repaint the state one way or
+- * another.
+- */
+- early_exit_filter(vcpu, exit_code);
+-
+ if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
+ vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
+
+@@ -487,7 +469,7 @@ static inline bool fixup_guest_exit(stru
+ goto exit;
+
+ /* Check if there's an exit handler and allow it to handle the exit. */
+- if (kvm_hyp_handle_exit(vcpu, exit_code))
++ if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
+ goto guest;
+ exit:
+ /* Return to the host kernel and handle the exit */
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -209,21 +209,22 @@ static const exit_handler_fn *kvm_get_ex
+ return hyp_exit_handlers;
+ }
+
+-/*
+- * Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
+- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
+- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
+- * hypervisor spots a guest in such a state ensure it is handled, and don't
+- * trust the host to spot or fix it. The check below is based on the one in
+- * kvm_arch_vcpu_ioctl_run().
+- *
+- * Returns false if the guest ran in AArch32 when it shouldn't have, and
+- * thus should exit to the host, or true if a the guest run loop can continue.
+- */
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
++ const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+
++ synchronize_vcpu_pstate(vcpu, exit_code);
++
++ /*
++ * Some guests (e.g., protected VMs) are not allowed to run in
++ * AArch32. The ARMv8 architecture does not give the hypervisor a
++ * mechanism to prevent a guest from dropping to AArch32 EL0 if
++ * implemented by the CPU. If the hypervisor spots a guest in such a
++ * state ensure it is handled, and don't trust the host to spot or fix
++ * it. The check below is based on the one in
++ * kvm_arch_vcpu_ioctl_run().
++ */
+ if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+ /*
+ * As we have caught the guest red-handed, decide that it isn't
+@@ -236,6 +237,8 @@ static void early_exit_filter(struct kvm
+ *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
+ *exit_code |= ARM_EXCEPTION_IL;
+ }
++
++ return __fixup_guest_exit(vcpu, exit_code, handlers);
+ }
+
+ /* Switch to the guest for legacy non-VHE systems */
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -114,13 +114,11 @@ static const exit_handler_fn hyp_exit_ha
+ [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+ };
+
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+- return hyp_exit_handlers;
+-}
++ synchronize_vcpu_pstate(vcpu, exit_code);
+
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+-{
++ return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
+ }
+
+ /* Switch to the guest for VHE systems running in EL2 */
--- /dev/null
+From broonie@kernel.org Fri Apr 4 15:27:48 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:39 +0100
+Subject: KVM: arm64: Remove host FPSIMD saving for non-protected KVM
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-6-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 8eca7f6d5100b6997df4f532090bc3f7e0203bef ]
+
+Now that the host eagerly saves its own FPSIMD/SVE/SME state,
+non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
+and the code to do this is never used. Protected KVM still needs to
+save/restore the host FPSIMD/SVE state to avoid leaking guest state to
+the host (and to avoid revealing to the host whether the guest used
+FPSIMD/SVE/SME), and that code needs to be retained.
+
+Remove the unused code and data structures.
+
+To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
+VHE hyp code, the nVHE/hVHE version is moved into the shared switch
+header, where it is only invoked when KVM is in protected mode.
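+
+A minimal sketch of the shape this takes at the shared call site, assuming
+the helper keeps the kvm_hyp_save_fpsimd_host() name and that
+is_protected_kvm_enabled() is the gate (illustrative only, not a literal
+hunk from this series):
+
+	/*
+	 * Only protected KVM still needs hyp to write the host's FP state
+	 * out; non-protected KVM relies on the eager save done when the
+	 * vCPU was loaded.
+	 */
+	if (is_protected_kvm_enabled())
+		kvm_hyp_save_fpsimd_host(vcpu);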
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 1 -
+ arch/arm64/kvm/fpsimd.c | 2 --
+ arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ----
+ 3 files changed, 7 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -380,7 +380,6 @@ struct kvm_vcpu_arch {
+ struct kvm_guest_debug_arch vcpu_debug_state;
+ struct kvm_guest_debug_arch external_debug_state;
+
+- struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+ struct task_struct *parent_task;
+
+ struct {
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -49,8 +49,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_
+ if (ret)
+ return ret;
+
+- vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+-
+ /*
+ * We need to keep current's task_struct pinned until its data has been
+ * unshared with the hypervisor to make sure it is not re-used by the
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -207,10 +207,6 @@ static bool kvm_hyp_handle_fpsimd(struct
+ }
+ isb();
+
+- /* Write out the host state if it's in the registers */
+- if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
+- __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+-
+ /* Restore the guest state */
+ if (sve_guest)
+ __hyp_sve_restore_guest(vcpu);
--- /dev/null
+From broonie@kernel.org Fri Apr 4 15:27:55 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:41 +0100
+Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-8-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 407a99c4654e8ea65393f412c421a55cac539f5b ]
+
+When KVM is in VHE mode, the host kernel tries to save and restore the
+configuration of CPACR_EL1.SMEN (i.e. CPTR_EL2.SMEN when HCR_EL2.E2H=1)
+across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
+configuration may be clobbered by hyp when running a vCPU. This logic
+has historically been broken, and is currently redundant.
+
+This logic was originally introduced in commit:
+
+ 861262ab86270206 ("KVM: arm64: Handle SME host state when running guests")
+
+At the time, the VHE hyp code would reset CPTR_EL2.SMEN to 0b00 when
+returning to the host, trapping host access to SME state. Unfortunately,
+this was unsafe as the host could take a softirq before calling
+kvm_arch_vcpu_put_fp(), and if a softirq handler were to use kernel mode
+NEON the resulting attempt to save the live FPSIMD/SVE/SME state would
+result in a fatal trap.
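+
+As a hypothetical illustration (the handler below is invented for this
+example; the kernel_neon_begin()/kernel_neon_end() API itself is real),
+the hazardous window looked roughly like this: a softirq arriving after
+guest exit but before kvm_arch_vcpu_put_fp() would try to save the live
+FPSIMD/SVE/SME state with the SME trap still armed.
+
+	/* Softirq handler running between guest exit and vcpu_put_fp(): */
+	static void example_softirq_handler(void)
+	{
+		kernel_neon_begin();	/* attempts to save live FP state: traps */
+		/* ... SIMD work ... */
+		kernel_neon_end();
+	}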
+
+That issue was limited to VHE mode. For nVHE/hVHE modes, KVM always
+saved/restored the host kernel's CPACR_EL1 value, and configured
+CPTR_EL2.TSM to 0b0, ensuring that host usage of SME would not be
+trapped.
+
+The issue above was incidentally fixed by commit:
+
+ 375110ab51dec5dc ("KVM: arm64: Fix resetting SME trap values on reset for (h)VHE")
+
+That commit changed the VHE hyp code to configure CPTR_EL2.SMEN to 0b01
+when returning to the host, permitting host kernel usage of SME,
+avoiding the issue described above. At the time, this was not identified
+as a fix for commit 861262ab86270206.
+
+Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
+state, there's no need to save/restore the state of the EL0 SME trap.
+The kernel can safely save/restore state without trapping, as described
+above, and will restore userspace state (including trap controls) before
+returning to userspace.
+
+Remove the redundant logic.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-5-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[Update for rework of flags storage -- broonie]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 2 --
+ arch/arm64/kvm/fpsimd.c | 31 -------------------------------
+ 2 files changed, 33 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -556,8 +556,6 @@ struct kvm_vcpu_arch {
+ /* Save TRBE context if active */
+ #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
+
+-/* SME enabled for EL0 */
+-#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
+ /* Physical CPU not in supported_cpus */
+ #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
+ /* WFIT instruction trapped */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -87,21 +87,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+ */
+ fpsimd_save_and_flush_cpu_state();
+ vcpu->arch.fp_state = FP_STATE_FREE;
+-
+- /*
+- * We don't currently support SME guests but if we leave
+- * things in streaming mode then when the guest starts running
+- * FPSIMD or SVE code it may generate SME traps so as a
+- * special case if we are in streaming mode we force the host
+- * state to be saved now and exit streaming mode so that we
+- * don't have to handle any SME traps for valid guest
+- * operations. Do this for ZA as well for now for simplicity.
+- */
+- if (system_supports_sme()) {
+- vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+- if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+- vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+- }
+ }
+
+ /*
+@@ -162,22 +147,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+
+ local_irq_save(flags);
+
+- /*
+- * If we have VHE then the Hyp code will reset CPACR_EL1 to
+- * CPACR_EL1_DEFAULT and we need to reenable SME.
+- */
+- if (has_vhe() && system_supports_sme()) {
+- /* Also restore EL0 state seen on entry */
+- if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
+- sysreg_clear_set(CPACR_EL1, 0,
+- CPACR_EL1_SMEN_EL0EN |
+- CPACR_EL1_SMEN_EL1EN);
+- else
+- sysreg_clear_set(CPACR_EL1,
+- CPACR_EL1_SMEN_EL0EN,
+- CPACR_EL1_SMEN_EL1EN);
+- }
+-
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+ if (vcpu_has_sve(vcpu)) {
+ __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
--- /dev/null
+From broonie@kernel.org Fri Apr 4 15:27:52 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:40 +0100
+Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Fuad Tabba <tabba@google.com>
+Message-ID: <20250404-stable-sve-6-1-v1-7-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 459f059be702056d91537b99a129994aa6ccdd35 ]
+
+When KVM is in VHE mode, the host kernel tries to save and restore the
+configuration of CPACR_EL1.ZEN (i.e. CPTR_EL2.ZEN when HCR_EL2.E2H=1)
+across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
+configuration may be clobbered by hyp when running a vCPU. This logic is
+currently redundant.
+
+The VHE hyp code unconditionally configures CPTR_EL2.ZEN to 0b01 when
+returning to the host, permitting host kernel usage of SVE.
+
+Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
+state, there's no need to save/restore the state of the EL0 SVE trap.
+The kernel can safely save/restore state without trapping, as described
+above, and will restore userspace state (including trap controls) before
+returning to userspace.
+
+Remove the redundant logic.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-4-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[Rework for refactoring of where the flags are stored -- broonie]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 2 --
+ arch/arm64/kvm/fpsimd.c | 16 ----------------
+ 2 files changed, 18 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -556,8 +556,6 @@ struct kvm_vcpu_arch {
+ /* Save TRBE context if active */
+ #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
+
+-/* SVE enabled for host EL0 */
+-#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
+ /* SME enabled for EL0 */
+ #define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
+ /* Physical CPU not in supported_cpus */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -88,10 +88,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+ fpsimd_save_and_flush_cpu_state();
+ vcpu->arch.fp_state = FP_STATE_FREE;
+
+- vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+- if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+- vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
+-
+ /*
+ * We don't currently support SME guests but if we leave
+ * things in streaming mode then when the guest starts running
+@@ -193,18 +189,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+ }
+
+ fpsimd_save_and_flush_cpu_state();
+- } else if (has_vhe() && system_supports_sve()) {
+- /*
+- * The FPSIMD/SVE state in the CPU has not been touched, and we
+- * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+- * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+- * for EL0. To avoid spurious traps, restore the trap state
+- * seen by kvm_arch_vcpu_load_fp():
+- */
+- if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
+- sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+- else
+- sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
+ }
+
+ local_irq_restore(flags);
--- /dev/null
+From stable+bounces-128304-greg=kroah.com@vger.kernel.org Fri Apr 4 15:28:43 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 04 Apr 2025 14:23:38 +0100
+Subject: KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state
+To: Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Oliver Upton <oliver.upton@linux.dev>, Oleg Nesterov <oleg@redhat.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kvmarm@lists.cs.columbia.edu, Mark Brown <broonie@kernel.org>, stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Eric Auger <eauger@redhat.com>, Wilco Dijkstra <wilco.dijkstra@arm.com>, Eric Auger <eric.auger@redhat.com>, Florian Weimer <fweimer@redhat.com>, Fuad Tabba <tabba@google.com>, Jeremy Linton <jeremy.linton@arm.com>, Paolo Bonzini <pbonzini@redhat.com>
+Message-ID: <20250404-stable-sve-6-1-v1-5-cd5c9eb52d49@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit fbc7e61195e23f744814e78524b73b59faa54ab4 ]
+
+There are several problems with the way hyp code lazily saves the host's
+FPSIMD/SVE state, including:
+
+* Host SVE being discarded unexpectedly due to inconsistent
+ configuration of TIF_SVE and CPACR_ELx.ZEN. This has been seen to
+ result in QEMU crashes where SVE is used by memmove(), as reported by
+ Eric Auger:
+
+ https://issues.redhat.com/browse/RHEL-68997
+
+* Host SVE state is discarded *after* modification by ptrace, which was an
+ unintentional ptrace ABI change introduced with lazy discarding of SVE state.
+
+* The host FPMR value can be discarded when running a non-protected VM,
+  where FPMR support is not exposed to the VM, and that VM uses
+ FPSIMD/SVE. In these cases the hyp code does not save the host's FPMR
+ before unbinding the host's FPSIMD/SVE/SME state, leaving a stale
+ value in memory.
+
+Avoid these by eagerly saving and "flushing" the host's FPSIMD/SVE/SME
+state when loading a vCPU such that KVM does not need to save any of the
+host's FPSIMD/SVE/SME state. For clarity, fpsimd_kvm_prepare() is
+removed and the necessary call to fpsimd_save_and_flush_cpu_state() is
+placed in kvm_arch_vcpu_load_fp(). As 'fpsimd_state' and 'fpmr_ptr'
+should not be used, they are set to NULL; all uses of these will be
+removed in subsequent patches.
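+
+Hypothetical illustration of the invariant this establishes (not a hunk
+from the backport): once the vCPU has been loaded, nothing of the host's
+FP state is live in the registers, so hyp never has anything to write out
+on the host's behalf.
+
+	kvm_arch_vcpu_load_fp(vcpu);	/* eagerly saves + unbinds host state */
+	/* The FP registers are now free; only guest state may become live. */
+	WARN_ON(vcpu->arch.fp_state == FP_STATE_HOST_OWNED);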
+
+Historical problems go back at least as far as v5.17, e.g. erroneous
+assumptions about TIF_SVE being clear in commit:
+
+ 8383741ab2e773a9 ("KVM: arm64: Get rid of host SVE tracking/saving")
+
+... and so this eager save+flush probably needs to be backported to ALL
+stable trees.
+
+Fixes: 93ae6b01bafee8fa ("KVM: arm64: Discard any SVE state when entering KVM guests")
+Fixes: 8c845e2731041f0f ("arm64/sve: Leave SVE enabled on syscall if we don't context switch")
+Fixes: ef3be86021c3bdf3 ("KVM: arm64: Add save/restore support for FPMR")
+Reported-by: Eric Auger <eauger@redhat.com>
+Reported-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Tested-by: Eric Auger <eric.auger@redhat.com>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Florian Weimer <fweimer@redhat.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Jeremy Linton <jeremy.linton@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-2-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[ Mark: Handle vcpu/host flag conflict, remove host_data_ptr() ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/fpsimd.c | 25 -------------------------
+ arch/arm64/kvm/fpsimd.c | 18 ++++++++++--------
+ 2 files changed, 10 insertions(+), 33 deletions(-)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1672,31 +1672,6 @@ void fpsimd_signal_preserve_current_stat
+ }
+
+ /*
+- * Called by KVM when entering the guest.
+- */
+-void fpsimd_kvm_prepare(void)
+-{
+- if (!system_supports_sve())
+- return;
+-
+- /*
+- * KVM does not save host SVE state since we can only enter
+- * the guest from a syscall so the ABI means that only the
+- * non-saved SVE state needs to be saved. If we have left
+- * SVE enabled for performance reasons then update the task
+- * state to be FPSIMD only.
+- */
+- get_cpu_fpsimd_context();
+-
+- if (test_and_clear_thread_flag(TIF_SVE)) {
+- sve_to_fpsimd(current);
+- current->thread.fp_type = FP_STATE_FPSIMD;
+- }
+-
+- put_cpu_fpsimd_context();
+-}
+-
+-/*
+ * Associate current's FPSIMD context with this cpu
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -79,9 +79,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+ if (!system_supports_fpsimd())
+ return;
+
+- fpsimd_kvm_prepare();
+-
+- vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
++ /*
++ * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
++ * that the host kernel is responsible for restoring this state upon
++ * return to userspace, and the hyp code doesn't need to save anything.
++ *
++ * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
++ * that PSTATE.{SM,ZA} == {0,0}.
++ */
++ fpsimd_save_and_flush_cpu_state();
++ vcpu->arch.fp_state = FP_STATE_FREE;
+
+ vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+@@ -100,11 +107,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+ vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+ vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+-
+- if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+- vcpu->arch.fp_state = FP_STATE_FREE;
+- fpsimd_save_and_flush_cpu_state();
+- }
+ }
+ }
+
drm-sti-remove-duplicate-object-names.patch
drm-i915-gvt-fix-unterminated-string-initialization-warning.patch
io_uring-net-fix-accept-multishot-handling.patch
+kvm-arm64-discard-any-sve-state-when-entering-kvm-guests.patch
+arm64-fpsimd-track-the-saved-fpsimd-state-type-separately-to-tif_sve.patch
+arm64-fpsimd-have-kvm-explicitly-say-which-fp-registers-to-save.patch
+arm64-fpsimd-stop-using-tif_sve-to-manage-register-saving-in-kvm.patch
+kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
+kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
+kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
+kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
+kvm-arm64-refactor-exit-handlers.patch
+kvm-arm64-mark-some-header-functions-as-inline.patch
+kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
+kvm-arm64-eagerly-switch-zcr_el-1-2.patch