git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 25 Mar 2025 11:28:55 +0000 (07:28 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 25 Mar 2025 11:28:55 +0000 (07:28 -0400)
added patches:
kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
kvm-arm64-eagerly-switch-zcr_el-1-2.patch
kvm-arm64-mark-some-header-functions-as-inline.patch
kvm-arm64-refactor-exit-handlers.patch
kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
mptcp-fix-data-stream-corruption-in-the-address-announcement.patch

queue-6.6/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-eagerly-switch-zcr_el-1-2.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-mark-some-header-functions-as-inline.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-refactor-exit-handlers.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch [new file with mode: 0644]
queue-6.6/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch [new file with mode: 0644]
queue-6.6/mptcp-fix-data-stream-corruption-in-the-address-announcement.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch b/queue-6.6/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
new file mode 100644 (file)
index 0000000..87305e6
--- /dev/null
@@ -0,0 +1,209 @@
+From stable+bounces-125719-greg=kroah.com@vger.kernel.org Thu Mar 20 20:19:30 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:01 +0000
+Subject: KVM: arm64: Calculate cptr_el2 traps on activating traps
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Fuad Tabba <tabba@google.com>,  James Clark <james.clark@linaro.org>
+Message-ID: <20250321-stable-sve-6-6-v1-1-0b3a6a14ea53@kernel.org>
+
+From: Fuad Tabba <tabba@google.com>
+
+[ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
+
+Similar to VHE, calculate the value of cptr_el2 from scratch when
+activating traps. This removes the need to store cptr_el2 in every
+vcpu structure. Moreover, some traps, such as those that depend on
+whether the guest owns the fp registers, need to be set on every
+vcpu run.
+
+Reported-by: James Clark <james.clark@linaro.org>
+Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h  |    1 
+ arch/arm64/kvm/arm.c               |    1 
+ arch/arm64/kvm/hyp/nvhe/hyp-main.c |    2 -
+ arch/arm64/kvm/hyp/nvhe/pkvm.c     |   27 -------------------
+ arch/arm64/kvm/hyp/nvhe/switch.c   |   52 ++++++++++++++++++++++---------------
+ 5 files changed, 32 insertions(+), 51 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -486,7 +486,6 @@ struct kvm_vcpu_arch {
+       /* Values of trap registers for the guest. */
+       u64 hcr_el2;
+       u64 mdcr_el2;
+-      u64 cptr_el2;
+       /* Values of trap registers for the host before guest entry. */
+       u64 mdcr_el2_host;
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1309,7 +1309,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init
+       }
+       vcpu_reset_hcr(vcpu);
+-      vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+       /*
+        * Handle the "start in power-off" case.
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -36,7 +36,6 @@ static void flush_hyp_vcpu(struct pkvm_h
+       hyp_vcpu->vcpu.arch.hcr_el2     = host_vcpu->arch.hcr_el2;
+       hyp_vcpu->vcpu.arch.mdcr_el2    = host_vcpu->arch.mdcr_el2;
+-      hyp_vcpu->vcpu.arch.cptr_el2    = host_vcpu->arch.cptr_el2;
+       hyp_vcpu->vcpu.arch.iflags      = host_vcpu->arch.iflags;
+       hyp_vcpu->vcpu.arch.fp_state    = host_vcpu->arch.fp_state;
+@@ -59,7 +58,6 @@ static void sync_hyp_vcpu(struct pkvm_hy
+       host_vcpu->arch.ctxt            = hyp_vcpu->vcpu.arch.ctxt;
+       host_vcpu->arch.hcr_el2         = hyp_vcpu->vcpu.arch.hcr_el2;
+-      host_vcpu->arch.cptr_el2        = hyp_vcpu->vcpu.arch.cptr_el2;
+       host_vcpu->arch.fault           = hyp_vcpu->vcpu.arch.fault;
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -26,8 +26,6 @@ static void pvm_init_traps_aa64pfr0(stru
+       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+       u64 hcr_set = HCR_RW;
+       u64 hcr_clear = 0;
+-      u64 cptr_set = 0;
+-      u64 cptr_clear = 0;
+       /* Protected KVM does not support AArch32 guests. */
+       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+@@ -57,21 +55,10 @@ static void pvm_init_traps_aa64pfr0(stru
+       /* Trap AMU */
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
+               hcr_clear |= HCR_AMVOFFEN;
+-              cptr_set |= CPTR_EL2_TAM;
+-      }
+-
+-      /* Trap SVE */
+-      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
+-              if (has_hvhe())
+-                      cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+-              else
+-                      cptr_set |= CPTR_EL2_TZ;
+       }
+       vcpu->arch.hcr_el2 |= hcr_set;
+       vcpu->arch.hcr_el2 &= ~hcr_clear;
+-      vcpu->arch.cptr_el2 |= cptr_set;
+-      vcpu->arch.cptr_el2 &= ~cptr_clear;
+ }
+ /*
+@@ -101,7 +88,6 @@ static void pvm_init_traps_aa64dfr0(stru
+       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+       u64 mdcr_set = 0;
+       u64 mdcr_clear = 0;
+-      u64 cptr_set = 0;
+       /* Trap/constrain PMU */
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
+@@ -128,17 +114,8 @@ static void pvm_init_traps_aa64dfr0(stru
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
+               mdcr_set |= MDCR_EL2_TTRF;
+-      /* Trap Trace */
+-      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+-              if (has_hvhe())
+-                      cptr_set |= CPACR_EL1_TTA;
+-              else
+-                      cptr_set |= CPTR_EL2_TTA;
+-      }
+-
+       vcpu->arch.mdcr_el2 |= mdcr_set;
+       vcpu->arch.mdcr_el2 &= ~mdcr_clear;
+-      vcpu->arch.cptr_el2 |= cptr_set;
+ }
+ /*
+@@ -189,10 +166,6 @@ static void pvm_init_trap_regs(struct kv
+       /* Clear res0 and set res1 bits to trap potential new features. */
+       vcpu->arch.hcr_el2 &= ~(HCR_RES0);
+       vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
+-      if (!has_hvhe()) {
+-              vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+-              vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+-      }
+ }
+ /*
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -36,34 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_ve
+ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+-static void __activate_traps(struct kvm_vcpu *vcpu)
++static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
+ {
+-      u64 val;
++      u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
+-      ___activate_traps(vcpu);
+-      __activate_traps_common(vcpu);
++      if (has_hvhe()) {
++              val |= CPACR_ELx_TTA;
+-      val = vcpu->arch.cptr_el2;
+-      val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
+-      val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
+-      if (cpus_have_final_cap(ARM64_SME)) {
+-              if (has_hvhe())
+-                      val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+-              else
+-                      val |= CPTR_EL2_TSM;
+-      }
++              if (guest_owns_fp_regs(vcpu)) {
++                      val |= CPACR_ELx_FPEN;
++                      if (vcpu_has_sve(vcpu))
++                              val |= CPACR_ELx_ZEN;
++              }
++      } else {
++              val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
++
++              /*
++               * Always trap SME since it's not supported in KVM.
++               * TSM is RES1 if SME isn't implemented.
++               */
++              val |= CPTR_EL2_TSM;
+-      if (!guest_owns_fp_regs(vcpu)) {
+-              if (has_hvhe())
+-                      val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+-                               CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+-              else
+-                      val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
++              if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu))
++                      val |= CPTR_EL2_TZ;
+-              __activate_traps_fpsimd32(vcpu);
++              if (!guest_owns_fp_regs(vcpu))
++                      val |= CPTR_EL2_TFP;
+       }
++      if (!guest_owns_fp_regs(vcpu))
++              __activate_traps_fpsimd32(vcpu);
++
+       kvm_write_cptr_el2(val);
++}
++
++static void __activate_traps(struct kvm_vcpu *vcpu)
++{
++      ___activate_traps(vcpu);
++      __activate_traps_common(vcpu);
++      __activate_cptr_traps(vcpu);
++
+       write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
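The refactoring above boils down to deriving the trap register from the
vCPU's current state on every activation instead of caching it in the vcpu
structure and patching bits in and out. A minimal, hypothetical C sketch of
that pattern (invented bit names and a simplified state struct, not the real
KVM definitions):

#include <stdbool.h>
#include <stdint.h>

/* Invented trap bits for illustration only; not the architectural values. */
#define TRAP_TAM  (UINT64_C(1) << 30)
#define TRAP_TTA  (UINT64_C(1) << 28)
#define TRAP_TFP  (UINT64_C(1) << 10)
#define TRAP_TZ   (UINT64_C(1) << 8)

struct vcpu_state {
	bool guest_owns_fp_regs;
	bool has_sve;
};

/*
 * Derive the trap configuration from the vCPU's current state every time
 * traps are activated, rather than storing it in the vCPU and mutating it
 * as the state changes.
 */
static uint64_t compute_trap_bits(const struct vcpu_state *v)
{
	uint64_t val = TRAP_TAM | TRAP_TTA;	/* always-trapped features */

	if (!v->guest_owns_fp_regs)
		val |= TRAP_TFP;		/* trap FP/SIMD accesses */
	if (!v->has_sve || !v->guest_owns_fp_regs)
		val |= TRAP_TZ;			/* trap SVE accesses */

	return val;
}
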
diff --git a/queue-6.6/kvm-arm64-eagerly-switch-zcr_el-1-2.patch b/queue-6.6/kvm-arm64-eagerly-switch-zcr_el-1-2.patch
new file mode 100644 (file)
index 0000000..51ac67c
--- /dev/null
@@ -0,0 +1,359 @@
+From broonie@kernel.org Thu Mar 20 20:18:40 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:08 +0000
+Subject: KVM: arm64: Eagerly switch ZCR_EL{1,2}
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-8-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59 ]
+
+In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
+CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:
+
+* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
+  by the guest hypervisor, which may be less than or equal to that
+  guest's maximum VL.
+
+  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.
+
+* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
+  which may be less than or greater than the guest's maximum VL.
+
+  Note: in this case hyp code traps host SVE usage and lazily restores
+  ZCR_EL2 to the host's maximum VL, which may be greater than the
+  guest's maximum VL.
+
+This can be the case between exiting a guest and kvm_arch_vcpu_put_fp().
+If a softirq is taken during this period and the softirq handler tries
+to use kernel-mode NEON, then the kernel will fail to save the guest's
+FPSIMD/SVE state, and will pend a SIGKILL for the current thread.
+
+This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
+FPSIMD/SVE state with the guest's maximum SVE VL, and
+fpsimd_save_user_state() verifies that the live SVE VL is as expected
+before attempting to save the register state:
+
+| if (WARN_ON(sve_get_vl() != vl)) {
+|         force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
+|         return;
+| }
+
+Fix this and make this a bit easier to reason about by always eagerly
+switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
+happening, there's no need to trap host SVE usage, and the nVHE/hVHE
+__deactivate_cptr_traps() logic can be simplified to enable host access
+to all present FPSIMD/SVE/SME features.
+
+In protected nVHE/hVHE modes, the host's state is always saved/restored
+by hyp, and the guest's state is saved prior to exit to the host, so
+from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
+the host's ZCR_EL1 is never clobbered by hyp.
+
+Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
+Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[ v6.6 lacks pKVM saving of host SVE state, pull in discovery of maximum
+  host VL separately -- broonie ]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h       |    1 
+ arch/arm64/include/asm/kvm_hyp.h        |    1 
+ arch/arm64/kvm/fpsimd.c                 |   19 +++++------
+ arch/arm64/kvm/hyp/entry.S              |    5 ++
+ arch/arm64/kvm/hyp/include/hyp/switch.h |   55 ++++++++++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/nvhe/hyp-main.c      |   12 +-----
+ arch/arm64/kvm/hyp/nvhe/pkvm.c          |    2 +
+ arch/arm64/kvm/hyp/nvhe/switch.c        |   33 ++++++++++++++++---
+ arch/arm64/kvm/hyp/vhe/switch.c         |    4 ++
+ arch/arm64/kvm/reset.c                  |    3 +
+ 10 files changed, 113 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -73,6 +73,7 @@ static inline enum kvm_mode kvm_get_mode
+ #endif
+ extern unsigned int __ro_after_init kvm_sve_max_vl;
++extern unsigned int __ro_after_init kvm_host_sve_max_vl;
+ int __init kvm_arm_init_sve(void);
+ u32 __attribute_const__ kvm_target_cpu(void);
+--- a/arch/arm64/include/asm/kvm_hyp.h
++++ b/arch/arm64/include/asm/kvm_hyp.h
+@@ -145,5 +145,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1
+ extern unsigned long kvm_nvhe_sym(__icache_flags);
+ extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
++extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
+ #endif /* __ARM64_KVM_HYP_H__ */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -152,15 +152,16 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+       local_irq_save(flags);
+       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+-              if (vcpu_has_sve(vcpu)) {
+-                      __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+-
+-                      /* Restore the VL that was saved when bound to the CPU */
+-                      if (!has_vhe())
+-                              sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+-                                                     SYS_ZCR_EL1);
+-              }
+-
++              /*
++               * Flush (save and invalidate) the fpsimd/sve state so that if
++               * the host tries to use fpsimd/sve, it's not using stale data
++               * from the guest.
++               *
++               * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
++               * context unconditionally, in both nVHE and VHE. This allows
++               * the kernel to restore the fpsimd/sve state, including ZCR_EL1
++               * when needed.
++               */
+               fpsimd_save_and_flush_cpu_state();
+       }
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
+ alternative_else_nop_endif
+       mrs     x1, isr_el1
+       cbz     x1,  1f
++
++      // Ensure that __guest_enter() always provides a context
++      // synchronization event so that callers don't need ISBs for anything
++      // that would usually be synchonized by the ERET.
++      isb
+       mov     x0, #ARM_EXCEPTION_IRQ
+       ret
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -273,6 +273,61 @@ static inline void __hyp_sve_restore_gue
+       write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+ }
++static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
++{
++      u64 zcr_el1, zcr_el2;
++
++      if (!guest_owns_fp_regs(vcpu))
++              return;
++
++      if (vcpu_has_sve(vcpu)) {
++              zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++
++              write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++              zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1);
++              write_sysreg_el1(zcr_el1, SYS_ZCR);
++      }
++}
++
++static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
++{
++      u64 zcr_el1, zcr_el2;
++
++      if (!guest_owns_fp_regs(vcpu))
++              return;
++
++      /*
++       * When the guest owns the FP regs, we know that guest+hyp traps for
++       * any FPSIMD/SVE/SME features exposed to the guest have been disabled
++       * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
++       * prior to __guest_entry(). As __guest_entry() guarantees a context
++       * synchronization event, we don't need an ISB here to avoid taking
++       * traps for anything that was exposed to the guest.
++       */
++      if (vcpu_has_sve(vcpu)) {
++              zcr_el1 = read_sysreg_el1(SYS_ZCR);
++              __vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1;
++
++              /*
++               * The guest's state is always saved using the guest's max VL.
++               * Ensure that the host has the guest's max VL active such that
++               * the host can save the guest's state lazily, but don't
++               * artificially restrict the host to the guest's max VL.
++               */
++              if (has_vhe()) {
++                      zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++                      write_sysreg_el2(zcr_el2, SYS_ZCR);
++              } else {
++                      zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
++                      write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++                      zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
++                      write_sysreg_el1(zcr_el1, SYS_ZCR);
++              }
++      }
++}
++
+ /*
+  * We trap the first access to the FP/SIMD to save the host context and
+  * restore the guest context lazily.
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -5,6 +5,7 @@
+  */
+ #include <hyp/adjust_pc.h>
++#include <hyp/switch.h>
+ #include <asm/pgtable-types.h>
+ #include <asm/kvm_asm.h>
+@@ -95,7 +96,9 @@ static void handle___kvm_vcpu_run(struct
+               pkvm_put_hyp_vcpu(hyp_vcpu);
+       } else {
+               /* The host is fully trusted, run its vCPU directly. */
++              fpsimd_lazy_switch_to_guest(host_vcpu);
+               ret = __kvm_vcpu_run(host_vcpu);
++              fpsimd_lazy_switch_to_host(host_vcpu);
+       }
+ out:
+@@ -416,15 +419,6 @@ void handle_trap(struct kvm_cpu_context
+       case ESR_ELx_EC_SMC64:
+               handle_host_smc(host_ctxt);
+               break;
+-      case ESR_ELx_EC_SVE:
+-              if (has_hvhe())
+-                      sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
+-                                                      CPACR_EL1_ZEN_EL0EN));
+-              else
+-                      sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+-              isb();
+-              sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+-              break;
+       case ESR_ELx_EC_IABT_LOW:
+       case ESR_ELx_EC_DABT_LOW:
+               handle_host_mem_abort(host_ctxt);
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -18,6 +18,8 @@ unsigned long __icache_flags;
+ /* Used by kvm_get_vttbr(). */
+ unsigned int kvm_arm_vmid_bits;
++unsigned int kvm_host_sve_max_vl;
++
+ /*
+  * Set trap register values based on features in ID_AA64PFR0.
+  */
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct
+ {
+       u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
++      if (!guest_owns_fp_regs(vcpu))
++              __activate_traps_fpsimd32(vcpu);
++
+       if (has_hvhe()) {
+               val |= CPACR_ELx_TTA;
+@@ -48,6 +51,8 @@ static void __activate_cptr_traps(struct
+                       if (vcpu_has_sve(vcpu))
+                               val |= CPACR_ELx_ZEN;
+               }
++
++              write_sysreg(val, cpacr_el1);
+       } else {
+               val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
+@@ -62,12 +67,32 @@ static void __activate_cptr_traps(struct
+               if (!guest_owns_fp_regs(vcpu))
+                       val |= CPTR_EL2_TFP;
++
++              write_sysreg(val, cptr_el2);
+       }
++}
+-      if (!guest_owns_fp_regs(vcpu))
+-              __activate_traps_fpsimd32(vcpu);
++static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
++{
++      if (has_hvhe()) {
++              u64 val = CPACR_ELx_FPEN;
++
++              if (cpus_have_final_cap(ARM64_SVE))
++                      val |= CPACR_ELx_ZEN;
++              if (cpus_have_final_cap(ARM64_SME))
++                      val |= CPACR_ELx_SMEN;
++
++              write_sysreg(val, cpacr_el1);
++      } else {
++              u64 val = CPTR_NVHE_EL2_RES1;
++
++              if (!cpus_have_final_cap(ARM64_SVE))
++                      val |= CPTR_EL2_TZ;
++              if (!cpus_have_final_cap(ARM64_SME))
++                      val |= CPTR_EL2_TSM;
+-      kvm_write_cptr_el2(val);
++              write_sysreg(val, cptr_el2);
++      }
+ }
+ static void __activate_traps(struct kvm_vcpu *vcpu)
+@@ -120,7 +145,7 @@ static void __deactivate_traps(struct kv
+       write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+-      kvm_reset_cptr_el2(vcpu);
++      __deactivate_cptr_traps(vcpu);
+       write_sysreg(__kvm_hyp_host_vector, vbar_el2);
+ }
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -212,6 +212,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
+       sysreg_save_host_state_vhe(host_ctxt);
++      fpsimd_lazy_switch_to_guest(vcpu);
++
+       /*
+        * ARM erratum 1165522 requires us to configure both stage 1 and
+        * stage 2 translation for the guest context before we clear
+@@ -247,6 +249,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
+       __deactivate_traps(vcpu);
++      fpsimd_lazy_switch_to_host(vcpu);
++
+       sysreg_restore_host_state_vhe(host_ctxt);
+       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -46,11 +46,14 @@ static u32 __ro_after_init kvm_ipa_limit
+                                PSR_AA32_I_BIT | PSR_AA32_F_BIT)
+ unsigned int __ro_after_init kvm_sve_max_vl;
++unsigned int __ro_after_init kvm_host_sve_max_vl;
+ int __init kvm_arm_init_sve(void)
+ {
+       if (system_supports_sve()) {
+               kvm_sve_max_vl = sve_max_virtualisable_vl();
++              kvm_host_sve_max_vl = sve_max_vl();
++              kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
+               /*
+                * The get_sve_reg()/set_sve_reg() ioctl interface will need
diff --git a/queue-6.6/kvm-arm64-mark-some-header-functions-as-inline.patch b/queue-6.6/kvm-arm64-mark-some-header-functions-as-inline.patch
new file mode 100644 (file)
index 0000000..cc40fa6
--- /dev/null
@@ -0,0 +1,113 @@
+From broonie@kernel.org Thu Mar 20 20:18:37 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:07 +0000
+Subject: KVM: arm64: Mark some header functions as inline
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-7-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit f9dd00de1e53a47763dfad601635d18542c3836d ]
+
+The shared hyp switch header has a number of static functions which
+might not be used by all files that include the header, and when unused
+they will provoke compiler warnings, e.g.
+
+| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:703:13: warning: 'kvm_hyp_handle_dabt_low' defined but not used [-Wunused-function]
+|   703 | static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+|       |             ^~~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:682:13: warning: 'kvm_hyp_handle_cp15_32' defined but not used [-Wunused-function]
+|   682 | static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+|       |             ^~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:662:13: warning: 'kvm_hyp_handle_sysreg' defined but not used [-Wunused-function]
+|   662 | static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+|       |             ^~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:458:13: warning: 'kvm_hyp_handle_fpsimd' defined but not used [-Wunused-function]
+|   458 | static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+|       |             ^~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:329:13: warning: 'kvm_hyp_handle_mops' defined but not used [-Wunused-function]
+|   329 | static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+|       |             ^~~~~~~~~~~~~~~~~~~
+
+Mark these functions as 'inline' to suppress this warning. This
+shouldn't result in any functional change.
+
+At the same time, avoid the use of __alias() in the header and alias
+kvm_hyp_handle_iabt_low() and kvm_hyp_handle_watchpt_low() to
+kvm_hyp_handle_memory_fault() using CPP, matching the style in the rest
+of the kernel. For consistency, kvm_hyp_handle_memory_fault() is also
+marked as 'inline'.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-8-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/include/hyp/switch.h |   17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -279,7 +279,7 @@ static inline void __hyp_sve_restore_gue
+  * If FP/SIMD is not implemented, handle the trap and inject an undefined
+  * instruction exception to the guest. Similarly for trapped SVE accesses.
+  */
+-static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+       bool sve_guest;
+       u8 esr_ec;
+@@ -518,7 +518,7 @@ static bool handle_ampere1_tcr(struct kv
+       return true;
+ }
+-static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+           handle_tx2_tvm(vcpu))
+@@ -541,7 +541,7 @@ static bool kvm_hyp_handle_sysreg(struct
+       return false;
+ }
+-static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+       if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+           __vgic_v3_perform_cpuif_access(vcpu) == 1)
+@@ -550,19 +550,18 @@ static bool kvm_hyp_handle_cp15_32(struc
+       return false;
+ }
+-static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
++                                             u64 *exit_code)
+ {
+       if (!__populate_fault_info(vcpu))
+               return true;
+       return false;
+ }
+-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+-      __alias(kvm_hyp_handle_memory_fault);
+-static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+-      __alias(kvm_hyp_handle_memory_fault);
++#define kvm_hyp_handle_iabt_low               kvm_hyp_handle_memory_fault
++#define kvm_hyp_handle_watchpt_low    kvm_hyp_handle_memory_fault
+-static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+       if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
+               return true;
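The warning this patch silences is easy to reproduce in isolation: a plain
'static' function defined in a shared header is flagged by -Wunused-function
(enabled by -Wall) in every C file that includes the header without calling
it, while a 'static inline' definition is not. A hypothetical header, with
invented names, illustrating the difference:

/* common.h -- hypothetical header included by several C files */
#ifndef COMMON_H
#define COMMON_H

/* Unused in some includers: gcc -Wall warns about this definition. */
static int helper_plain(int x)
{
	return x * 2;
}

/* 'inline' tells the compiler a missing call site is expected: no warning. */
static inline int helper_inline(int x)
{
	return x * 2;
}

#endif /* COMMON_H */

Compiling any translation unit that includes this header without using
helper_plain() shows the warning for the plain version only.
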
diff --git a/queue-6.6/kvm-arm64-refactor-exit-handlers.patch b/queue-6.6/kvm-arm64-refactor-exit-handlers.patch
new file mode 100644 (file)
index 0000000..a3ed46b
--- /dev/null
@@ -0,0 +1,187 @@
+From broonie@kernel.org Thu Mar 20 20:18:34 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:06 +0000
+Subject: KVM: arm64: Refactor exit handlers
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-6-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 9b66195063c5a145843547b1d692bd189be85287 ]
+
+The hyp exit handling logic is largely shared between VHE and nVHE/hVHE,
+with common logic in arch/arm64/kvm/hyp/include/hyp/switch.h. The code
+in the header depends on function definitions provided by
+arch/arm64/kvm/hyp/vhe/switch.c and arch/arm64/kvm/hyp/nvhe/switch.c
+when they include the header.
+
+This is an unusual header dependency, and prevents the use of
+arch/arm64/kvm/hyp/include/hyp/switch.h in other files as this would
+result in compiler warnings regarding missing definitions, e.g.
+
+| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:733:31: warning: 'kvm_get_exit_handler_array' used but never defined
+|   733 | static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+|       |                               ^~~~~~~~~~~~~~~~~~~~~~~~~~
+| ./arch/arm64/kvm/hyp/include/hyp/switch.h:735:13: warning: 'early_exit_filter' used but never defined
+|   735 | static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+|       |             ^~~~~~~~~~~~~~~~~
+
+Refactor the logic such that the header doesn't depend on anything from
+the C files. There should be no functional change as a result of this
+patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-7-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hyp/include/hyp/switch.h |   30 ++++++------------------------
+ arch/arm64/kvm/hyp/nvhe/switch.c        |   27 +++++++++++++++------------
+ arch/arm64/kvm/hyp/vhe/switch.c         |    9 ++++-----
+ 3 files changed, 25 insertions(+), 41 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -592,23 +592,16 @@ static bool kvm_hyp_handle_dabt_low(stru
+ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+-
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+-
+ /*
+  * Allow the hypervisor to handle the exit with an exit handler if it has one.
+  *
+  * Returns true if the hypervisor handled the exit, and control should go back
+  * to the guest, or false if it hasn't.
+  */
+-static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++                                     const exit_handler_fn *handlers)
+ {
+-      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+-      exit_handler_fn fn;
+-
+-      fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+-
++      exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+       if (fn)
+               return fn(vcpu, exit_code);
+@@ -638,20 +631,9 @@ static inline void synchronize_vcpu_psta
+  * the guest, false when we should restore the host state and return to the
+  * main run loop.
+  */
+-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++                                    const exit_handler_fn *handlers)
+ {
+-      /*
+-       * Save PSTATE early so that we can evaluate the vcpu mode
+-       * early on.
+-       */
+-      synchronize_vcpu_pstate(vcpu, exit_code);
+-
+-      /*
+-       * Check whether we want to repaint the state one way or
+-       * another.
+-       */
+-      early_exit_filter(vcpu, exit_code);
+-
+       if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
+               vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
+@@ -681,7 +663,7 @@ static inline bool fixup_guest_exit(stru
+               goto exit;
+       /* Check if there's an exit handler and allow it to handle the exit. */
+-      if (kvm_hyp_handle_exit(vcpu, exit_code))
++      if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
+               goto guest;
+ exit:
+       /* Return to the host kernel and handle the exit */
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -225,21 +225,22 @@ static const exit_handler_fn *kvm_get_ex
+       return hyp_exit_handlers;
+ }
+-/*
+- * Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
+- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
+- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
+- * hypervisor spots a guest in such a state ensure it is handled, and don't
+- * trust the host to spot or fix it.  The check below is based on the one in
+- * kvm_arch_vcpu_ioctl_run().
+- *
+- * Returns false if the guest ran in AArch32 when it shouldn't have, and
+- * thus should exit to the host, or true if a the guest run loop can continue.
+- */
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
++      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
++      synchronize_vcpu_pstate(vcpu, exit_code);
++
++      /*
++       * Some guests (e.g., protected VMs) are not be allowed to run in
++       * AArch32.  The ARMv8 architecture does not give the hypervisor a
++       * mechanism to prevent a guest from dropping to AArch32 EL0 if
++       * implemented by the CPU. If the hypervisor spots a guest in such a
++       * state ensure it is handled, and don't trust the host to spot or fix
++       * it.  The check below is based on the one in
++       * kvm_arch_vcpu_ioctl_run().
++       */
+       if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+               /*
+                * As we have caught the guest red-handed, decide that it isn't
+@@ -252,6 +253,8 @@ static void early_exit_filter(struct kvm
+               *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
+               *exit_code |= ARM_EXCEPTION_IL;
+       }
++
++      return __fixup_guest_exit(vcpu, exit_code, handlers);
+ }
+ /* Switch to the guest for legacy non-VHE systems */
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -172,13 +172,10 @@ static const exit_handler_fn hyp_exit_ha
+       [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
+ };
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+-      return hyp_exit_handlers;
+-}
++      synchronize_vcpu_pstate(vcpu, exit_code);
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+-{
+       /*
+        * If we were in HYP context on entry, adjust the PSTATE view
+        * so that the usual helpers work correctly.
+@@ -198,6 +195,8 @@ static void early_exit_filter(struct kvm
+               *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
+               *vcpu_cpsr(vcpu) |= mode;
+       }
++
++      return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
+ }
+ /* Switch to the guest for VHE systems running in EL2 */
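The refactoring above amounts to passing the handler table into the shared
helper instead of having the header call a function that each including C
file must define. A hypothetical, simplified sketch of the before/after
shape (plain C types and invented names, not the KVM ones):

#include <stdbool.h>

struct vcpu;					/* opaque in this sketch */
typedef bool (*exit_handler_fn)(struct vcpu *, unsigned long *);

/*
 * Before: the header references a static function that every including C
 * file is expected to define; including the header anywhere else yields
 * "'get_handlers' used but never defined" warnings.
 */
static const exit_handler_fn *get_handlers(struct vcpu *vcpu);

static inline bool handle_exit_before(struct vcpu *vcpu, unsigned long *code)
{
	exit_handler_fn fn = get_handlers(vcpu)[*code & 0x3f];

	return fn ? fn(vcpu, code) : false;
}

/*
 * After: the caller passes its own table, so the header is self-contained
 * and each of the VHE and nVHE C files keeps its handler table private.
 */
static inline bool handle_exit_after(struct vcpu *vcpu, unsigned long *code,
				     const exit_handler_fn *handlers)
{
	exit_handler_fn fn = handlers[*code & 0x3f];

	return fn ? fn(vcpu, code) : false;
}
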
diff --git a/queue-6.6/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch b/queue-6.6/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
new file mode 100644 (file)
index 0000000..818b971
--- /dev/null
@@ -0,0 +1,89 @@
+From stable+bounces-125721-greg=kroah.com@vger.kernel.org Thu Mar 20 20:20:22 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:03 +0000
+Subject: KVM: arm64: Remove host FPSIMD saving for non-protected KVM
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-3-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 8eca7f6d5100b6997df4f532090bc3f7e0203bef ]
+
+Now that the host eagerly saves its own FPSIMD/SVE/SME state,
+non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
+and the code to do this is never used. Protected KVM still needs to
+save/restore the host FPSIMD/SVE state to avoid leaking guest state to
+the host (and to avoid revealing to the host whether the guest used
+FPSIMD/SVE/SME), and that code needs to be retained.
+
+Remove the unused code and data structures.
+
+To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
+VHE hyp code, the nVHE/hVHE version is moved into the shared switch
+header, where it is only invoked when KVM is in protected mode.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h       |    1 -
+ arch/arm64/kvm/fpsimd.c                 |    2 --
+ arch/arm64/kvm/hyp/include/hyp/switch.h |    4 ----
+ arch/arm64/kvm/hyp/nvhe/hyp-main.c      |    1 -
+ 4 files changed, 8 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -535,7 +535,6 @@ struct kvm_vcpu_arch {
+       struct kvm_guest_debug_arch vcpu_debug_state;
+       struct kvm_guest_debug_arch external_debug_state;
+-      struct user_fpsimd_state *host_fpsimd_state;    /* hyp VA */
+       struct task_struct *parent_task;
+       struct {
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -49,8 +49,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_
+       if (ret)
+               return ret;
+-      vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+-
+       /*
+        * We need to keep current's task_struct pinned until its data has been
+        * unshared with the hypervisor to make sure it is not re-used by the
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -321,10 +321,6 @@ static bool kvm_hyp_handle_fpsimd(struct
+       }
+       isb();
+-      /* Write out the host state if it's in the registers */
+-      if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
+-              __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+-
+       /* Restore the guest state */
+       if (sve_guest)
+               __hyp_sve_restore_guest(vcpu);
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -41,7 +41,6 @@ static void flush_hyp_vcpu(struct pkvm_h
+       hyp_vcpu->vcpu.arch.fp_state    = host_vcpu->arch.fp_state;
+       hyp_vcpu->vcpu.arch.debug_ptr   = kern_hyp_va(host_vcpu->arch.debug_ptr);
+-      hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+       hyp_vcpu->vcpu.arch.vsesr_el2   = host_vcpu->arch.vsesr_el2;
diff --git a/queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch b/queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
new file mode 100644 (file)
index 0000000..6f45290
--- /dev/null
@@ -0,0 +1,120 @@
+From broonie@kernel.org Thu Mar 20 20:18:32 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:05 +0000
+Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-5-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 407a99c4654e8ea65393f412c421a55cac539f5b ]
+
+When KVM is in VHE mode, the host kernel tries to save and restore the
+configuration of CPACR_EL1.SMEN (i.e. CPTR_EL2.SMEN when HCR_EL2.E2H=1)
+across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
+configuration may be clobbered by hyp when running a vCPU. This logic
+has historically been broken, and is currently redundant.
+
+This logic was originally introduced in commit:
+
+  861262ab86270206 ("KVM: arm64: Handle SME host state when running guests")
+
+At the time, the VHE hyp code would reset CPTR_EL2.SMEN to 0b00 when
+returning to the host, trapping host access to SME state. Unfortunately,
+this was unsafe as the host could take a softirq before calling
+kvm_arch_vcpu_put_fp(), and if a softirq handler were to use kernel mode
+NEON the resulting attempt to save the live FPSIMD/SVE/SME state would
+result in a fatal trap.
+
+That issue was limited to VHE mode. For nVHE/hVHE modes, KVM always
+saved/restored the host kernel's CPACR_EL1 value, and configured
+CPTR_EL2.TSM to 0b0, ensuring that host usage of SME would not be
+trapped.
+
+The issue above was incidentally fixed by commit:
+
+  375110ab51dec5dc ("KVM: arm64: Fix resetting SME trap values on reset for (h)VHE")
+
+That commit changed the VHE hyp code to configure CPTR_EL2.SMEN to 0b01
+when returning to the host, permitting host kernel usage of SME,
+avoiding the issue described above. At the time, this was not identified
+as a fix for commit 861262ab86270206.
+
+Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
+state, there's no need to save/restore the state of the EL0 SME trap.
+The kernel can safely save/restore state without trapping, as described
+above, and will restore userspace state (including trap controls) before
+returning to userspace.
+
+Remove the redundant logic.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-5-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[Update for rework of flags storage -- broonie]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |    2 --
+ arch/arm64/kvm/fpsimd.c           |   23 -----------------------
+ 2 files changed, 25 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -717,8 +717,6 @@ struct kvm_vcpu_arch {
+ /* vcpu running in HYP context */
+ #define VCPU_HYP_CONTEXT      __vcpu_single_flag(iflags, BIT(7))
+-/* SME enabled for EL0 */
+-#define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
+ /* Physical CPU not in supported_cpus */
+ #define ON_UNSUPPORTED_CPU    __vcpu_single_flag(sflags, BIT(2))
+ /* WFIT instruction trapped */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -87,12 +87,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+        */
+       fpsimd_save_and_flush_cpu_state();
+       vcpu->arch.fp_state = FP_STATE_FREE;
+-
+-      if (system_supports_sme()) {
+-              vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+-              if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+-                      vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+-      }
+ }
+ /*
+@@ -157,23 +151,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+       local_irq_save(flags);
+-      /*
+-       * If we have VHE then the Hyp code will reset CPACR_EL1 to
+-       * the default value and we need to reenable SME.
+-       */
+-      if (has_vhe() && system_supports_sme()) {
+-              /* Also restore EL0 state seen on entry */
+-              if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
+-                      sysreg_clear_set(CPACR_EL1, 0,
+-                                       CPACR_EL1_SMEN_EL0EN |
+-                                       CPACR_EL1_SMEN_EL1EN);
+-              else
+-                      sysreg_clear_set(CPACR_EL1,
+-                                       CPACR_EL1_SMEN_EL0EN,
+-                                       CPACR_EL1_SMEN_EL1EN);
+-              isb();
+-      }
+-
+       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+               if (vcpu_has_sve(vcpu)) {
+                       __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
diff --git a/queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch b/queue-6.6/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
new file mode 100644 (file)
index 0000000..c18f749
--- /dev/null
@@ -0,0 +1,91 @@
+From broonie@kernel.org Thu Mar 20 20:18:28 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:04 +0000
+Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
+Message-ID: <20250321-stable-sve-6-6-v1-4-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 459f059be702056d91537b99a129994aa6ccdd35 ]
+
+When KVM is in VHE mode, the host kernel tries to save and restore the
+configuration of CPACR_EL1.ZEN (i.e. CPTR_EL2.ZEN when HCR_EL2.E2H=1)
+across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
+configuration may be clobbered by hyp when running a vCPU. This logic is
+currently redundant.
+
+The VHE hyp code unconditionally configures CPTR_EL2.ZEN to 0b01 when
+returning to the host, permitting host kernel usage of SVE.
+
+Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
+state, there's no need to save/restore the state of the EL0 SVE trap.
+The kernel can safely save/restore state without trapping, as described
+above, and will restore userspace state (including trap controls) before
+returning to userspace.
+
+Remove the redundant logic.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-4-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[Rework for refactoring of where the flags are stored -- broonie]
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |    2 --
+ arch/arm64/kvm/fpsimd.c           |   16 ----------------
+ 2 files changed, 18 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -717,8 +717,6 @@ struct kvm_vcpu_arch {
+ /* vcpu running in HYP context */
+ #define VCPU_HYP_CONTEXT      __vcpu_single_flag(iflags, BIT(7))
+-/* SVE enabled for host EL0 */
+-#define HOST_SVE_ENABLED      __vcpu_single_flag(sflags, BIT(0))
+ /* SME enabled for EL0 */
+ #define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
+ /* Physical CPU not in supported_cpus */
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -88,10 +88,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+       fpsimd_save_and_flush_cpu_state();
+       vcpu->arch.fp_state = FP_STATE_FREE;
+-      vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+-      if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+-              vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
+-
+       if (system_supports_sme()) {
+               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+@@ -189,18 +185,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
+               }
+               fpsimd_save_and_flush_cpu_state();
+-      } else if (has_vhe() && system_supports_sve()) {
+-              /*
+-               * The FPSIMD/SVE state in the CPU has not been touched, and we
+-               * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+-               * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
+-               * for EL0.  To avoid spurious traps, restore the trap state
+-               * seen by kvm_arch_vcpu_load_fp():
+-               */
+-              if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
+-                      sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+-              else
+-                      sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
+       }
+       local_irq_restore(flags);
diff --git a/queue-6.6/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch b/queue-6.6/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
new file mode 100644 (file)
index 0000000..452dfed
--- /dev/null
@@ -0,0 +1,159 @@
+From stable+bounces-125720-greg=kroah.com@vger.kernel.org Thu Mar 20 20:18:53 2025
+From: Mark Brown <broonie@kernel.org>
+Date: Fri, 21 Mar 2025 00:16:02 +0000
+Subject: KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  James Morse <james.morse@arm.com>,  Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Eric Auger <eauger@redhat.com>, Wilco Dijkstra <wilco.dijkstra@arm.com>,  Eric Auger <eric.auger@redhat.com>, Florian Weimer <fweimer@redhat.com>,  Fuad Tabba <tabba@google.com>, Jeremy Linton <jeremy.linton@arm.com>,  Paolo Bonzini <pbonzini@redhat.com>
+Message-ID: <20250321-stable-sve-6-6-v1-2-0b3a6a14ea53@kernel.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit fbc7e61195e23f744814e78524b73b59faa54ab4 ]
+
+There are several problems with the way hyp code lazily saves the host's
+FPSIMD/SVE state, including:
+
+* Host SVE being discarded unexpectedly due to inconsistent
+  configuration of TIF_SVE and CPACR_ELx.ZEN. This has been seen to
+  result in QEMU crashes where SVE is used by memmove(), as reported by
+  Eric Auger:
+
+  https://issues.redhat.com/browse/RHEL-68997
+
+* Host SVE state is discarded *after* modification by ptrace, which was an
+  unintentional ptrace ABI change introduced with lazy discarding of SVE state.
+
+* The host FPMR value can be discarded when running a non-protected VM,
+  where FPMR support is not exposed to a VM, and that VM uses
+  FPSIMD/SVE. In these cases the hyp code does not save the host's FPMR
+  before unbinding the host's FPSIMD/SVE/SME state, leaving a stale
+  value in memory.
+
+Avoid these by eagerly saving and "flushing" the host's FPSIMD/SVE/SME
+state when loading a vCPU such that KVM does not need to save any of the
+host's FPSIMD/SVE/SME state. For clarity, fpsimd_kvm_prepare() is
+removed and the necessary call to fpsimd_save_and_flush_cpu_state() is
+placed in kvm_arch_vcpu_load_fp(). As 'fpsimd_state' and 'fpmr_ptr'
+should not be used, they are set to NULL; all uses of these will be
+removed in subsequent patches.
+
+Historical problems go back at least as far as v5.17, e.g. erroneous
+assumptions about TIF_SVE being clear in commit:
+
+  8383741ab2e773a9 ("KVM: arm64: Get rid of host SVE tracking/saving")
+
+... and so this eager save+flush probably needs to be backported to ALL
+stable trees.
+
+Fixes: 93ae6b01bafee8fa ("KVM: arm64: Discard any SVE state when entering KVM guests")
+Fixes: 8c845e2731041f0f ("arm64/sve: Leave SVE enabled on syscall if we don't context switch")
+Fixes: ef3be86021c3bdf3 ("KVM: arm64: Add save/restore support for FPMR")
+Reported-by: Eric Auger <eauger@redhat.com>
+Reported-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Tested-by: Eric Auger <eric.auger@redhat.com>
+Acked-by: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Florian Weimer <fweimer@redhat.com>
+Cc: Fuad Tabba <tabba@google.com>
+Cc: Jeremy Linton <jeremy.linton@arm.com>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Oliver Upton <oliver.upton@linux.dev>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Link: https://lore.kernel.org/r/20250210195226.1215254-2-mark.rutland@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+[ Mark: Handle vcpu/host flag conflict ]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/fpsimd.c |   25 -------------------------
+ arch/arm64/kvm/fpsimd.c    |   31 ++++++++-----------------------
+ 2 files changed, 8 insertions(+), 48 deletions(-)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1708,31 +1708,6 @@ void fpsimd_signal_preserve_current_stat
+ }
+ /*
+- * Called by KVM when entering the guest.
+- */
+-void fpsimd_kvm_prepare(void)
+-{
+-      if (!system_supports_sve())
+-              return;
+-
+-      /*
+-       * KVM does not save host SVE state since we can only enter
+-       * the guest from a syscall so the ABI means that only the
+-       * non-saved SVE state needs to be saved.  If we have left
+-       * SVE enabled for performance reasons then update the task
+-       * state to be FPSIMD only.
+-       */
+-      get_cpu_fpsimd_context();
+-
+-      if (test_and_clear_thread_flag(TIF_SVE)) {
+-              sve_to_fpsimd(current);
+-              current->thread.fp_type = FP_STATE_FPSIMD;
+-      }
+-
+-      put_cpu_fpsimd_context();
+-}
+-
+-/*
+  * Associate current's FPSIMD context with this cpu
+  * The caller must have ownership of the cpu FPSIMD context before calling
+  * this function.
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -79,14 +79,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+       if (!system_supports_fpsimd())
+               return;
+-      fpsimd_kvm_prepare();
+-
+       /*
+-       * We will check TIF_FOREIGN_FPSTATE just before entering the
+-       * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+-       * FP_STATE_FREE if the flag set.
++       * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
++       * that the host kernel is responsible for restoring this state upon
++       * return to userspace, and the hyp code doesn't need to save anything.
++       *
++       * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
++       * that PSTATE.{SM,ZA} == {0,0}.
+        */
+-      vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
++      fpsimd_save_and_flush_cpu_state();
++      vcpu->arch.fp_state = FP_STATE_FREE;
+       vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+@@ -96,23 +98,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
+               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+                       vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+-
+-              /*
+-               * If PSTATE.SM is enabled then save any pending FP
+-               * state and disable PSTATE.SM. If we leave PSTATE.SM
+-               * enabled and the guest does not enable SME via
+-               * CPACR_EL1.SMEN then operations that should be valid
+-               * may generate SME traps from EL1 to EL1 which we
+-               * can't intercept and which would confuse the guest.
+-               *
+-               * Do the same for PSTATE.ZA in the case where there
+-               * is state in the registers which has not already
+-               * been saved, this is very unlikely to happen.
+-               */
+-              if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+-                      vcpu->arch.fp_state = FP_STATE_FREE;
+-                      fpsimd_save_and_flush_cpu_state();
+-              }
+       }
+ }
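(Taken together, the hunks above replace the lazy fpsimd_kvm_prepare()
handling with an unconditional save and flush at vCPU load time. A condensed
sketch of kvm_arch_vcpu_load_fp() as this patch leaves it is shown below;
lines not visible in the diff context, such as the system_supports_sme()
check and the HOST_SVE_ENABLED set, are filled in by assumption, so treat it
as an illustration rather than the literal 6.6 source.)

void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	if (!system_supports_fpsimd())
		return;

	/*
	 * Save and unbind any host FPSIMD/SVE/SME state now, so hyp never
	 * needs to save it and the host kernel reloads it from memory on
	 * the next return to userspace.  When SME is in use this also
	 * leaves PSTATE.{SM,ZA} == {0,0}.
	 */
	fpsimd_save_and_flush_cpu_state();
	vcpu->arch.fp_state = FP_STATE_FREE;

	/*
	 * Note the host's EL0 trap configuration; at this point in the
	 * series it is still consumed by kvm_arch_vcpu_put_fp().
	 */
	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);
	}
}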
diff --git a/queue-6.6/mptcp-fix-data-stream-corruption-in-the-address-announcement.patch b/queue-6.6/mptcp-fix-data-stream-corruption-in-the-address-announcement.patch
new file mode 100644 (file)
index 0000000..89ef12a
--- /dev/null
@@ -0,0 +1,78 @@
+From 2c1f97a52cb827a5f2768e67a9dddffae1ed47ab Mon Sep 17 00:00:00 2001
+From: Arthur Mongodin <amongodin@randorisec.fr>
+Date: Fri, 14 Mar 2025 21:11:31 +0100
+Subject: mptcp: Fix data stream corruption in the address announcement
+
+From: Arthur Mongodin <amongodin@randorisec.fr>
+
+commit 2c1f97a52cb827a5f2768e67a9dddffae1ed47ab upstream.
+
+Because of the size restriction in the TCP options space, the MPTCP
+ADD_ADDR option is exclusive and cannot be sent with other MPTCP ones.
+For this reason, in the linked mptcp_out_options structure, groups of
+fields linked to different options are part of the same union.
+
+There is a case where the mptcp_pm_add_addr_signal() function can modify
+opts->addr, but not end up sending an ADD_ADDR. Later on, back in
+mptcp_established_options(), other options will be sent, but with
+unexpected data written in other fields due to the union, e.g. in
+opts->ext_copy. This could lead to data stream corruption in the next
+packet.
+
+Using an intermediate variable prevents corrupting the previously
+established DSS option. The ADD_ADDR option parameters are now assigned
+only once we are sure this ADD_ADDR option can be set in the packet,
+e.g. after having dropped the other suboptions.
+
+Fixes: 1bff1e43a30e ("mptcp: optimize out option generation")
+Cc: stable@vger.kernel.org
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Arthur Mongodin <amongodin@randorisec.fr>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+[ Matt: the commit message has been updated: long lines splits and some
+  clarifications. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250314-net-mptcp-fix-data-stream-corr-sockopt-v1-1-122dbb249db3@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -651,6 +651,7 @@ static bool mptcp_established_options_ad
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       bool drop_other_suboptions = false;
+       unsigned int opt_size = *size;
++      struct mptcp_addr_info addr;
+       bool echo;
+       int len;
+@@ -659,7 +660,7 @@ static bool mptcp_established_options_ad
+        */
+       if (!mptcp_pm_should_add_signal(msk) ||
+           (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
+-          !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
++          !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr,
+                   &echo, &drop_other_suboptions))
+               return false;
+@@ -672,7 +673,7 @@ static bool mptcp_established_options_ad
+       else if (opts->suboptions & OPTION_MPTCP_DSS)
+               return false;
+-      len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
++      len = mptcp_add_addr_len(addr.family, echo, !!addr.port);
+       if (remaining < len)
+               return false;
+@@ -689,6 +690,7 @@ static bool mptcp_established_options_ad
+               opts->ahmac = 0;
+               *size -= opt_size;
+       }
++      opts->addr = addr;
+       opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
+       if (!echo) {
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
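(The fix above is an instance of a more general pattern: when mutually
exclusive options share storage through a union, stage a candidate option in
a local variable and copy it into the union only once the option is
definitely being emitted, so an early bail-out cannot clobber fields another
option has already written. The standalone sketch below illustrates the idea;
the structure, option flags and length rule in it are invented for the
example and are not the actual MPTCP code.)

#include <stdbool.h>
#include <stdio.h>

#define OPT_DSS      0x1
#define OPT_ADD_ADDR 0x2

/* Simplified stand-in for a structure like mptcp_out_options, where
 * fields of mutually exclusive options share an anonymous union. */
struct addr_info { int family; int port; };

struct out_options {
	unsigned int suboptions;
	union {
		struct addr_info addr;              /* ADD_ADDR fields */
		struct { long data_seq; } ext_copy; /* DSS fields */
	};
};

/* Stage the address in a local and commit it to the union only once we
 * know the option fits, so failing checks leave the union untouched. */
static bool add_addr_option(struct out_options *opts, int remaining,
			    const struct addr_info *signalled)
{
	struct addr_info addr = *signalled;     /* intermediate variable */
	int len = addr.port ? 20 : 16;          /* invented length rule */

	if (remaining < len)
		return false;                   /* union left untouched */

	opts->addr = addr;                      /* commit only on success */
	opts->suboptions |= OPT_ADD_ADDR;
	return true;
}

int main(void)
{
	struct out_options opts = { .suboptions = OPT_DSS };
	struct addr_info a = { .family = 2, .port = 8080 };

	opts.ext_copy.data_seq = 42;            /* DSS data already queued */

	if (!add_addr_option(&opts, 4, &a))     /* not enough space left */
		printf("ADD_ADDR skipped, DSS data_seq still %ld\n",
		       opts.ext_copy.data_seq);
	return 0;
}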
index abceb78dcc40111ce18065a92fc8bee0d228ef35..cc39d33e2a181a35dd8e4003a3b0ac3d87c71a97 100644 (file)
@@ -59,3 +59,12 @@ drm-amdgpu-fix-jpeg-video-caps-max-size-for-navi1x-and-raven.patch
 drm-amd-display-should-support-dmub-hw-lock-on-replay.patch
 drm-amd-display-use-hw-lock-mgr-for-psr1-when-only-one-edp.patch
 ksmbd-fix-incorrect-validation-for-num_aces-field-of-smb_acl.patch
+mptcp-fix-data-stream-corruption-in-the-address-announcement.patch
+kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
+kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
+kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
+kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
+kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
+kvm-arm64-refactor-exit-handlers.patch
+kvm-arm64-mark-some-header-functions-as-inline.patch
+kvm-arm64-eagerly-switch-zcr_el-1-2.patch