git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop some 6.12 and 6.13 patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Mar 2025 13:59:17 +0000 (06:59 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Mar 2025 13:59:17 +0000 (06:59 -0700)
22 files changed:
queue-6.12/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch [deleted file]
queue-6.12/kvm-arm64-eagerly-switch-zcr_el-1-2.patch [deleted file]
queue-6.12/kvm-arm64-mark-some-header-functions-as-inline.patch [deleted file]
queue-6.12/kvm-arm64-refactor-exit-handlers.patch [deleted file]
queue-6.12/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch [deleted file]
queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch [deleted file]
queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch [deleted file]
queue-6.12/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch [deleted file]
queue-6.12/mm-shmem-fix-potential-data-corruption-during-shmem-.patch [deleted file]
queue-6.12/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch [deleted file]
queue-6.12/series
queue-6.13/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch [deleted file]
queue-6.13/kvm-arm64-eagerly-switch-zcr_el-1-2.patch [deleted file]
queue-6.13/kvm-arm64-mark-some-header-functions-as-inline.patch [deleted file]
queue-6.13/kvm-arm64-refactor-exit-handlers.patch [deleted file]
queue-6.13/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch [deleted file]
queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch [deleted file]
queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch [deleted file]
queue-6.13/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch [deleted file]
queue-6.13/mm-shmem-fix-potential-data-corruption-during-shmem-.patch [deleted file]
queue-6.13/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch [deleted file]
queue-6.13/series

diff --git a/queue-6.12/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch b/queue-6.12/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
deleted file mode 100644 (file)
index a89032b..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-From 2fd5b4b0e7b440602455b79977bfa64dea101e6c Mon Sep 17 00:00:00 2001
-From: Fuad Tabba <tabba@google.com>
-Date: Mon, 16 Dec 2024 10:50:52 +0000
-Subject: KVM: arm64: Calculate cptr_el2 traps on activating traps
-
-From: Fuad Tabba <tabba@google.com>
-
-commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c upstream.
-
-Similar to VHE, calculate the value of cptr_el2 from scratch on
-activate traps. This removes the need to store cptr_el2 in every
-vcpu structure. Moreover, some traps, such as whether the guest
-owns the fp registers, need to be set on every vcpu run.
-
-Reported-by: James Clark <james.clark@linaro.org>
-Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
-Signed-off-by: Fuad Tabba <tabba@google.com>
-Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    1 
- arch/arm64/kvm/arm.c              |    1 
- arch/arm64/kvm/hyp/nvhe/pkvm.c    |   29 ---------------------
- arch/arm64/kvm/hyp/nvhe/switch.c  |   51 +++++++++++++++++++++++---------------
- 4 files changed, 32 insertions(+), 50 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -697,7 +697,6 @@ struct kvm_vcpu_arch {
-       u64 hcr_el2;
-       u64 hcrx_el2;
-       u64 mdcr_el2;
--      u64 cptr_el2;
-       /* Exception Information */
-       struct kvm_vcpu_fault_info fault;
---- a/arch/arm64/kvm/arm.c
-+++ b/arch/arm64/kvm/arm.c
-@@ -1577,7 +1577,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init
-       }
-       vcpu_reset_hcr(vcpu);
--      vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
-       /*
-        * Handle the "start in power-off" case.
---- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
-+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
-@@ -28,8 +28,6 @@ static void pvm_init_traps_aa64pfr0(stru
-       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-       u64 hcr_set = HCR_RW;
-       u64 hcr_clear = 0;
--      u64 cptr_set = 0;
--      u64 cptr_clear = 0;
-       /* Protected KVM does not support AArch32 guests. */
-       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-@@ -59,21 +57,10 @@ static void pvm_init_traps_aa64pfr0(stru
-       /* Trap AMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
-               hcr_clear |= HCR_AMVOFFEN;
--              cptr_set |= CPTR_EL2_TAM;
--      }
--
--      /* Trap SVE */
--      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
--              if (has_hvhe())
--                      cptr_clear |= CPACR_ELx_ZEN;
--              else
--                      cptr_set |= CPTR_EL2_TZ;
-       }
-       vcpu->arch.hcr_el2 |= hcr_set;
-       vcpu->arch.hcr_el2 &= ~hcr_clear;
--      vcpu->arch.cptr_el2 |= cptr_set;
--      vcpu->arch.cptr_el2 &= ~cptr_clear;
- }
- /*
-@@ -103,7 +90,6 @@ static void pvm_init_traps_aa64dfr0(stru
-       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
-       u64 mdcr_set = 0;
-       u64 mdcr_clear = 0;
--      u64 cptr_set = 0;
-       /* Trap/constrain PMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
-@@ -130,21 +116,12 @@ static void pvm_init_traps_aa64dfr0(stru
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
-               mdcr_set |= MDCR_EL2_TTRF;
--      /* Trap Trace */
--      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
--              if (has_hvhe())
--                      cptr_set |= CPACR_EL1_TTA;
--              else
--                      cptr_set |= CPTR_EL2_TTA;
--      }
--
-       /* Trap External Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-               mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
-       vcpu->arch.mdcr_el2 |= mdcr_set;
-       vcpu->arch.mdcr_el2 &= ~mdcr_clear;
--      vcpu->arch.cptr_el2 |= cptr_set;
- }
- /*
-@@ -195,10 +172,6 @@ static void pvm_init_trap_regs(struct kv
-       /* Clear res0 and set res1 bits to trap potential new features. */
-       vcpu->arch.hcr_el2 &= ~(HCR_RES0);
-       vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
--      if (!has_hvhe()) {
--              vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
--              vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
--      }
- }
- /*
-@@ -579,8 +552,6 @@ unlock:
-               return ret;
-       }
--      hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
--
-       return 0;
- }
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -36,33 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_ve
- extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
--static void __activate_traps(struct kvm_vcpu *vcpu)
-+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
- {
--      u64 val;
-+      u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
--      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
--      __activate_traps_common(vcpu);
-+      if (has_hvhe()) {
-+              val |= CPACR_ELx_TTA;
--      val = vcpu->arch.cptr_el2;
--      val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
--      val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
--      if (cpus_have_final_cap(ARM64_SME)) {
--              if (has_hvhe())
--                      val &= ~CPACR_ELx_SMEN;
--              else
--                      val |= CPTR_EL2_TSM;
--      }
-+              if (guest_owns_fp_regs()) {
-+                      val |= CPACR_ELx_FPEN;
-+                      if (vcpu_has_sve(vcpu))
-+                              val |= CPACR_ELx_ZEN;
-+              }
-+      } else {
-+              val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
-+
-+              /*
-+               * Always trap SME since it's not supported in KVM.
-+               * TSM is RES1 if SME isn't implemented.
-+               */
-+              val |= CPTR_EL2_TSM;
--      if (!guest_owns_fp_regs()) {
--              if (has_hvhe())
--                      val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
--              else
--                      val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
-+              if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-+                      val |= CPTR_EL2_TZ;
--              __activate_traps_fpsimd32(vcpu);
-+              if (!guest_owns_fp_regs())
-+                      val |= CPTR_EL2_TFP;
-       }
-+      if (!guest_owns_fp_regs())
-+              __activate_traps_fpsimd32(vcpu);
-+
-       kvm_write_cptr_el2(val);
-+}
-+
-+static void __activate_traps(struct kvm_vcpu *vcpu)
-+{
-+      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
-+      __activate_traps_common(vcpu);
-+      __activate_cptr_traps(vcpu);
-+
-       write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
-       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
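
For reference, the dropped patch above stops caching cptr_el2 in struct kvm_vcpu_arch and instead recomputes the trap value every time traps are activated. A condensed, illustrative sketch of the resulting nVHE/hVHE logic, using only identifiers that already appear in the deleted hunk (not the exact upstream code):

static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_EL2_TAM;		/* Same bit irrespective of E2H */

	if (has_hvhe()) {
		/* hVHE: CPACR layout; open FP/SVE only when the guest owns the regs */
		val |= CPACR_ELx_TTA;
		if (guest_owns_fp_regs()) {
			val |= CPACR_ELx_FPEN;
			if (vcpu_has_sve(vcpu))
				val |= CPACR_ELx_ZEN;
		}
	} else {
		/* nVHE: CPTR_EL2 layout; SME is always trapped */
		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1 | CPTR_EL2_TSM;
		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
		if (!guest_owns_fp_regs())
			val |= CPTR_EL2_TFP;
	}

	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	kvm_write_cptr_el2(val);
}
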
diff --git a/queue-6.12/kvm-arm64-eagerly-switch-zcr_el-1-2.patch b/queue-6.12/kvm-arm64-eagerly-switch-zcr_el-1-2.patch
deleted file mode 100644 (file)
index bd27979..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-From 59419f10045bc955d2229819c7cf7a8b0b9c5b59 Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:26 +0000
-Subject: KVM: arm64: Eagerly switch ZCR_EL{1,2}
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59 upstream.
-
-In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
-CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:
-
-* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
-  by the guest hypervisor, which may be less than or equal to that
-  guest's maximum VL.
-
-  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.
-
-* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
-  which may be less than or greater than the guest's maximum VL.
-
-  Note: in this case hyp code traps host SVE usage and lazily restores
-  ZCR_EL2 to the host's maximum VL, which may be greater than the
-  guest's maximum VL.
-
-This can be the case between exiting a guest and kvm_arch_vcpu_put_fp().
-If a softirq is taken during this period and the softirq handler tries
-to use kernel-mode NEON, then the kernel will fail to save the guest's
-FPSIMD/SVE state, and will pend a SIGKILL for the current thread.
-
-This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
-FPSIMD/SVE state with the guest's maximum SVE VL, and
-fpsimd_save_user_state() verifies that the live SVE VL is as expected
-before attempting to save the register state:
-
-| if (WARN_ON(sve_get_vl() != vl)) {
-|         force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
-|         return;
-| }
-
-Fix this and make this a bit easier to reason about by always eagerly
-switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
-happening, there's no need to trap host SVE usage, and the nVHE/hVHE
-__deactivate_cptr_traps() logic can be simplified to enable host access
-to all present FPSIMD/SVE/SME features.
-
-In protected nVHE/hVHE modes, the host's state is always saved/restored
-by hyp, and the guest's state is saved prior to exit to the host, so
-from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
-the host's ZCR_EL1 is never clobbered by hyp.
-
-Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
-Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Cc: Will Deacon <will@kernel.org>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/fpsimd.c                 |   30 ----------------
- arch/arm64/kvm/hyp/entry.S              |    5 ++
- arch/arm64/kvm/hyp/include/hyp/switch.h |   59 ++++++++++++++++++++++++++++++++
- arch/arm64/kvm/hyp/nvhe/hyp-main.c      |   13 +++----
- arch/arm64/kvm/hyp/nvhe/switch.c        |   33 +++++++++++++++--
- arch/arm64/kvm/hyp/vhe/switch.c         |    4 ++
- 6 files changed, 103 insertions(+), 41 deletions(-)
-
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -136,36 +136,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-       local_irq_save(flags);
-       if (guest_owns_fp_regs()) {
--              if (vcpu_has_sve(vcpu)) {
--                      u64 zcr = read_sysreg_el1(SYS_ZCR);
--
--                      /*
--                       * If the vCPU is in the hyp context then ZCR_EL1 is
--                       * loaded with its vEL2 counterpart.
--                       */
--                      __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
--
--                      /*
--                       * Restore the VL that was saved when bound to the CPU,
--                       * which is the maximum VL for the guest. Because the
--                       * layout of the data when saving the sve state depends
--                       * on the VL, we need to use a consistent (i.e., the
--                       * maximum) VL.
--                       * Note that this means that at guest exit ZCR_EL1 is
--                       * not necessarily the same as on guest entry.
--                       *
--                       * ZCR_EL2 holds the guest hypervisor's VL when running
--                       * a nested guest, which could be smaller than the
--                       * max for the vCPU. Similar to above, we first need to
--                       * switch to a VL consistent with the layout of the
--                       * vCPU's SVE state. KVM support for NV implies VHE, so
--                       * using the ZCR_EL1 alias is safe.
--                       */
--                      if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
--                              sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
--                                                     SYS_ZCR_EL1);
--              }
--
-               /*
-                * Flush (save and invalidate) the fpsimd/sve state so that if
-                * the host tries to use fpsimd/sve, it's not using stale data
---- a/arch/arm64/kvm/hyp/entry.S
-+++ b/arch/arm64/kvm/hyp/entry.S
-@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
- alternative_else_nop_endif
-       mrs     x1, isr_el1
-       cbz     x1,  1f
-+
-+      // Ensure that __guest_enter() always provides a context
-+      // synchronization event so that callers don't need ISBs for anything
-+      // that would usually be synchonized by the ERET.
-+      isb
-       mov     x0, #ARM_EXCEPTION_IRQ
-       ret
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -344,6 +344,65 @@ static inline void __hyp_sve_save_host(v
-                        true);
- }
-+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
-+{
-+      u64 zcr_el1, zcr_el2;
-+
-+      if (!guest_owns_fp_regs())
-+              return;
-+
-+      if (vcpu_has_sve(vcpu)) {
-+              /* A guest hypervisor may restrict the effective max VL. */
-+              if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
-+                      zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
-+              else
-+                      zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
-+
-+              write_sysreg_el2(zcr_el2, SYS_ZCR);
-+
-+              zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
-+              write_sysreg_el1(zcr_el1, SYS_ZCR);
-+      }
-+}
-+
-+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
-+{
-+      u64 zcr_el1, zcr_el2;
-+
-+      if (!guest_owns_fp_regs())
-+              return;
-+
-+      /*
-+       * When the guest owns the FP regs, we know that guest+hyp traps for
-+       * any FPSIMD/SVE/SME features exposed to the guest have been disabled
-+       * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
-+       * prior to __guest_entry(). As __guest_entry() guarantees a context
-+       * synchronization event, we don't need an ISB here to avoid taking
-+       * traps for anything that was exposed to the guest.
-+       */
-+      if (vcpu_has_sve(vcpu)) {
-+              zcr_el1 = read_sysreg_el1(SYS_ZCR);
-+              __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
-+
-+              /*
-+               * The guest's state is always saved using the guest's max VL.
-+               * Ensure that the host has the guest's max VL active such that
-+               * the host can save the guest's state lazily, but don't
-+               * artificially restrict the host to the guest's max VL.
-+               */
-+              if (has_vhe()) {
-+                      zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
-+                      write_sysreg_el2(zcr_el2, SYS_ZCR);
-+              } else {
-+                      zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
-+                      write_sysreg_el2(zcr_el2, SYS_ZCR);
-+
-+                      zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
-+                      write_sysreg_el1(zcr_el1, SYS_ZCR);
-+              }
-+      }
-+}
-+
- static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
- {
-       /*
---- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-@@ -5,6 +5,7 @@
-  */
- #include <hyp/adjust_pc.h>
-+#include <hyp/switch.h>
- #include <asm/pgtable-types.h>
- #include <asm/kvm_asm.h>
-@@ -176,8 +177,12 @@ static void handle___kvm_vcpu_run(struct
-               sync_hyp_vcpu(hyp_vcpu);
-               pkvm_put_hyp_vcpu(hyp_vcpu);
-       } else {
-+              struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
-+
-               /* The host is fully trusted, run its vCPU directly. */
--              ret = __kvm_vcpu_run(host_vcpu);
-+              fpsimd_lazy_switch_to_guest(vcpu);
-+              ret = __kvm_vcpu_run(vcpu);
-+              fpsimd_lazy_switch_to_host(vcpu);
-       }
- out:
-@@ -486,12 +491,6 @@ void handle_trap(struct kvm_cpu_context
-       case ESR_ELx_EC_SMC64:
-               handle_host_smc(host_ctxt);
-               break;
--      case ESR_ELx_EC_SVE:
--              cpacr_clear_set(0, CPACR_ELx_ZEN);
--              isb();
--              sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
--                                     SYS_ZCR_EL2);
--              break;
-       case ESR_ELx_EC_IABT_LOW:
-       case ESR_ELx_EC_DABT_LOW:
-               handle_host_mem_abort(host_ctxt);
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct
- {
-       u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
-+      if (!guest_owns_fp_regs())
-+              __activate_traps_fpsimd32(vcpu);
-+
-       if (has_hvhe()) {
-               val |= CPACR_ELx_TTA;
-@@ -48,6 +51,8 @@ static void __activate_cptr_traps(struct
-                       if (vcpu_has_sve(vcpu))
-                               val |= CPACR_ELx_ZEN;
-               }
-+
-+              write_sysreg(val, cpacr_el1);
-       } else {
-               val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
-@@ -62,12 +67,32 @@ static void __activate_cptr_traps(struct
-               if (!guest_owns_fp_regs())
-                       val |= CPTR_EL2_TFP;
-+
-+              write_sysreg(val, cptr_el2);
-       }
-+}
--      if (!guest_owns_fp_regs())
--              __activate_traps_fpsimd32(vcpu);
-+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
-+{
-+      if (has_hvhe()) {
-+              u64 val = CPACR_ELx_FPEN;
-+
-+              if (cpus_have_final_cap(ARM64_SVE))
-+                      val |= CPACR_ELx_ZEN;
-+              if (cpus_have_final_cap(ARM64_SME))
-+                      val |= CPACR_ELx_SMEN;
-+
-+              write_sysreg(val, cpacr_el1);
-+      } else {
-+              u64 val = CPTR_NVHE_EL2_RES1;
-+
-+              if (!cpus_have_final_cap(ARM64_SVE))
-+                      val |= CPTR_EL2_TZ;
-+              if (!cpus_have_final_cap(ARM64_SME))
-+                      val |= CPTR_EL2_TSM;
--      kvm_write_cptr_el2(val);
-+              write_sysreg(val, cptr_el2);
-+      }
- }
- static void __activate_traps(struct kvm_vcpu *vcpu)
-@@ -120,7 +145,7 @@ static void __deactivate_traps(struct kv
-       write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
--      kvm_reset_cptr_el2(vcpu);
-+      __deactivate_cptr_traps(vcpu);
-       write_sysreg(__kvm_hyp_host_vector, vbar_el2);
- }
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -462,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
-       sysreg_save_host_state_vhe(host_ctxt);
-+      fpsimd_lazy_switch_to_guest(vcpu);
-+
-       /*
-        * Note that ARM erratum 1165522 requires us to configure both stage 1
-        * and stage 2 translation for the guest context before we clear
-@@ -486,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
-       __deactivate_traps(vcpu);
-+      fpsimd_lazy_switch_to_host(vcpu);
-+
-       sysreg_restore_host_state_vhe(host_ctxt);
-       if (guest_owns_fp_regs())
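
The dropped ZCR patch above makes hyp eagerly switch ZCR_EL{1,2} on guest<->host transitions whenever the guest owns the FP regs, so host SVE usage no longer needs to be trapped. A condensed sketch of the guest-entry side, taken from the fpsimd_lazy_switch_to_guest() hunk in the deleted file (illustrative only; identifiers as in that hunk):

static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	if (vcpu_has_sve(vcpu)) {
		/* A guest hypervisor may restrict the effective max VL. */
		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
		else
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;

		write_sysreg_el2(zcr_el2, SYS_ZCR);

		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
		write_sysreg_el1(zcr_el1, SYS_ZCR);
	}
}

The host-return side (fpsimd_lazy_switch_to_host() in the same hunk) mirrors this: it saves the guest's live ZCR_EL1 and restores a VL wide enough for the host to lazily save the guest's state.
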
diff --git a/queue-6.12/kvm-arm64-mark-some-header-functions-as-inline.patch b/queue-6.12/kvm-arm64-mark-some-header-functions-as-inline.patch
deleted file mode 100644 (file)
index d4ffd4b..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-From f9dd00de1e53a47763dfad601635d18542c3836d Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:25 +0000
-Subject: KVM: arm64: Mark some header functions as inline
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit f9dd00de1e53a47763dfad601635d18542c3836d upstream.
-
-The shared hyp switch header has a number of static functions which
-might not be used by all files that include the header, and when unused
-they will provoke compiler warnings, e.g.
-
-| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:703:13: warning: 'kvm_hyp_handle_dabt_low' defined but not used [-Wunused-function]
-|   703 | static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:682:13: warning: 'kvm_hyp_handle_cp15_32' defined but not used [-Wunused-function]
-|   682 | static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:662:13: warning: 'kvm_hyp_handle_sysreg' defined but not used [-Wunused-function]
-|   662 | static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:458:13: warning: 'kvm_hyp_handle_fpsimd' defined but not used [-Wunused-function]
-|   458 | static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:329:13: warning: 'kvm_hyp_handle_mops' defined but not used [-Wunused-function]
-|   329 | static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~
-
-Mark these functions as 'inline' to suppress this warning. This
-shouldn't result in any functional change.
-
-At the same time, avoid the use of __alias() in the header and alias
-kvm_hyp_handle_iabt_low() and kvm_hyp_handle_watchpt_low() to
-kvm_hyp_handle_memory_fault() using CPP, matching the style in the rest
-of the kernel. For consistency, kvm_hyp_handle_memory_fault() is also
-marked as 'inline'.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-8-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/hyp/include/hyp/switch.h |   19 +++++++++----------
- 1 file changed, 9 insertions(+), 10 deletions(-)
-
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -295,7 +295,7 @@ static inline bool __populate_fault_info
-       return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
- }
--static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-       arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
-@@ -373,7 +373,7 @@ static void kvm_hyp_save_fpsimd_host(str
-  * If FP/SIMD is not implemented, handle the trap and inject an undefined
-  * instruction exception to the guest. Similarly for trapped SVE accesses.
-  */
--static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       bool sve_guest;
-       u8 esr_ec;
-@@ -564,7 +564,7 @@ static bool handle_ampere1_tcr(struct kv
-       return true;
- }
--static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
-           handle_tx2_tvm(vcpu))
-@@ -584,7 +584,7 @@ static bool kvm_hyp_handle_sysreg(struct
-       return false;
- }
--static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
-           __vgic_v3_perform_cpuif_access(vcpu) == 1)
-@@ -593,19 +593,18 @@ static bool kvm_hyp_handle_cp15_32(struc
-       return false;
- }
--static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
-+                                             u64 *exit_code)
- {
-       if (!__populate_fault_info(vcpu))
-               return true;
-       return false;
- }
--static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
--      __alias(kvm_hyp_handle_memory_fault);
--static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
--      __alias(kvm_hyp_handle_memory_fault);
-+#define kvm_hyp_handle_iabt_low               kvm_hyp_handle_memory_fault
-+#define kvm_hyp_handle_watchpt_low    kvm_hyp_handle_memory_fault
--static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
-               return true;
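
The alias change in the dropped patch above amounts to replacing the __alias() declarations with plain preprocessor aliases, roughly (taken from the deleted hunk):

/* Before: */
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);

/* After: */
#define kvm_hyp_handle_iabt_low		kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low	kvm_hyp_handle_memory_fault
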
diff --git a/queue-6.12/kvm-arm64-refactor-exit-handlers.patch b/queue-6.12/kvm-arm64-refactor-exit-handlers.patch
deleted file mode 100644 (file)
index d514eeb..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-From 9b66195063c5a145843547b1d692bd189be85287 Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:24 +0000
-Subject: KVM: arm64: Refactor exit handlers
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit 9b66195063c5a145843547b1d692bd189be85287 upstream.
-
-The hyp exit handling logic is largely shared between VHE and nVHE/hVHE,
-with common logic in arch/arm64/kvm/hyp/include/hyp/switch.h. The code
-in the header depends on function definitions provided by
-arch/arm64/kvm/hyp/vhe/switch.c and arch/arm64/kvm/hyp/nvhe/switch.c
-when they include the header.
-
-This is an unusual header dependency, and prevents the use of
-arch/arm64/kvm/hyp/include/hyp/switch.h in other files as this would
-result in compiler warnings regarding missing definitions, e.g.
-
-| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:733:31: warning: 'kvm_get_exit_handler_array' used but never defined
-|   733 | static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
-|       |                               ^~~~~~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:735:13: warning: 'early_exit_filter' used but never defined
-|   735 | static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
-|       |             ^~~~~~~~~~~~~~~~~
-
-Refactor the logic such that the header doesn't depend on anything from
-the C files. There should be no functional change as a result of this
-patch.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-7-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/hyp/include/hyp/switch.h |   30 ++++++------------------------
- arch/arm64/kvm/hyp/nvhe/switch.c        |   28 ++++++++++++++++------------
- arch/arm64/kvm/hyp/vhe/switch.c         |    9 ++++-----
- 3 files changed, 26 insertions(+), 41 deletions(-)
-
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -635,23 +635,16 @@ static bool kvm_hyp_handle_dabt_low(stru
- typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
--static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
--
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
--
- /*
-  * Allow the hypervisor to handle the exit with an exit handler if it has one.
-  *
-  * Returns true if the hypervisor handled the exit, and control should go back
-  * to the guest, or false if it hasn't.
-  */
--static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
-+                                     const exit_handler_fn *handlers)
- {
--      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
--      exit_handler_fn fn;
--
--      fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
--
-+      exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
-       if (fn)
-               return fn(vcpu, exit_code);
-@@ -681,20 +674,9 @@ static inline void synchronize_vcpu_psta
-  * the guest, false when we should restore the host state and return to the
-  * main run loop.
-  */
--static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
-+                                    const exit_handler_fn *handlers)
- {
--      /*
--       * Save PSTATE early so that we can evaluate the vcpu mode
--       * early on.
--       */
--      synchronize_vcpu_pstate(vcpu, exit_code);
--
--      /*
--       * Check whether we want to repaint the state one way or
--       * another.
--       */
--      early_exit_filter(vcpu, exit_code);
--
-       if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
-               vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
-@@ -724,7 +706,7 @@ static inline bool fixup_guest_exit(stru
-               goto exit;
-       /* Check if there's an exit handler and allow it to handle the exit. */
--      if (kvm_hyp_handle_exit(vcpu, exit_code))
-+      if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
-               goto guest;
- exit:
-       /* Return to the host kernel and handle the exit */
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -224,19 +224,21 @@ static const exit_handler_fn *kvm_get_ex
-       return hyp_exit_handlers;
- }
--/*
-- * Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
-- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
-- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
-- * hypervisor spots a guest in such a state ensure it is handled, and don't
-- * trust the host to spot or fix it.  The check below is based on the one in
-- * kvm_arch_vcpu_ioctl_run().
-- *
-- * Returns false if the guest ran in AArch32 when it shouldn't have, and
-- * thus should exit to the host, or true if a the guest run loop can continue.
-- */
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-+      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
-+
-+      synchronize_vcpu_pstate(vcpu, exit_code);
-+
-+      /*
-+       * Some guests (e.g., protected VMs) are not be allowed to run in
-+       * AArch32.  The ARMv8 architecture does not give the hypervisor a
-+       * mechanism to prevent a guest from dropping to AArch32 EL0 if
-+       * implemented by the CPU. If the hypervisor spots a guest in such a
-+       * state ensure it is handled, and don't trust the host to spot or fix
-+       * it.  The check below is based on the one in
-+       * kvm_arch_vcpu_ioctl_run().
-+       */
-       if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
-               /*
-                * As we have caught the guest red-handed, decide that it isn't
-@@ -249,6 +251,8 @@ static void early_exit_filter(struct kvm
-               *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
-               *exit_code |= ARM_EXCEPTION_IL;
-       }
-+
-+      return __fixup_guest_exit(vcpu, exit_code, handlers);
- }
- /* Switch to the guest for legacy non-VHE systems */
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -423,13 +423,10 @@ static const exit_handler_fn hyp_exit_ha
-       [ESR_ELx_EC_MOPS]               = kvm_hyp_handle_mops,
- };
--static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
-+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
--      return hyp_exit_handlers;
--}
-+      synchronize_vcpu_pstate(vcpu, exit_code);
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
--{
-       /*
-        * If we were in HYP context on entry, adjust the PSTATE view
-        * so that the usual helpers work correctly.
-@@ -449,6 +446,8 @@ static void early_exit_filter(struct kvm
-               *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
-               *vcpu_cpsr(vcpu) |= mode;
-       }
-+
-+      return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
- }
- /* Switch to the guest for VHE systems running in EL2 */
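
After the dropped refactor above, each mode provides its own fixup_guest_exit() wrapper around a shared __fixup_guest_exit() that takes the handler array explicitly, removing the header's dependency on per-mode definitions. A condensed sketch of the VHE side from the deleted hunk (illustrative only):

static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	synchronize_vcpu_pstate(vcpu, exit_code);

	/* ... VHE-specific PSTATE repainting for hyp context elided ... */

	return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}
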
diff --git a/queue-6.12/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch b/queue-6.12/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
deleted file mode 100644 (file)
index 8196481..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-From 8eca7f6d5100b6997df4f532090bc3f7e0203bef Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:20 +0000
-Subject: KVM: arm64: Remove host FPSIMD saving for non-protected KVM
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit 8eca7f6d5100b6997df4f532090bc3f7e0203bef upstream.
-
-Now that the host eagerly saves its own FPSIMD/SVE/SME state,
-non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
-and the code to do this is never used. Protected KVM still needs to
-save/restore the host FPSIMD/SVE state to avoid leaking guest state to
-the host (and to avoid revealing to the host whether the guest used
-FPSIMD/SVE/SME), and that code needs to be retained.
-
-Remove the unused code and data structures.
-
-To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
-VHE hyp code, the nVHE/hVHE version is moved into the shared switch
-header, where it is only invoked when KVM is in protected mode.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h       |   18 ++++--------------
- arch/arm64/kvm/arm.c                    |    8 --------
- arch/arm64/kvm/fpsimd.c                 |    2 --
- arch/arm64/kvm/hyp/include/hyp/switch.h |   25 +++++++++++++++++++++++--
- arch/arm64/kvm/hyp/nvhe/hyp-main.c      |    2 +-
- arch/arm64/kvm/hyp/nvhe/switch.c        |   28 ----------------------------
- arch/arm64/kvm/hyp/vhe/switch.c         |    8 --------
- 7 files changed, 28 insertions(+), 63 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -602,23 +602,13 @@ struct kvm_host_data {
-       struct kvm_cpu_context host_ctxt;
-       /*
--       * All pointers in this union are hyp VA.
-+       * Hyp VA.
-        * sve_state is only used in pKVM and if system_supports_sve().
-        */
--      union {
--              struct user_fpsimd_state *fpsimd_state;
--              struct cpu_sve_state *sve_state;
--      };
-+      struct cpu_sve_state *sve_state;
--      union {
--              /* HYP VA pointer to the host storage for FPMR */
--              u64     *fpmr_ptr;
--              /*
--               * Used by pKVM only, as it needs to provide storage
--               * for the host
--               */
--              u64     fpmr;
--      };
-+      /* Used by pKVM only. */
-+      u64     fpmr;
-       /* Ownership of the FP regs */
-       enum {
---- a/arch/arm64/kvm/arm.c
-+++ b/arch/arm64/kvm/arm.c
-@@ -2476,14 +2476,6 @@ static void finalize_init_hyp_mode(void)
-                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
-                               kern_hyp_va(sve_state);
-               }
--      } else {
--              for_each_possible_cpu(cpu) {
--                      struct user_fpsimd_state *fpsimd_state;
--
--                      fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
--                      per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
--                              kern_hyp_va(fpsimd_state);
--              }
-       }
- }
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -64,8 +64,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-        */
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      *host_data_ptr(fpsimd_state) = NULL;
--      *host_data_ptr(fpmr_ptr) = NULL;
-       vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
-       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -344,7 +344,28 @@ static inline void __hyp_sve_save_host(v
-                        true);
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
-+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-+{
-+      /*
-+       * Non-protected kvm relies on the host restoring its sve state.
-+       * Protected kvm restores the host's sve state as not to reveal that
-+       * fpsimd was used by a guest nor leak upper sve bits.
-+       */
-+      if (system_supports_sve()) {
-+              __hyp_sve_save_host();
-+
-+              /* Re-enable SVE traps if not supported for the guest vcpu. */
-+              if (!vcpu_has_sve(vcpu))
-+                      cpacr_clear_set(CPACR_ELx_ZEN, 0);
-+
-+      } else {
-+              __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
-+      }
-+
-+      if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
-+              *host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
-+}
-+
- /*
-  * We trap the first access to the FP/SIMD to save the host context and
-@@ -394,7 +415,7 @@ static bool kvm_hyp_handle_fpsimd(struct
-       isb();
-       /* Write out the host state if it's in the registers */
--      if (host_owns_fp_regs())
-+      if (is_protected_kvm_enabled() && host_owns_fp_regs())
-               kvm_hyp_save_fpsimd_host(vcpu);
-       /* Restore the guest state */
---- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-@@ -83,7 +83,7 @@ static void fpsimd_sve_sync(struct kvm_v
-       if (system_supports_sve())
-               __hyp_sve_restore_host();
-       else
--              __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
-+              __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
-       if (has_fpmr)
-               write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -193,34 +193,6 @@ static bool kvm_handle_pvm_sys64(struct
-               kvm_handle_pvm_sysreg(vcpu, exit_code));
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
--{
--      /*
--       * Non-protected kvm relies on the host restoring its sve state.
--       * Protected kvm restores the host's sve state as not to reveal that
--       * fpsimd was used by a guest nor leak upper sve bits.
--       */
--      if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
--              __hyp_sve_save_host();
--
--              /* Re-enable SVE traps if not supported for the guest vcpu. */
--              if (!vcpu_has_sve(vcpu))
--                      cpacr_clear_set(CPACR_ELx_ZEN, 0);
--
--      } else {
--              __fpsimd_save_state(*host_data_ptr(fpsimd_state));
--      }
--
--      if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
--              u64 val = read_sysreg_s(SYS_FPMR);
--
--              if (unlikely(is_protected_kvm_enabled()))
--                      *host_data_ptr(fpmr) = val;
--              else
--                      **host_data_ptr(fpmr_ptr) = val;
--      }
--}
--
- static const exit_handler_fn hyp_exit_handlers[] = {
-       [0 ... ESR_ELx_EC_MAX]          = NULL,
-       [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -309,14 +309,6 @@ static bool kvm_hyp_handle_eret(struct k
-       return true;
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
--{
--      __fpsimd_save_state(*host_data_ptr(fpsimd_state));
--
--      if (kvm_has_fpmr(vcpu->kvm))
--              **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
--}
--
- static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       int ret = -EINVAL;
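
With the dropped patch above, hyp-side saving of host FP state is reached only for protected KVM, and the helper moves into the shared switch header. Condensed from the deleted switch.h hunk (sketch; identifiers as in that hunk):

static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
	/*
	 * Protected KVM saves/restores the host's SVE state itself, so that
	 * guest FP usage is neither revealed to nor leaked into the host.
	 */
	if (system_supports_sve()) {
		__hyp_sve_save_host();

		/* Re-enable SVE traps if the guest vcpu has no SVE. */
		if (!vcpu_has_sve(vcpu))
			cpacr_clear_set(CPACR_ELx_ZEN, 0);
	} else {
		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
	}

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}

/* ... and the call site in kvm_hyp_handle_fpsimd() now only fires for pKVM: */
if (is_protected_kvm_enabled() && host_owns_fp_regs())
	kvm_hyp_save_fpsimd_host(vcpu);
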
diff --git a/queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch b/queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
deleted file mode 100644 (file)
index 2ca5902..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-From 407a99c4654e8ea65393f412c421a55cac539f5b Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:22 +0000
-Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit 407a99c4654e8ea65393f412c421a55cac539f5b upstream.
-
-When KVM is in VHE mode, the host kernel tries to save and restore the
-configuration of CPACR_EL1.SMEN (i.e. CPTR_EL2.SMEN when HCR_EL2.E2H=1)
-across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
-configuration may be clobbered by hyp when running a vCPU. This logic
-has historically been broken, and is currently redundant.
-
-This logic was originally introduced in commit:
-
-  861262ab86270206 ("KVM: arm64: Handle SME host state when running guests")
-
-At the time, the VHE hyp code would reset CPTR_EL2.SMEN to 0b00 when
-returning to the host, trapping host access to SME state. Unfortunately,
-this was unsafe as the host could take a softirq before calling
-kvm_arch_vcpu_put_fp(), and if a softirq handler were to use kernel mode
-NEON the resulting attempt to save the live FPSIMD/SVE/SME state would
-result in a fatal trap.
-
-That issue was limited to VHE mode. For nVHE/hVHE modes, KVM always
-saved/restored the host kernel's CPACR_EL1 value, and configured
-CPTR_EL2.TSM to 0b0, ensuring that host usage of SME would not be
-trapped.
-
-The issue above was incidentally fixed by commit:
-
-  375110ab51dec5dc ("KVM: arm64: Fix resetting SME trap values on reset for (h)VHE")
-
-That commit changed the VHE hyp code to configure CPTR_EL2.SMEN to 0b01
-when returning to the host, permitting host kernel usage of SME,
-avoiding the issue described above. At the time, this was not identified
-as a fix for commit 861262ab86270206.
-
-Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
-state, there's no need to save/restore the state of the EL0 SME trap.
-The kernel can safely save/restore state without trapping, as described
-above, and will restore userspace state (including trap controls) before
-returning to userspace.
-
-Remove the redundant logic.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-5-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[Update for rework of flags storage -- broonie]
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    2 --
- arch/arm64/kvm/fpsimd.c           |   21 ---------------------
- 2 files changed, 23 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -891,8 +891,6 @@ struct kvm_vcpu_arch {
- /* Save TRBE context if active  */
- #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
--/* SME enabled for EL0 */
--#define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
- /* Physical CPU not in supported_cpus */
- #define ON_UNSUPPORTED_CPU    __vcpu_single_flag(sflags, BIT(2))
- /* WFIT instruction trapped */
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -65,12 +65,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      if (system_supports_sme()) {
--              vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
--              if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
--                      vcpu_set_flag(vcpu, HOST_SME_ENABLED);
--      }
--
-       /*
-        * If normal guests gain SME support, maintain this behavior for pKVM
-        * guests, which don't support SME.
-@@ -141,21 +135,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-       local_irq_save(flags);
--      /*
--       * If we have VHE then the Hyp code will reset CPACR_EL1 to
--       * the default value and we need to reenable SME.
--       */
--      if (has_vhe() && system_supports_sme()) {
--              /* Also restore EL0 state seen on entry */
--              if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
--                      sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
--              else
--                      sysreg_clear_set(CPACR_EL1,
--                                       CPACR_EL1_SMEN_EL0EN,
--                                       CPACR_EL1_SMEN_EL1EN);
--              isb();
--      }
--
-       if (guest_owns_fp_regs()) {
-               if (vcpu_has_sve(vcpu)) {
-                       u64 zcr = read_sysreg_el1(SYS_ZCR);
diff --git a/queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch b/queue-6.12/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
deleted file mode 100644 (file)
index e5f6cb1..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-From 459f059be702056d91537b99a129994aa6ccdd35 Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:21 +0000
-Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit 459f059be702056d91537b99a129994aa6ccdd35 upstream.
-
-When KVM is in VHE mode, the host kernel tries to save and restore the
-configuration of CPACR_EL1.ZEN (i.e. CPTR_EL2.ZEN when HCR_EL2.E2H=1)
-across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
-configuration may be clobbered by hyp when running a vCPU. This logic is
-currently redundant.
-
-The VHE hyp code unconditionally configures CPTR_EL2.ZEN to 0b01 when
-returning to the host, permitting host kernel usage of SVE.
-
-Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
-state, there's no need to save/restore the state of the EL0 SVE trap.
-The kernel can safely save/restore state without trapping, as described
-above, and will restore userspace state (including trap controls) before
-returning to userspace.
-
-Remove the redundant logic.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-4-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[Rework for refactoring of where the flags are stored -- broonie]
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    2 --
- arch/arm64/kvm/fpsimd.c           |   16 ----------------
- 2 files changed, 18 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -891,8 +891,6 @@ struct kvm_vcpu_arch {
- /* Save TRBE context if active  */
- #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
--/* SVE enabled for host EL0 */
--#define HOST_SVE_ENABLED      __vcpu_single_flag(sflags, BIT(0))
- /* SME enabled for EL0 */
- #define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
- /* Physical CPU not in supported_cpus */
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -65,10 +65,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
--      if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
--              vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
--
-       if (system_supports_sme()) {
-               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
-               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-@@ -202,18 +198,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-                * when needed.
-                */
-               fpsimd_save_and_flush_cpu_state();
--      } else if (has_vhe() && system_supports_sve()) {
--              /*
--               * The FPSIMD/SVE state in the CPU has not been touched, and we
--               * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
--               * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
--               * for EL0.  To avoid spurious traps, restore the trap state
--               * seen by kvm_arch_vcpu_load_fp():
--               */
--              if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
--                      sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
--              else
--                      sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
-       }
-       local_irq_restore(flags);
diff --git a/queue-6.12/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch b/queue-6.12/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
deleted file mode 100644 (file)
index 0061bb0..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-From fbc7e61195e23f744814e78524b73b59faa54ab4 Mon Sep 17 00:00:00 2001
-From: Mark Rutland <mark.rutland@arm.com>
-Date: Mon, 10 Feb 2025 19:52:19 +0000
-Subject: KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-commit fbc7e61195e23f744814e78524b73b59faa54ab4 upstream.
-
-There are several problems with the way hyp code lazily saves the host's
-FPSIMD/SVE state, including:
-
-* Host SVE being discarded unexpectedly due to inconsistent
-  configuration of TIF_SVE and CPACR_ELx.ZEN. This has been seen to
-  result in QEMU crashes where SVE is used by memmove(), as reported by
-  Eric Auger:
-
-  https://issues.redhat.com/browse/RHEL-68997
-
-* Host SVE state is discarded *after* modification by ptrace, which was an
-  unintentional ptrace ABI change introduced with lazy discarding of SVE state.
-
-* The host FPMR value can be discarded when running a non-protected VM,
-  where FPMR support is not exposed to a VM, and that VM uses
-  FPSIMD/SVE. In these cases the hyp code does not save the host's FPMR
-  before unbinding the host's FPSIMD/SVE/SME state, leaving a stale
-  value in memory.
-
-Avoid these by eagerly saving and "flushing" the host's FPSIMD/SVE/SME
-state when loading a vCPU such that KVM does not need to save any of the
-host's FPSIMD/SVE/SME state. For clarity, fpsimd_kvm_prepare() is
-removed and the necessary call to fpsimd_save_and_flush_cpu_state() is
-placed in kvm_arch_vcpu_load_fp(). As 'fpsimd_state' and 'fpmr_ptr'
-should not be used, they are set to NULL; all uses of these will be
-removed in subsequent patches.
-
-Historical problems go back at least as far as v5.17, e.g. erroneous
-assumptions about TIF_SVE being clear in commit:
-
-  8383741ab2e773a9 ("KVM: arm64: Get rid of host SVE tracking/saving")
-
-... and so this eager save+flush probably needs to be backported to ALL
-stable trees.
-
-Fixes: 93ae6b01bafee8fa ("KVM: arm64: Discard any SVE state when entering KVM guests")
-Fixes: 8c845e2731041f0f ("arm64/sve: Leave SVE enabled on syscall if we don't context switch")
-Fixes: ef3be86021c3bdf3 ("KVM: arm64: Add save/restore support for FPMR")
-Reported-by: Eric Auger <eauger@redhat.com>
-Reported-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Tested-by: Eric Auger <eric.auger@redhat.com>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Florian Weimer <fweimer@redhat.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Jeremy Linton <jeremy.linton@arm.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-2-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[ Mark: Handle vcpu/host flag conflict ]
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kernel/fpsimd.c |   25 -------------------------
- arch/arm64/kvm/fpsimd.c    |   35 ++++++++++-------------------------
- 2 files changed, 10 insertions(+), 50 deletions(-)
-
---- a/arch/arm64/kernel/fpsimd.c
-+++ b/arch/arm64/kernel/fpsimd.c
-@@ -1695,31 +1695,6 @@ void fpsimd_signal_preserve_current_stat
- }
- /*
-- * Called by KVM when entering the guest.
-- */
--void fpsimd_kvm_prepare(void)
--{
--      if (!system_supports_sve())
--              return;
--
--      /*
--       * KVM does not save host SVE state since we can only enter
--       * the guest from a syscall so the ABI means that only the
--       * non-saved SVE state needs to be saved.  If we have left
--       * SVE enabled for performance reasons then update the task
--       * state to be FPSIMD only.
--       */
--      get_cpu_fpsimd_context();
--
--      if (test_and_clear_thread_flag(TIF_SVE)) {
--              sve_to_fpsimd(current);
--              current->thread.fp_type = FP_STATE_FPSIMD;
--      }
--
--      put_cpu_fpsimd_context();
--}
--
--/*
-  * Associate current's FPSIMD context with this cpu
-  * The caller must have ownership of the cpu FPSIMD context before calling
-  * this function.
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -54,16 +54,18 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       if (!system_supports_fpsimd())
-               return;
--      fpsimd_kvm_prepare();
--
-       /*
--       * We will check TIF_FOREIGN_FPSTATE just before entering the
--       * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
--       * FP_STATE_FREE if the flag is set.
-+       * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
-+       * that the host kernel is responsible for restoring this state upon
-+       * return to userspace, and the hyp code doesn't need to save anything.
-+       *
-+       * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
-+       * that PSTATE.{SM,ZA} == {0,0}.
-        */
--      *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
--      *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
--      *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
-+      fpsimd_save_and_flush_cpu_state();
-+      *host_data_ptr(fp_owner) = FP_STATE_FREE;
-+      *host_data_ptr(fpsimd_state) = NULL;
-+      *host_data_ptr(fpmr_ptr) = NULL;
-       vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
-       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
-@@ -73,23 +75,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
-               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-                       vcpu_set_flag(vcpu, HOST_SME_ENABLED);
--
--              /*
--               * If PSTATE.SM is enabled then save any pending FP
--               * state and disable PSTATE.SM. If we leave PSTATE.SM
--               * enabled and the guest does not enable SME via
--               * CPACR_EL1.SMEN then operations that should be valid
--               * may generate SME traps from EL1 to EL1 which we
--               * can't intercept and which would confuse the guest.
--               *
--               * Do the same for PSTATE.ZA in the case where there
--               * is state in the registers which has not already
--               * been saved, this is very unlikely to happen.
--               */
--              if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
--                      *host_data_ptr(fp_owner) = FP_STATE_FREE;
--                      fpsimd_save_and_flush_cpu_state();
--              }
-       }
-       /*
diff --git a/queue-6.12/mm-shmem-fix-potential-data-corruption-during-shmem-.patch b/queue-6.12/mm-shmem-fix-potential-data-corruption-during-shmem-.patch
deleted file mode 100644 (file)
index 325371f..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-From 7a5f08ddda907f5ff10288e9ccc3cf57fac584fe Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 25 Feb 2025 17:52:55 +0800
-Subject: mm: shmem: fix potential data corruption during shmem swapin
-
-From: Baolin Wang <baolin.wang@linux.alibaba.com>
-
-[ Upstream commit 058313515d5aab10d0a01dd634f92ed4a4e71d4c ]
-
-Alex and Kairui reported some issues (system hang or data corruption) when
-swapping out or swapping in large shmem folios.  This is especially easy
-to reproduce when the tmpfs is mounted with the 'huge=within_size'
-parameter.  Thanks to Kairui's reproducer, the issue can be easily
-replicated.
-
-The root cause of the problem is that swap readahead may asynchronously
-swap in order 0 folios into the swap cache, while the shmem mapping can
-still store large swap entries.  Then an order 0 folio is inserted into
-the shmem mapping without splitting the large swap entry, which overwrites
-the original large swap entry, leading to data corruption.
-
-When getting a folio from the swap cache, we should split the large swap
-entry stored in the shmem mapping if the orders do not match, to fix this
-issue.
-
-Link: https://lkml.kernel.org/r/2fe47c557e74e9df5fe2437ccdc6c9115fa1bf70.1740476943.git.baolin.wang@linux.alibaba.com
-Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
-Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
-Reported-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
-Reported-by: Kairui Song <ryncsn@gmail.com>
-Closes: https://lore.kernel.org/all/1738717785.im3r5g2vxc.none@localhost/
-Tested-by: Kairui Song <kasong@tencent.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: Lance Yang <ioworker0@gmail.com>
-Cc: Matthew Wilcow <willy@infradead.org>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/shmem.c | 31 +++++++++++++++++++++++++++----
- 1 file changed, 27 insertions(+), 4 deletions(-)
-
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 738893d7fe083..e572d86f8f67e 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2164,7 +2164,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       struct folio *folio = NULL;
-       bool skip_swapcache = false;
-       swp_entry_t swap;
--      int error, nr_pages;
-+      int error, nr_pages, order, split_order;
-       VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
-       swap = radix_to_swp_entry(*foliop);
-@@ -2183,10 +2183,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       /* Look it up and read it in.. */
-       folio = swap_cache_get_folio(swap, NULL, 0);
-+      order = xa_get_order(&mapping->i_pages, index);
-       if (!folio) {
--              int order = xa_get_order(&mapping->i_pages, index);
-               bool fallback_order0 = false;
--              int split_order;
-               /* Or update major stats only when swapin succeeds?? */
-               if (fault_type) {
-@@ -2250,6 +2249,29 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-                       error = -ENOMEM;
-                       goto failed;
-               }
-+      } else if (order != folio_order(folio)) {
-+              /*
-+               * Swap readahead may swap in order 0 folios into swapcache
-+               * asynchronously, while the shmem mapping can still store
-+               * large swap entries. In such cases, we should split the
-+               * large swap entry to prevent possible data corruption.
-+               */
-+              split_order = shmem_split_large_entry(inode, index, swap, gfp);
-+              if (split_order < 0) {
-+                      error = split_order;
-+                      goto failed;
-+              }
-+
-+              /*
-+               * If the large swap entry has already been split, it is
-+               * necessary to recalculate the new swap entry based on
-+               * the old order alignment.
-+               */
-+              if (split_order > 0) {
-+                      pgoff_t offset = index - round_down(index, 1 << split_order);
-+
-+                      swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
-+              }
-       }
- alloced:
-@@ -2257,7 +2279,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       folio_lock(folio);
-       if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
-           folio->swap.val != swap.val ||
--          !shmem_confirm_swap(mapping, index, swap)) {
-+          !shmem_confirm_swap(mapping, index, swap) ||
-+          xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {
-               error = -EEXIST;
-               goto unlock;
-       }
--- 
-2.39.5
-
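The offset recalculation in the hunk above is worth checking with concrete numbers. The stand-alone C sketch below reproduces only that arithmetic (round_down() is open-coded and every value is invented for the example); it is an illustration, not the kernel code.

#include <stdio.h>

/* Power-of-two round down, matching what round_down() does for these sizes. */
static unsigned long round_down_pow2(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

int main(void)
{
	unsigned long index = 66;         /* faulting index in the shmem mapping */
	int split_order = 4;              /* the old large entry covered 2^4 pages */
	unsigned long base_offset = 1000; /* swap offset stored in the old entry  */

	/* After the split, pick the order-0 entry for this page by adding its
	 * position within the old large entry to the base swap offset. */
	unsigned long offset = index - round_down_pow2(index, 1UL << split_order);

	printf("new swap offset = %lu\n", base_offset + offset); /* prints 1002 */
	return 0;
}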
diff --git a/queue-6.12/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch b/queue-6.12/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch
deleted file mode 100644 (file)
index 384cb84..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-From df6cb03ef8e3bf53ebe143c79cf2c073bfa0584b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 8 Jan 2025 10:16:49 +0800
-Subject: mm: shmem: skip swapcache for swapin of synchronous swap device
-
-From: Baolin Wang <baolin.wang@linux.alibaba.com>
-
-[ Upstream commit 1dd44c0af4fa1e80a4e82faa10cbf5d22da40362 ]
-
-With fast swap devices (such as zram), swapin latency is crucial to
-applications.  For shmem swapin, similar to anonymous memory swapin, we
-can skip the swapcache operation to improve swapin latency.  Testing 1G
-shmem sequential swapin without THP enabled, I observed approximately a 6%
-performance improvement: (Note: I repeated 5 times and took the mean data
-for each test)
-
-w/o patch      w/ patch        changes
-534.8ms                501ms           +6.3%
-
-In addition, currently, we always split the large swap entry stored in the
-shmem mapping during shmem large folio swapin, which is not perfect,
-especially with a fast swap device.  We should swap in the whole large
-folio instead of splitting the precious large folios to take advantage of
-the large folios and improve the swapin latency if the swap device is
-a synchronous device, which is similar to anonymous memory mTHP swapin.
-Testing 1G shmem sequential swapin with 64K mTHP and 2M mTHP, I observed
-obvious performance improvement:
-
-mTHP=64K
-w/o patch      w/ patch        changes
-550.4ms                169.6ms         +69%
-
-mTHP=2M
-w/o patch      w/ patch        changes
-542.8ms                126.8ms         +77%
-
-Note that skipping swapcache requires attention to concurrent swapin
-scenarios.  Fortunately the swapcache_prepare() and
-shmem_add_to_page_cache() can help identify concurrent swapin and large
-swap entry split scenarios, and return -EEXIST for retry.
-
-[akpm@linux-foundation.org: use IS_ENABLED(), tweak comment grammar]
-Link: https://lkml.kernel.org/r/3d9f3bd3bc6ec953054baff5134f66feeaae7c1e.1736301701.git.baolin.wang@linux.alibaba.com
-Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Kairui Song <kasong@tencent.com>
-Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Ryan Roberts <ryan.roberts@arm.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Stable-dep-of: 058313515d5a ("mm: shmem: fix potential data corruption during shmem swapin")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/shmem.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++---
- 1 file changed, 105 insertions(+), 5 deletions(-)
-
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 5960e5035f983..738893d7fe083 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -1878,6 +1878,65 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
-       return ERR_PTR(error);
- }
-+static struct folio *shmem_swap_alloc_folio(struct inode *inode,
-+              struct vm_area_struct *vma, pgoff_t index,
-+              swp_entry_t entry, int order, gfp_t gfp)
-+{
-+      struct shmem_inode_info *info = SHMEM_I(inode);
-+      struct folio *new;
-+      void *shadow;
-+      int nr_pages;
-+
-+      /*
-+       * We have arrived here because our zones are constrained, so don't
-+       * limit chance of success with further cpuset and node constraints.
-+       */
-+      gfp &= ~GFP_CONSTRAINT_MASK;
-+      if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
-+              gfp_t huge_gfp = vma_thp_gfp_mask(vma);
-+
-+              gfp = limit_gfp_mask(huge_gfp, gfp);
-+      }
-+
-+      new = shmem_alloc_folio(gfp, order, info, index);
-+      if (!new)
-+              return ERR_PTR(-ENOMEM);
-+
-+      nr_pages = folio_nr_pages(new);
-+      if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
-+                                         gfp, entry)) {
-+              folio_put(new);
-+              return ERR_PTR(-ENOMEM);
-+      }
-+
-+      /*
-+       * Prevent parallel swapin from proceeding with the swap cache flag.
-+       *
-+       * Of course there is another possible concurrent scenario as well,
-+       * that is to say, the swap cache flag of a large folio has already
-+       * been set by swapcache_prepare(), while another thread may have
-+       * already split the large swap entry stored in the shmem mapping.
-+       * In this case, shmem_add_to_page_cache() will help identify the
-+       * concurrent swapin and return -EEXIST.
-+       */
-+      if (swapcache_prepare(entry, nr_pages)) {
-+              folio_put(new);
-+              return ERR_PTR(-EEXIST);
-+      }
-+
-+      __folio_set_locked(new);
-+      __folio_set_swapbacked(new);
-+      new->swap = entry;
-+
-+      mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
-+      shadow = get_shadow_from_swap_cache(entry);
-+      if (shadow)
-+              workingset_refault(new, shadow);
-+      folio_add_lru(new);
-+      swap_read_folio(new, NULL);
-+      return new;
-+}
-+
- /*
-  * When a page is moved from swapcache to shmem filecache (either by the
-  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
-@@ -1981,7 +2040,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
- }
- static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
--                                       struct folio *folio, swp_entry_t swap)
-+                                       struct folio *folio, swp_entry_t swap,
-+                                       bool skip_swapcache)
- {
-       struct address_space *mapping = inode->i_mapping;
-       swp_entry_t swapin_error;
-@@ -1997,7 +2057,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
-       nr_pages = folio_nr_pages(folio);
-       folio_wait_writeback(folio);
--      delete_from_swap_cache(folio);
-+      if (!skip_swapcache)
-+              delete_from_swap_cache(folio);
-       /*
-        * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
-        * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
-@@ -2101,6 +2162,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       struct shmem_inode_info *info = SHMEM_I(inode);
-       struct swap_info_struct *si;
-       struct folio *folio = NULL;
-+      bool skip_swapcache = false;
-       swp_entry_t swap;
-       int error, nr_pages;
-@@ -2122,6 +2184,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       /* Look it up and read it in.. */
-       folio = swap_cache_get_folio(swap, NULL, 0);
-       if (!folio) {
-+              int order = xa_get_order(&mapping->i_pages, index);
-+              bool fallback_order0 = false;
-               int split_order;
-               /* Or update major stats only when swapin succeeds?? */
-@@ -2131,6 +2195,33 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-                       count_memcg_event_mm(fault_mm, PGMAJFAULT);
-               }
-+              /*
-+               * If uffd is active for the vma, we need per-page fault
-+               * fidelity to maintain the uffd semantics, then fallback
-+               * to swapin order-0 folio, as well as for zswap case.
-+               */
-+              if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
-+                                !zswap_never_enabled()))
-+                      fallback_order0 = true;
-+
-+              /* Skip swapcache for synchronous device. */
-+              if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
-+                      folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
-+                      if (!IS_ERR(folio)) {
-+                              skip_swapcache = true;
-+                              goto alloced;
-+                      }
-+
-+                      /*
-+                       * Fallback to swapin order-0 folio unless the swap entry
-+                       * already exists.
-+                       */
-+                      error = PTR_ERR(folio);
-+                      folio = NULL;
-+                      if (error == -EEXIST)
-+                              goto failed;
-+              }
-+
-               /*
-                * Now swap device can only swap in order 0 folio, then we
-                * should split the large swap entry stored in the pagecache
-@@ -2161,9 +2252,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-               }
-       }
-+alloced:
-       /* We have to do this with folio locked to prevent races */
-       folio_lock(folio);
--      if (!folio_test_swapcache(folio) ||
-+      if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
-           folio->swap.val != swap.val ||
-           !shmem_confirm_swap(mapping, index, swap)) {
-               error = -EEXIST;
-@@ -2199,7 +2291,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       if (sgp == SGP_WRITE)
-               folio_mark_accessed(folio);
--      delete_from_swap_cache(folio);
-+      if (skip_swapcache) {
-+              folio->swap.val = 0;
-+              swapcache_clear(si, swap, nr_pages);
-+      } else {
-+              delete_from_swap_cache(folio);
-+      }
-       folio_mark_dirty(folio);
-       swap_free_nr(swap, nr_pages);
-       put_swap_device(si);
-@@ -2210,8 +2307,11 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       if (!shmem_confirm_swap(mapping, index, swap))
-               error = -EEXIST;
-       if (error == -EIO)
--              shmem_set_folio_swapin_error(inode, index, folio, swap);
-+              shmem_set_folio_swapin_error(inode, index, folio, swap,
-+                                           skip_swapcache);
- unlock:
-+      if (skip_swapcache)
-+              swapcache_clear(si, swap, folio_nr_pages(folio));
-       if (folio) {
-               folio_unlock(folio);
-               folio_put(folio);
--- 
-2.39.5
-
index 728e7c165b1a0e06f50b27c8a19113ed637d5e0f..56cbc42933e71d4d2b649bbcf0c68402f8507eff 100644 (file)
@@ -143,14 +143,6 @@ block-fix-kmem_cache-of-name-bio-108-already-exists.patch
 vhost-return-task-creation-error-instead-of-null.patch
 cifs-validate-content-of-wsl-reparse-point-buffers.patch
 cifs-throw-eopnotsupp-error-on-unsupported-reparse-p.patch
-kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
-kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
-kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
-kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
-kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
-kvm-arm64-refactor-exit-handlers.patch
-kvm-arm64-mark-some-header-functions-as-inline.patch
-kvm-arm64-eagerly-switch-zcr_el-1-2.patch
 input-goodix-berlin-fix-vddio-regulator-references.patch
 input-ads7846-fix-gpiod-allocation.patch
 input-iqs7222-preserve-system-status-register.patch
@@ -228,8 +220,6 @@ x86-vmware-parse-mp-tables-for-sev-snp-enabled-guest.patch
 i2c-ali1535-fix-an-error-handling-path-in-ali1535_pr.patch
 i2c-ali15x3-fix-an-error-handling-path-in-ali15x3_pr.patch
 i2c-sis630-fix-an-error-handling-path-in-sis630_prob.patch
-mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch
-mm-shmem-fix-potential-data-corruption-during-shmem-.patch
 mm-hugetlb-wait-for-hugetlb-folios-to-be-freed.patch
 smb3-add-support-for-iakerb.patch
 smb-client-fix-match_session-bug-preventing-session-.patch
diff --git a/queue-6.13/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch b/queue-6.13/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
deleted file mode 100644 (file)
index a7f7196..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-From stable+bounces-124192-greg=kroah.com@vger.kernel.org Thu Mar 13 00:49:47 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:09 +0000
-Subject: KVM: arm64: Calculate cptr_el2 traps on activating traps
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Fuad Tabba <tabba@google.com>,  James Clark <james.clark@linaro.org>
-Message-ID: <20250312-stable-sve-6-13-v1-1-c7ba07a6f4f7@kernel.org>
-
-From: Fuad Tabba <tabba@google.com>
-
-[ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
-
-Similar to VHE, calculate the value of cptr_el2 from scratch on
-activate traps. This removes the need to store cptr_el2 in every
-vcpu structure. Moreover, some traps, such as whether the guest
-owns the fp registers, need to be set on every vcpu run.
-
-Reported-by: James Clark <james.clark@linaro.org>
-Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
-Signed-off-by: Fuad Tabba <tabba@google.com>
-Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    1 
- arch/arm64/kvm/arm.c              |    1 
- arch/arm64/kvm/hyp/nvhe/pkvm.c    |   30 ----------------------
- arch/arm64/kvm/hyp/nvhe/switch.c  |   51 +++++++++++++++++++++++---------------
- 4 files changed, 32 insertions(+), 51 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
-       u64 hcr_el2;
-       u64 hcrx_el2;
-       u64 mdcr_el2;
--      u64 cptr_el2;
-       /* Exception Information */
-       struct kvm_vcpu_fault_info fault;
---- a/arch/arm64/kvm/arm.c
-+++ b/arch/arm64/kvm/arm.c
-@@ -1569,7 +1569,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init
-       }
-       vcpu_reset_hcr(vcpu);
--      vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
-       /*
-        * Handle the "start in power-off" case.
---- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
-+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
-@@ -31,8 +31,6 @@ static void pvm_init_traps_aa64pfr0(stru
-       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-       u64 hcr_set = HCR_RW;
-       u64 hcr_clear = 0;
--      u64 cptr_set = 0;
--      u64 cptr_clear = 0;
-       /* Protected KVM does not support AArch32 guests. */
-       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-@@ -62,21 +60,10 @@ static void pvm_init_traps_aa64pfr0(stru
-       /* Trap AMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
-               hcr_clear |= HCR_AMVOFFEN;
--              cptr_set |= CPTR_EL2_TAM;
--      }
--
--      /* Trap SVE */
--      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
--              if (has_hvhe())
--                      cptr_clear |= CPACR_ELx_ZEN;
--              else
--                      cptr_set |= CPTR_EL2_TZ;
-       }
-       vcpu->arch.hcr_el2 |= hcr_set;
-       vcpu->arch.hcr_el2 &= ~hcr_clear;
--      vcpu->arch.cptr_el2 |= cptr_set;
--      vcpu->arch.cptr_el2 &= ~cptr_clear;
- }
- /*
-@@ -106,7 +93,6 @@ static void pvm_init_traps_aa64dfr0(stru
-       const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
-       u64 mdcr_set = 0;
-       u64 mdcr_clear = 0;
--      u64 cptr_set = 0;
-       /* Trap/constrain PMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
-@@ -133,21 +119,12 @@ static void pvm_init_traps_aa64dfr0(stru
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
-               mdcr_set |= MDCR_EL2_TTRF;
--      /* Trap Trace */
--      if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
--              if (has_hvhe())
--                      cptr_set |= CPACR_EL1_TTA;
--              else
--                      cptr_set |= CPTR_EL2_TTA;
--      }
--
-       /* Trap External Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-               mdcr_clear |= MDCR_EL2_E2TB_MASK;
-       vcpu->arch.mdcr_el2 |= mdcr_set;
-       vcpu->arch.mdcr_el2 &= ~mdcr_clear;
--      vcpu->arch.cptr_el2 |= cptr_set;
- }
- /*
-@@ -198,10 +175,6 @@ static void pvm_init_trap_regs(struct kv
-       /* Clear res0 and set res1 bits to trap potential new features. */
-       vcpu->arch.hcr_el2 &= ~(HCR_RES0);
-       vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
--      if (!has_hvhe()) {
--              vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
--              vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
--      }
- }
- static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-@@ -236,7 +209,6 @@ static void pkvm_vcpu_reset_hcr(struct k
-  */
- static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
- {
--      vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
-       vcpu->arch.mdcr_el2 = 0;
-       pkvm_vcpu_reset_hcr(vcpu);
-@@ -693,8 +665,6 @@ unlock:
-               return ret;
-       }
--      hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
--
-       return 0;
- }
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -36,33 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_ve
- extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
--static void __activate_traps(struct kvm_vcpu *vcpu)
-+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
- {
--      u64 val;
-+      u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
--      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
--      __activate_traps_common(vcpu);
-+      if (has_hvhe()) {
-+              val |= CPACR_ELx_TTA;
--      val = vcpu->arch.cptr_el2;
--      val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
--      val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
--      if (cpus_have_final_cap(ARM64_SME)) {
--              if (has_hvhe())
--                      val &= ~CPACR_ELx_SMEN;
--              else
--                      val |= CPTR_EL2_TSM;
--      }
-+              if (guest_owns_fp_regs()) {
-+                      val |= CPACR_ELx_FPEN;
-+                      if (vcpu_has_sve(vcpu))
-+                              val |= CPACR_ELx_ZEN;
-+              }
-+      } else {
-+              val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
-+
-+              /*
-+               * Always trap SME since it's not supported in KVM.
-+               * TSM is RES1 if SME isn't implemented.
-+               */
-+              val |= CPTR_EL2_TSM;
--      if (!guest_owns_fp_regs()) {
--              if (has_hvhe())
--                      val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
--              else
--                      val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
-+              if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-+                      val |= CPTR_EL2_TZ;
--              __activate_traps_fpsimd32(vcpu);
-+              if (!guest_owns_fp_regs())
-+                      val |= CPTR_EL2_TFP;
-       }
-+      if (!guest_owns_fp_regs())
-+              __activate_traps_fpsimd32(vcpu);
-+
-       kvm_write_cptr_el2(val);
-+}
-+
-+static void __activate_traps(struct kvm_vcpu *vcpu)
-+{
-+      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
-+      __activate_traps_common(vcpu);
-+      __activate_cptr_traps(vcpu);
-+
-       write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
-       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
diff --git a/queue-6.13/kvm-arm64-eagerly-switch-zcr_el-1-2.patch b/queue-6.13/kvm-arm64-eagerly-switch-zcr_el-1-2.patch
deleted file mode 100644 (file)
index b1379f0..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-From broonie@kernel.org Thu Mar 13 00:49:53 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:16 +0000
-Subject: KVM: arm64: Eagerly switch ZCR_EL{1,2}
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-8-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
-CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:
-
-* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
-  by the guest hypervisor, which may be less than or equal to that
-  guest's maximum VL.
-
-  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.
-
-* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
-  which may be less than or greater than the guest's maximum VL.
-
-  Note: in this case hyp code traps host SVE usage and lazily restores
-  ZCR_EL2 to the host's maximum VL, which may be greater than the
-  guest's maximum VL.
-
-This can be the case between exiting a guest and kvm_arch_vcpu_put_fp().
-If a softirq is taken during this period and the softirq handler tries
-to use kernel-mode NEON, then the kernel will fail to save the guest's
-FPSIMD/SVE state, and will pend a SIGKILL for the current thread.
-
-This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
-FPSIMD/SVE state with the guest's maximum SVE VL, and
-fpsimd_save_user_state() verifies that the live SVE VL is as expected
-before attempting to save the register state:
-
-| if (WARN_ON(sve_get_vl() != vl)) {
-|         force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
-|         return;
-| }
-
-Fix this and make this a bit easier to reason about by always eagerly
-switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
-happening, there's no need to trap host SVE usage, and the nVHE/hVHE
-__deactivate_cptr_traps() logic can be simplified to enable host access
-to all present FPSIMD/SVE/SME features.
-
-In protected nVHE/hVHE modes, the host's state is always saved/restored
-by hyp, and the guest's state is saved prior to exit to the host, so
-from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
-the host's ZCR_EL1 is never clobbered by hyp.
-
-Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
-Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Cc: Will Deacon <will@kernel.org>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-(cherry picked from commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59)
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/fpsimd.c                 |   30 ----------------
- arch/arm64/kvm/hyp/entry.S              |    5 ++
- arch/arm64/kvm/hyp/include/hyp/switch.h |   59 ++++++++++++++++++++++++++++++++
- arch/arm64/kvm/hyp/nvhe/hyp-main.c      |   13 +++----
- arch/arm64/kvm/hyp/nvhe/switch.c        |   33 +++++++++++++++--
- arch/arm64/kvm/hyp/vhe/switch.c         |    4 ++
- 6 files changed, 103 insertions(+), 41 deletions(-)
-
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -136,36 +136,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-       local_irq_save(flags);
-       if (guest_owns_fp_regs()) {
--              if (vcpu_has_sve(vcpu)) {
--                      u64 zcr = read_sysreg_el1(SYS_ZCR);
--
--                      /*
--                       * If the vCPU is in the hyp context then ZCR_EL1 is
--                       * loaded with its vEL2 counterpart.
--                       */
--                      __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
--
--                      /*
--                       * Restore the VL that was saved when bound to the CPU,
--                       * which is the maximum VL for the guest. Because the
--                       * layout of the data when saving the sve state depends
--                       * on the VL, we need to use a consistent (i.e., the
--                       * maximum) VL.
--                       * Note that this means that at guest exit ZCR_EL1 is
--                       * not necessarily the same as on guest entry.
--                       *
--                       * ZCR_EL2 holds the guest hypervisor's VL when running
--                       * a nested guest, which could be smaller than the
--                       * max for the vCPU. Similar to above, we first need to
--                       * switch to a VL consistent with the layout of the
--                       * vCPU's SVE state. KVM support for NV implies VHE, so
--                       * using the ZCR_EL1 alias is safe.
--                       */
--                      if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
--                              sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
--                                                     SYS_ZCR_EL1);
--              }
--
-               /*
-                * Flush (save and invalidate) the fpsimd/sve state so that if
-                * the host tries to use fpsimd/sve, it's not using stale data
---- a/arch/arm64/kvm/hyp/entry.S
-+++ b/arch/arm64/kvm/hyp/entry.S
-@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
- alternative_else_nop_endif
-       mrs     x1, isr_el1
-       cbz     x1,  1f
-+
-+      // Ensure that __guest_enter() always provides a context
-+      // synchronization event so that callers don't need ISBs for anything
-+      // that would usually be synchronized by the ERET.
-+      isb
-       mov     x0, #ARM_EXCEPTION_IRQ
-       ret
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -375,6 +375,65 @@ static inline void __hyp_sve_save_host(v
-                        true);
- }
-+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
-+{
-+      u64 zcr_el1, zcr_el2;
-+
-+      if (!guest_owns_fp_regs())
-+              return;
-+
-+      if (vcpu_has_sve(vcpu)) {
-+              /* A guest hypervisor may restrict the effective max VL. */
-+              if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
-+                      zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
-+              else
-+                      zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
-+
-+              write_sysreg_el2(zcr_el2, SYS_ZCR);
-+
-+              zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
-+              write_sysreg_el1(zcr_el1, SYS_ZCR);
-+      }
-+}
-+
-+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
-+{
-+      u64 zcr_el1, zcr_el2;
-+
-+      if (!guest_owns_fp_regs())
-+              return;
-+
-+      /*
-+       * When the guest owns the FP regs, we know that guest+hyp traps for
-+       * any FPSIMD/SVE/SME features exposed to the guest have been disabled
-+       * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
-+       * prior to __guest_entry(). As __guest_entry() guarantees a context
-+       * synchronization event, we don't need an ISB here to avoid taking
-+       * traps for anything that was exposed to the guest.
-+       */
-+      if (vcpu_has_sve(vcpu)) {
-+              zcr_el1 = read_sysreg_el1(SYS_ZCR);
-+              __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
-+
-+              /*
-+               * The guest's state is always saved using the guest's max VL.
-+               * Ensure that the host has the guest's max VL active such that
-+               * the host can save the guest's state lazily, but don't
-+               * artificially restrict the host to the guest's max VL.
-+               */
-+              if (has_vhe()) {
-+                      zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
-+                      write_sysreg_el2(zcr_el2, SYS_ZCR);
-+              } else {
-+                      zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
-+                      write_sysreg_el2(zcr_el2, SYS_ZCR);
-+
-+                      zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
-+                      write_sysreg_el1(zcr_el1, SYS_ZCR);
-+              }
-+      }
-+}
-+
- static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
- {
-       /*
---- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-@@ -5,6 +5,7 @@
-  */
- #include <hyp/adjust_pc.h>
-+#include <hyp/switch.h>
- #include <asm/pgtable-types.h>
- #include <asm/kvm_asm.h>
-@@ -178,8 +179,12 @@ static void handle___kvm_vcpu_run(struct
-               sync_hyp_vcpu(hyp_vcpu);
-               pkvm_put_hyp_vcpu(hyp_vcpu);
-       } else {
-+              struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
-+
-               /* The host is fully trusted, run its vCPU directly. */
--              ret = __kvm_vcpu_run(host_vcpu);
-+              fpsimd_lazy_switch_to_guest(vcpu);
-+              ret = __kvm_vcpu_run(vcpu);
-+              fpsimd_lazy_switch_to_host(vcpu);
-       }
- out:
-@@ -480,12 +485,6 @@ void handle_trap(struct kvm_cpu_context
-       case ESR_ELx_EC_SMC64:
-               handle_host_smc(host_ctxt);
-               break;
--      case ESR_ELx_EC_SVE:
--              cpacr_clear_set(0, CPACR_ELx_ZEN);
--              isb();
--              sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
--                                     SYS_ZCR_EL2);
--              break;
-       case ESR_ELx_EC_IABT_LOW:
-       case ESR_ELx_EC_DABT_LOW:
-               handle_host_mem_abort(host_ctxt);
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct
- {
-       u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
-+      if (!guest_owns_fp_regs())
-+              __activate_traps_fpsimd32(vcpu);
-+
-       if (has_hvhe()) {
-               val |= CPACR_ELx_TTA;
-@@ -48,6 +51,8 @@ static void __activate_cptr_traps(struct
-                       if (vcpu_has_sve(vcpu))
-                               val |= CPACR_ELx_ZEN;
-               }
-+
-+              write_sysreg(val, cpacr_el1);
-       } else {
-               val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
-@@ -62,12 +67,32 @@ static void __activate_cptr_traps(struct
-               if (!guest_owns_fp_regs())
-                       val |= CPTR_EL2_TFP;
-+
-+              write_sysreg(val, cptr_el2);
-       }
-+}
--      if (!guest_owns_fp_regs())
--              __activate_traps_fpsimd32(vcpu);
-+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
-+{
-+      if (has_hvhe()) {
-+              u64 val = CPACR_ELx_FPEN;
-+
-+              if (cpus_have_final_cap(ARM64_SVE))
-+                      val |= CPACR_ELx_ZEN;
-+              if (cpus_have_final_cap(ARM64_SME))
-+                      val |= CPACR_ELx_SMEN;
-+
-+              write_sysreg(val, cpacr_el1);
-+      } else {
-+              u64 val = CPTR_NVHE_EL2_RES1;
-+
-+              if (!cpus_have_final_cap(ARM64_SVE))
-+                      val |= CPTR_EL2_TZ;
-+              if (!cpus_have_final_cap(ARM64_SME))
-+                      val |= CPTR_EL2_TSM;
--      kvm_write_cptr_el2(val);
-+              write_sysreg(val, cptr_el2);
-+      }
- }
- static void __activate_traps(struct kvm_vcpu *vcpu)
-@@ -120,7 +145,7 @@ static void __deactivate_traps(struct kv
-       write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
--      kvm_reset_cptr_el2(vcpu);
-+      __deactivate_cptr_traps(vcpu);
-       write_sysreg(__kvm_hyp_host_vector, vbar_el2);
- }
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -462,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
-       sysreg_save_host_state_vhe(host_ctxt);
-+      fpsimd_lazy_switch_to_guest(vcpu);
-+
-       /*
-        * Note that ARM erratum 1165522 requires us to configure both stage 1
-        * and stage 2 translation for the guest context before we clear
-@@ -486,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm
-       __deactivate_traps(vcpu);
-+      fpsimd_lazy_switch_to_host(vcpu);
-+
-       sysreg_restore_host_state_vhe(host_ctxt);
-       if (guest_owns_fp_regs())
diff --git a/queue-6.13/kvm-arm64-mark-some-header-functions-as-inline.patch b/queue-6.13/kvm-arm64-mark-some-header-functions-as-inline.patch
deleted file mode 100644 (file)
index 45cbf53..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-From broonie@kernel.org Thu Mar 13 00:49:50 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:15 +0000
-Subject: KVM: arm64: Mark some header functions as inline
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-7-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit f9dd00de1e53a47763dfad601635d18542c3836d ]
-
-The shared hyp switch header has a number of static functions which
-might not be used by all files that include the header, and when unused
-they will provoke compiler warnings, e.g.
-
-| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:703:13: warning: 'kvm_hyp_handle_dabt_low' defined but not used [-Wunused-function]
-|   703 | static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:682:13: warning: 'kvm_hyp_handle_cp15_32' defined but not used [-Wunused-function]
-|   682 | static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:662:13: warning: 'kvm_hyp_handle_sysreg' defined but not used [-Wunused-function]
-|   662 | static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:458:13: warning: 'kvm_hyp_handle_fpsimd' defined but not used [-Wunused-function]
-|   458 | static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:329:13: warning: 'kvm_hyp_handle_mops' defined but not used [-Wunused-function]
-|   329 | static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
-|       |             ^~~~~~~~~~~~~~~~~~~
-
-Mark these functions as 'inline' to suppress this warning. This
-shouldn't result in any functional change.
-
-At the same time, avoid the use of __alias() in the header and alias
-kvm_hyp_handle_iabt_low() and kvm_hyp_handle_watchpt_low() to
-kvm_hyp_handle_memory_fault() using CPP, matching the style in the rest
-of the kernel. For consistency, kvm_hyp_handle_memory_fault() is also
-marked as 'inline'.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-8-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/hyp/include/hyp/switch.h |   19 +++++++++----------
- 1 file changed, 9 insertions(+), 10 deletions(-)
-
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -326,7 +326,7 @@ static inline bool __populate_fault_info
-       return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
- }
--static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-       arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
-@@ -404,7 +404,7 @@ static void kvm_hyp_save_fpsimd_host(str
-  * If FP/SIMD is not implemented, handle the trap and inject an undefined
-  * instruction exception to the guest. Similarly for trapped SVE accesses.
-  */
--static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       bool sve_guest;
-       u8 esr_ec;
-@@ -595,7 +595,7 @@ static bool handle_ampere1_tcr(struct kv
-       return true;
- }
--static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
-           handle_tx2_tvm(vcpu))
-@@ -615,7 +615,7 @@ static bool kvm_hyp_handle_sysreg(struct
-       return false;
- }
--static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
-           __vgic_v3_perform_cpuif_access(vcpu) == 1)
-@@ -624,19 +624,18 @@ static bool kvm_hyp_handle_cp15_32(struc
-       return false;
- }
--static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
-+                                             u64 *exit_code)
- {
-       if (!__populate_fault_info(vcpu))
-               return true;
-       return false;
- }
--static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
--      __alias(kvm_hyp_handle_memory_fault);
--static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
--      __alias(kvm_hyp_handle_memory_fault);
-+#define kvm_hyp_handle_iabt_low               kvm_hyp_handle_memory_fault
-+#define kvm_hyp_handle_watchpt_low    kvm_hyp_handle_memory_fault
--static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
-               return true;
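The warning being silenced in this patch is ordinary C behaviour that can be reproduced outside the kernel: with -Wall, GCC and Clang flag a plain static function that a translation unit includes but never calls, while static inline functions are exempt, and a preprocessor define is the usual way to alias one such helper to another. A minimal sketch (file name and helpers invented for the example):

/* demo.h -- illustrative only */
static int helper_plain(int x)          /* "defined but not used" in TUs that ignore it */
{
	return x + 1;
}

static inline int helper_inline(int x)  /* exempt from -Wunused-function */
{
	return x + 1;
}

static inline int handle_memory_fault(int x)
{
	return x;
}

/* CPP aliasing, the same trick used above for the iabt/watchpt handlers. */
#define handle_iabt_low		handle_memory_fault
#define handle_watchpt_low	handle_memory_fault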
diff --git a/queue-6.13/kvm-arm64-refactor-exit-handlers.patch b/queue-6.13/kvm-arm64-refactor-exit-handlers.patch
deleted file mode 100644 (file)
index b502a0f..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-From broonie@kernel.org Thu Mar 13 00:49:47 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:14 +0000
-Subject: KVM: arm64: Refactor exit handlers
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-6-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit 9b66195063c5a145843547b1d692bd189be85287 ]
-
-The hyp exit handling logic is largely shared between VHE and nVHE/hVHE,
-with common logic in arch/arm64/kvm/hyp/include/hyp/switch.h. The code
-in the header depends on function definitions provided by
-arch/arm64/kvm/hyp/vhe/switch.c and arch/arm64/kvm/hyp/nvhe/switch.c
-when they include the header.
-
-This is an unusual header dependency, and prevents the use of
-arch/arm64/kvm/hyp/include/hyp/switch.h in other files as this would
-result in compiler warnings regarding missing definitions, e.g.
-
-| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:733:31: warning: 'kvm_get_exit_handler_array' used but never defined
-|   733 | static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
-|       |                               ^~~~~~~~~~~~~~~~~~~~~~~~~~
-| ./arch/arm64/kvm/hyp/include/hyp/switch.h:735:13: warning: 'early_exit_filter' used but never defined
-|   735 | static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
-|       |             ^~~~~~~~~~~~~~~~~
-
-Refactor the logic such that the header doesn't depend on anything from
-the C files. There should be no functional change as a result of this
-patch.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-7-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/hyp/include/hyp/switch.h |   30 ++++++------------------------
- arch/arm64/kvm/hyp/nvhe/switch.c        |   28 ++++++++++++++++------------
- arch/arm64/kvm/hyp/vhe/switch.c         |    9 ++++-----
- 3 files changed, 26 insertions(+), 41 deletions(-)
-
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -666,23 +666,16 @@ static bool kvm_hyp_handle_dabt_low(stru
- typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
--static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
--
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
--
- /*
-  * Allow the hypervisor to handle the exit with an exit handler if it has one.
-  *
-  * Returns true if the hypervisor handled the exit, and control should go back
-  * to the guest, or false if it hasn't.
-  */
--static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
-+                                     const exit_handler_fn *handlers)
- {
--      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
--      exit_handler_fn fn;
--
--      fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
--
-+      exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
-       if (fn)
-               return fn(vcpu, exit_code);
-@@ -712,20 +705,9 @@ static inline void synchronize_vcpu_psta
-  * the guest, false when we should restore the host state and return to the
-  * main run loop.
-  */
--static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
-+                                    const exit_handler_fn *handlers)
- {
--      /*
--       * Save PSTATE early so that we can evaluate the vcpu mode
--       * early on.
--       */
--      synchronize_vcpu_pstate(vcpu, exit_code);
--
--      /*
--       * Check whether we want to repaint the state one way or
--       * another.
--       */
--      early_exit_filter(vcpu, exit_code);
--
-       if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
-               vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
-@@ -755,7 +737,7 @@ static inline bool fixup_guest_exit(stru
-               goto exit;
-       /* Check if there's an exit handler and allow it to handle the exit. */
--      if (kvm_hyp_handle_exit(vcpu, exit_code))
-+      if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
-               goto guest;
- exit:
-       /* Return to the host kernel and handle the exit */
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -224,19 +224,21 @@ static const exit_handler_fn *kvm_get_ex
-       return hyp_exit_handlers;
- }
--/*
-- * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
-- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
-- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
-- * hypervisor spots a guest in such a state ensure it is handled, and don't
-- * trust the host to spot or fix it.  The check below is based on the one in
-- * kvm_arch_vcpu_ioctl_run().
-- *
-- * Returns false if the guest ran in AArch32 when it shouldn't have, and
-- * thus should exit to the host, or true if the guest run loop can continue.
-- */
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
-+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-+      const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
-+
-+      synchronize_vcpu_pstate(vcpu, exit_code);
-+
-+      /*
-+       * Some guests (e.g., protected VMs) are not allowed to run in
-+       * AArch32.  The ARMv8 architecture does not give the hypervisor a
-+       * mechanism to prevent a guest from dropping to AArch32 EL0 if
-+       * implemented by the CPU. If the hypervisor spots a guest in such a
-+       * state ensure it is handled, and don't trust the host to spot or fix
-+       * it.  The check below is based on the one in
-+       * kvm_arch_vcpu_ioctl_run().
-+       */
-       if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
-               /*
-                * As we have caught the guest red-handed, decide that it isn't
-@@ -249,6 +251,8 @@ static void early_exit_filter(struct kvm
-               *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
-               *exit_code |= ARM_EXCEPTION_IL;
-       }
-+
-+      return __fixup_guest_exit(vcpu, exit_code, handlers);
- }
- /* Switch to the guest for legacy non-VHE systems */
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -423,13 +423,10 @@ static const exit_handler_fn hyp_exit_ha
-       [ESR_ELx_EC_MOPS]               = kvm_hyp_handle_mops,
- };
--static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
-+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
--      return hyp_exit_handlers;
--}
-+      synchronize_vcpu_pstate(vcpu, exit_code);
--static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
--{
-       /*
-        * If we were in HYP context on entry, adjust the PSTATE view
-        * so that the usual helpers work correctly.
-@@ -449,6 +446,8 @@ static void early_exit_filter(struct kvm
-               *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
-               *vcpu_cpsr(vcpu) |= mode;
-       }
-+
-+      return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
- }
- /* Switch to the guest for VHE systems running in EL2 */
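For readers skimming the queue, the shape of the exit-handler refactor above is easier to see in isolation: a shared dispatcher walks a per-mode table of handlers, and each mode (nVHE/VHE) wraps it with its own early filtering before passing in its table. The stand-alone C sketch below models only that pattern; the exit classes, the toy handler and struct vcpu are invented for illustration and are not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

struct vcpu { int mode32; };	/* toy stand-in, not the kernel's struct kvm_vcpu */

typedef bool (*exit_handler_fn)(struct vcpu *, unsigned long *);

enum { EC_WFX, EC_SYSREG, EC_MAX };	/* invented exit classes */

/* Shared dispatcher: look up a handler for the exit class and let it decide
 * whether the guest can be resumed (true) or the exit goes to the host (false). */
static bool __fixup_guest_exit(struct vcpu *vcpu, unsigned long *exit_code,
			       const exit_handler_fn *handlers)
{
	exit_handler_fn fn;

	if (*exit_code >= EC_MAX)
		return false;
	fn = handlers[*exit_code];
	return fn ? fn(vcpu, exit_code) : false;
}

static bool handle_wfx(struct vcpu *vcpu, unsigned long *exit_code)
{
	(void)vcpu; (void)exit_code;
	return true;		/* pretend the WFI/WFE was emulated in hyp */
}

static const exit_handler_fn nvhe_handlers[EC_MAX] = {
	[EC_WFX] = handle_wfx,
};

/* Mode-specific wrapper: do the early filtering (here, rejecting an AArch32
 * guest as protected mode does), then call the shared dispatcher with this
 * mode's handler table. */
static bool fixup_guest_exit_nvhe(struct vcpu *vcpu, unsigned long *exit_code)
{
	if (vcpu->mode32)
		return false;
	return __fixup_guest_exit(vcpu, exit_code, nvhe_handlers);
}

int main(void)
{
	struct vcpu v = { .mode32 = 0 };
	unsigned long ec = EC_WFX;

	printf("resume guest: %d\n", fixup_guest_exit_nvhe(&v, &ec));	/* 1 */
	return 0;
}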
diff --git a/queue-6.13/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch b/queue-6.13/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
deleted file mode 100644 (file)
index fa1af42..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-From stable+bounces-124194-greg=kroah.com@vger.kernel.org Thu Mar 13 00:50:09 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:11 +0000
-Subject: KVM: arm64: Remove host FPSIMD saving for non-protected KVM
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-3-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit 8eca7f6d5100b6997df4f532090bc3f7e0203bef ]
-
-Now that the host eagerly saves its own FPSIMD/SVE/SME state,
-non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
-and the code to do this is never used. Protected KVM still needs to
-save/restore the host FPSIMD/SVE state to avoid leaking guest state to
-the host (and to avoid revealing to the host whether the guest used
-FPSIMD/SVE/SME), and that code needs to be retained.
-
-Remove the unused code and data structures.
-
-To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
-VHE hyp code, the nVHE/hVHE version is moved into the shared switch
-header, where it is only invoked when KVM is in protected mode.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[CPACR_EL1_ZEN -> CPACR_ELx_ZEN -- broonie]
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h       |   18 ++++--------------
- arch/arm64/kvm/arm.c                    |    8 --------
- arch/arm64/kvm/fpsimd.c                 |    2 --
- arch/arm64/kvm/hyp/include/hyp/switch.h |   25 +++++++++++++++++++++++--
- arch/arm64/kvm/hyp/nvhe/hyp-main.c      |    2 +-
- arch/arm64/kvm/hyp/nvhe/switch.c        |   28 ----------------------------
- arch/arm64/kvm/hyp/vhe/switch.c         |    8 --------
- 7 files changed, 28 insertions(+), 63 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -613,23 +613,13 @@ struct kvm_host_data {
-       struct kvm_cpu_context host_ctxt;
-       /*
--       * All pointers in this union are hyp VA.
-+       * Hyp VA.
-        * sve_state is only used in pKVM and if system_supports_sve().
-        */
--      union {
--              struct user_fpsimd_state *fpsimd_state;
--              struct cpu_sve_state *sve_state;
--      };
-+      struct cpu_sve_state *sve_state;
--      union {
--              /* HYP VA pointer to the host storage for FPMR */
--              u64     *fpmr_ptr;
--              /*
--               * Used by pKVM only, as it needs to provide storage
--               * for the host
--               */
--              u64     fpmr;
--      };
-+      /* Used by pKVM only. */
-+      u64     fpmr;
-       /* Ownership of the FP regs */
-       enum {
---- a/arch/arm64/kvm/arm.c
-+++ b/arch/arm64/kvm/arm.c
-@@ -2468,14 +2468,6 @@ static void finalize_init_hyp_mode(void)
-                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
-                               kern_hyp_va(sve_state);
-               }
--      } else {
--              for_each_possible_cpu(cpu) {
--                      struct user_fpsimd_state *fpsimd_state;
--
--                      fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
--                      per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
--                              kern_hyp_va(fpsimd_state);
--              }
-       }
- }
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -64,8 +64,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-        */
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      *host_data_ptr(fpsimd_state) = NULL;
--      *host_data_ptr(fpmr_ptr) = NULL;
-       vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
-       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
---- a/arch/arm64/kvm/hyp/include/hyp/switch.h
-+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
-@@ -375,7 +375,28 @@ static inline void __hyp_sve_save_host(v
-                        true);
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
-+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-+{
-+      /*
-+       * Non-protected kvm relies on the host restoring its sve state.
-+       * Protected kvm restores the host's sve state so as not to reveal that
-+       * fpsimd was used by a guest nor leak upper sve bits.
-+       */
-+      if (system_supports_sve()) {
-+              __hyp_sve_save_host();
-+
-+              /* Re-enable SVE traps if not supported for the guest vcpu. */
-+              if (!vcpu_has_sve(vcpu))
-+                      cpacr_clear_set(CPACR_ELx_ZEN, 0);
-+
-+      } else {
-+              __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
-+      }
-+
-+      if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
-+              *host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
-+}
-+
- /*
-  * We trap the first access to the FP/SIMD to save the host context and
-@@ -425,7 +446,7 @@ static bool kvm_hyp_handle_fpsimd(struct
-       isb();
-       /* Write out the host state if it's in the registers */
--      if (host_owns_fp_regs())
-+      if (is_protected_kvm_enabled() && host_owns_fp_regs())
-               kvm_hyp_save_fpsimd_host(vcpu);
-       /* Restore the guest state */
---- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
-@@ -83,7 +83,7 @@ static void fpsimd_sve_sync(struct kvm_v
-       if (system_supports_sve())
-               __hyp_sve_restore_host();
-       else
--              __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
-+              __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
-       if (has_fpmr)
-               write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
---- a/arch/arm64/kvm/hyp/nvhe/switch.c
-+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
-@@ -193,34 +193,6 @@ static bool kvm_handle_pvm_sys64(struct
-               kvm_handle_pvm_sysreg(vcpu, exit_code));
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
--{
--      /*
--       * Non-protected kvm relies on the host restoring its sve state.
--       * Protected kvm restores the host's sve state as not to reveal that
--       * fpsimd was used by a guest nor leak upper sve bits.
--       */
--      if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
--              __hyp_sve_save_host();
--
--              /* Re-enable SVE traps if not supported for the guest vcpu. */
--              if (!vcpu_has_sve(vcpu))
--                      cpacr_clear_set(CPACR_ELx_ZEN, 0);
--
--      } else {
--              __fpsimd_save_state(*host_data_ptr(fpsimd_state));
--      }
--
--      if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
--              u64 val = read_sysreg_s(SYS_FPMR);
--
--              if (unlikely(is_protected_kvm_enabled()))
--                      *host_data_ptr(fpmr) = val;
--              else
--                      **host_data_ptr(fpmr_ptr) = val;
--      }
--}
--
- static const exit_handler_fn hyp_exit_handlers[] = {
-       [0 ... ESR_ELx_EC_MAX]          = NULL,
-       [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
---- a/arch/arm64/kvm/hyp/vhe/switch.c
-+++ b/arch/arm64/kvm/hyp/vhe/switch.c
-@@ -309,14 +309,6 @@ static bool kvm_hyp_handle_eret(struct k
-       return true;
- }
--static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
--{
--      __fpsimd_save_state(*host_data_ptr(fpsimd_state));
--
--      if (kvm_has_fpmr(vcpu->kvm))
--              **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
--}
--
- static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
- {
-       int ret = -EINVAL;
diff --git a/queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch b/queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
deleted file mode 100644 (file)
index fc9f8f9..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-From broonie@kernel.org Thu Mar 13 00:49:44 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:13 +0000
-Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-5-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit 407a99c4654e8ea65393f412c421a55cac539f5b ]
-
-When KVM is in VHE mode, the host kernel tries to save and restore the
-configuration of CPACR_EL1.SMEN (i.e. CPTR_EL2.SMEN when HCR_EL2.E2H=1)
-across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
-configuration may be clobbered by hyp when running a vCPU. This logic
-has historically been broken, and is currently redundant.
-
-This logic was originally introduced in commit:
-
-  861262ab86270206 ("KVM: arm64: Handle SME host state when running guests")
-
-At the time, the VHE hyp code would reset CPTR_EL2.SMEN to 0b00 when
-returning to the host, trapping host access to SME state. Unfortunately,
-this was unsafe as the host could take a softirq before calling
-kvm_arch_vcpu_put_fp(), and if a softirq handler were to use kernel mode
-NEON the resulting attempt to save the live FPSIMD/SVE/SME state would
-result in a fatal trap.
-
-That issue was limited to VHE mode. For nVHE/hVHE modes, KVM always
-saved/restored the host kernel's CPACR_EL1 value, and configured
-CPTR_EL2.TSM to 0b0, ensuring that host usage of SME would not be
-trapped.
-
-The issue above was incidentally fixed by commit:
-
-  375110ab51dec5dc ("KVM: arm64: Fix resetting SME trap values on reset for (h)VHE")
-
-That commit changed the VHE hyp code to configure CPTR_EL2.SMEN to 0b01
-when returning to the host, permitting host kernel usage of SME,
-avoiding the issue described above. At the time, this was not identified
-as a fix for commit 861262ab86270206.
-
-Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
-state, there's no need to save/restore the state of the EL0 SME trap.
-The kernel can safely save/restore state without trapping, as described
-above, and will restore userspace state (including trap controls) before
-returning to userspace.
-
-Remove the redundant logic.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-5-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[Update for rework of flags storage -- broonie]
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    2 --
- arch/arm64/kvm/fpsimd.c           |   21 ---------------------
- 2 files changed, 23 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -902,8 +902,6 @@ struct kvm_vcpu_arch {
- /* Save TRBE context if active  */
- #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
--/* SME enabled for EL0 */
--#define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
- /* Physical CPU not in supported_cpus */
- #define ON_UNSUPPORTED_CPU    __vcpu_single_flag(sflags, BIT(2))
- /* WFIT instruction trapped */
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -65,12 +65,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      if (system_supports_sme()) {
--              vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
--              if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
--                      vcpu_set_flag(vcpu, HOST_SME_ENABLED);
--      }
--
-       /*
-        * If normal guests gain SME support, maintain this behavior for pKVM
-        * guests, which don't support SME.
-@@ -141,21 +135,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-       local_irq_save(flags);
--      /*
--       * If we have VHE then the Hyp code will reset CPACR_EL1 to
--       * the default value and we need to reenable SME.
--       */
--      if (has_vhe() && system_supports_sme()) {
--              /* Also restore EL0 state seen on entry */
--              if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
--                      sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
--              else
--                      sysreg_clear_set(CPACR_EL1,
--                                       CPACR_EL1_SMEN_EL0EN,
--                                       CPACR_EL1_SMEN_EL1EN);
--              isb();
--      }
--
-       if (guest_owns_fp_regs()) {
-               if (vcpu_has_sve(vcpu)) {
-                       u64 zcr = read_sysreg_el1(SYS_ZCR);
diff --git a/queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch b/queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
deleted file mode 100644 (file)
index 08f30a5..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-From stable+bounces-124195-greg=kroah.com@vger.kernel.org Thu Mar 13 00:50:19 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:12 +0000
-Subject: KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Fuad Tabba <tabba@google.com>
-Message-ID: <20250312-stable-sve-6-13-v1-4-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit 459f059be702056d91537b99a129994aa6ccdd35 ]
-
-When KVM is in VHE mode, the host kernel tries to save and restore the
-configuration of CPACR_EL1.ZEN (i.e. CPTR_EL2.ZEN when HCR_EL2.E2H=1)
-across kvm_arch_vcpu_load_fp() and kvm_arch_vcpu_put_fp(), since the
-configuration may be clobbered by hyp when running a vCPU. This logic is
-currently redundant.
-
-The VHE hyp code unconditionally configures CPTR_EL2.ZEN to 0b01 when
-returning to the host, permitting host kernel usage of SVE.
-
-Now that the host eagerly saves and unbinds its own FPSIMD/SVE/SME
-state, there's no need to save/restore the state of the EL0 SVE trap.
-The kernel can safely save/restore state without trapping, as described
-above, and will restore userspace state (including trap controls) before
-returning to userspace.
-
-Remove the redundant logic.
-
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-4-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[Rework for refactoring of where the flags are stored -- broonie]
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/include/asm/kvm_host.h |    2 --
- arch/arm64/kvm/fpsimd.c           |   16 ----------------
- 2 files changed, 18 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_host.h
-+++ b/arch/arm64/include/asm/kvm_host.h
-@@ -902,8 +902,6 @@ struct kvm_vcpu_arch {
- /* Save TRBE context if active  */
- #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
--/* SVE enabled for host EL0 */
--#define HOST_SVE_ENABLED      __vcpu_single_flag(sflags, BIT(0))
- /* SME enabled for EL0 */
- #define HOST_SME_ENABLED      __vcpu_single_flag(sflags, BIT(1))
- /* Physical CPU not in supported_cpus */
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -65,10 +65,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       fpsimd_save_and_flush_cpu_state();
-       *host_data_ptr(fp_owner) = FP_STATE_FREE;
--      vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
--      if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
--              vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
--
-       if (system_supports_sme()) {
-               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
-               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-@@ -202,18 +198,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcp
-                * when needed.
-                */
-               fpsimd_save_and_flush_cpu_state();
--      } else if (has_vhe() && system_supports_sve()) {
--              /*
--               * The FPSIMD/SVE state in the CPU has not been touched, and we
--               * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
--               * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
--               * for EL0.  To avoid spurious traps, restore the trap state
--               * seen by kvm_arch_vcpu_load_fp():
--               */
--              if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
--                      sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
--              else
--                      sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
-       }
-       local_irq_restore(flags);
diff --git a/queue-6.13/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch b/queue-6.13/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
deleted file mode 100644 (file)
index 56f0bfa..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-From stable+bounces-124193-greg=kroah.com@vger.kernel.org Thu Mar 13 00:49:59 2025
-From: Mark Brown <broonie@kernel.org>
-Date: Wed, 12 Mar 2025 23:49:10 +0000
-Subject: KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state
-To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,  Marc Zyngier <maz@kernel.org>, Oliver Upton <oliver.upton@linux.dev>,  Joey Gouly <joey.gouly@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>,  Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>
-Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,  linux-kernel@vger.kernel.org, stable@vger.kernel.org,  Mark Brown <broonie@kernel.org>, Mark Rutland <mark.rutland@arm.com>,  Eric Auger <eauger@redhat.com>, Wilco Dijkstra <wilco.dijkstra@arm.com>,  Eric Auger <eric.auger@redhat.com>, Florian Weimer <fweimer@redhat.com>,  Fuad Tabba <tabba@google.com>, Jeremy Linton <jeremy.linton@arm.com>,  Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20250312-stable-sve-6-13-v1-2-c7ba07a6f4f7@kernel.org>
-
-From: Mark Rutland <mark.rutland@arm.com>
-
-[ Upstream commit fbc7e61195e23f744814e78524b73b59faa54ab4 ]
-
-There are several problems with the way hyp code lazily saves the host's
-FPSIMD/SVE state, including:
-
-* Host SVE being discarded unexpectedly due to inconsistent
-  configuration of TIF_SVE and CPACR_ELx.ZEN. This has been seen to
-  result in QEMU crashes where SVE is used by memmove(), as reported by
-  Eric Auger:
-
-  https://issues.redhat.com/browse/RHEL-68997
-
-* Host SVE state is discarded *after* modification by ptrace, which was an
-  unintentional ptrace ABI change introduced with lazy discarding of SVE state.
-
-* The host FPMR value can be discarded when running a non-protected VM,
-  where FPMR support is not exposed to a VM, and that VM uses
-  FPSIMD/SVE. In these cases the hyp code does not save the host's FPMR
-  before unbinding the host's FPSIMD/SVE/SME state, leaving a stale
-  value in memory.
-
-Avoid these by eagerly saving and "flushing" the host's FPSIMD/SVE/SME
-state when loading a vCPU such that KVM does not need to save any of the
-host's FPSIMD/SVE/SME state. For clarity, fpsimd_kvm_prepare() is
-removed and the necessary call to fpsimd_save_and_flush_cpu_state() is
-placed in kvm_arch_vcpu_load_fp(). As 'fpsimd_state' and 'fpmr_ptr'
-should not be used, they are set to NULL; all uses of these will be
-removed in subsequent patches.
-
-Historical problems go back at least as far as v5.17, e.g. erroneous
-assumptions about TIF_SVE being clear in commit:
-
-  8383741ab2e773a9 ("KVM: arm64: Get rid of host SVE tracking/saving")
-
-... and so this eager save+flush probably needs to be backported to ALL
-stable trees.
-
-Fixes: 93ae6b01bafee8fa ("KVM: arm64: Discard any SVE state when entering KVM guests")
-Fixes: 8c845e2731041f0f ("arm64/sve: Leave SVE enabled on syscall if we don't context switch")
-Fixes: ef3be86021c3bdf3 ("KVM: arm64: Add save/restore support for FPMR")
-Reported-by: Eric Auger <eauger@redhat.com>
-Reported-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
-Reviewed-by: Mark Brown <broonie@kernel.org>
-Tested-by: Mark Brown <broonie@kernel.org>
-Tested-by: Eric Auger <eric.auger@redhat.com>
-Acked-by: Will Deacon <will@kernel.org>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
-Cc: Florian Weimer <fweimer@redhat.com>
-Cc: Fuad Tabba <tabba@google.com>
-Cc: Jeremy Linton <jeremy.linton@arm.com>
-Cc: Marc Zyngier <maz@kernel.org>
-Cc: Oliver Upton <oliver.upton@linux.dev>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
-Link: https://lore.kernel.org/r/20250210195226.1215254-2-mark.rutland@arm.com
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-[ Mark: Handle vcpu/host flag conflict ]
-Signed-off-by: Mark Rutland <mark.rutland@arm.com>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kernel/fpsimd.c |   25 -------------------------
- arch/arm64/kvm/fpsimd.c    |   35 ++++++++++-------------------------
- 2 files changed, 10 insertions(+), 50 deletions(-)
-
---- a/arch/arm64/kernel/fpsimd.c
-+++ b/arch/arm64/kernel/fpsimd.c
-@@ -1695,31 +1695,6 @@ void fpsimd_signal_preserve_current_stat
- }
- /*
-- * Called by KVM when entering the guest.
-- */
--void fpsimd_kvm_prepare(void)
--{
--      if (!system_supports_sve())
--              return;
--
--      /*
--       * KVM does not save host SVE state since we can only enter
--       * the guest from a syscall so the ABI means that only the
--       * non-saved SVE state needs to be saved.  If we have left
--       * SVE enabled for performance reasons then update the task
--       * state to be FPSIMD only.
--       */
--      get_cpu_fpsimd_context();
--
--      if (test_and_clear_thread_flag(TIF_SVE)) {
--              sve_to_fpsimd(current);
--              current->thread.fp_type = FP_STATE_FPSIMD;
--      }
--
--      put_cpu_fpsimd_context();
--}
--
--/*
-  * Associate current's FPSIMD context with this cpu
-  * The caller must have ownership of the cpu FPSIMD context before calling
-  * this function.
---- a/arch/arm64/kvm/fpsimd.c
-+++ b/arch/arm64/kvm/fpsimd.c
-@@ -54,16 +54,18 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-       if (!system_supports_fpsimd())
-               return;
--      fpsimd_kvm_prepare();
--
-       /*
--       * We will check TIF_FOREIGN_FPSTATE just before entering the
--       * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
--       * FP_STATE_FREE if the flag set.
-+       * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
-+       * that the host kernel is responsible for restoring this state upon
-+       * return to userspace, and the hyp code doesn't need to save anything.
-+       *
-+       * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
-+       * that PSTATE.{SM,ZA} == {0,0}.
-        */
--      *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
--      *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
--      *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
-+      fpsimd_save_and_flush_cpu_state();
-+      *host_data_ptr(fp_owner) = FP_STATE_FREE;
-+      *host_data_ptr(fpsimd_state) = NULL;
-+      *host_data_ptr(fpmr_ptr) = NULL;
-       vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
-       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
-@@ -73,23 +75,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vc
-               vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
-               if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-                       vcpu_set_flag(vcpu, HOST_SME_ENABLED);
--
--              /*
--               * If PSTATE.SM is enabled then save any pending FP
--               * state and disable PSTATE.SM. If we leave PSTATE.SM
--               * enabled and the guest does not enable SME via
--               * CPACR_EL1.SMEN then operations that should be valid
--               * may generate SME traps from EL1 to EL1 which we
--               * can't intercept and which would confuse the guest.
--               *
--               * Do the same for PSTATE.ZA in the case where there
--               * is state in the registers which has not already
--               * been saved, this is very unlikely to happen.
--               */
--              if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
--                      *host_data_ptr(fp_owner) = FP_STATE_FREE;
--                      fpsimd_save_and_flush_cpu_state();
--              }
-       }
-       /*
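The core of this change is an ownership invariant rather than any one hunk: by the time a vCPU is loaded, the host's FPSIMD/SVE/SME state has already been saved and unbound, so the hyp FP-trap path never has host state left to write back. A small, purely illustrative C model of that invariant follows; the enum values echo the kernel's fp_owner states, but struct host_data and the two functions are simplified stand-ins, not kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum fp_owner { FP_STATE_FREE, FP_STATE_HOST_OWNED, FP_STATE_GUEST_OWNED };

struct host_data {
	enum fp_owner fp_owner;
	bool host_regs_live;	/* is live host FP state still in the registers? */
};

/* Old scheme: leave the host's state live and let hyp save it lazily.
 * New scheme (modelled here): vcpu load saves and flushes it eagerly, so
 * nothing the hyp must preserve remains in the registers. */
static void vcpu_load_fp(struct host_data *hd)
{
	hd->host_regs_live = false;	/* fpsimd_save_and_flush_cpu_state() */
	hd->fp_owner = FP_STATE_FREE;
}

/* Hyp's FP trap handler only has to install the guest's state; with the
 * eager flush there is no host-owned state left to save first. */
static void hyp_handle_fpsimd_trap(struct host_data *hd)
{
	assert(!hd->host_regs_live);
	hd->fp_owner = FP_STATE_GUEST_OWNED;
}

int main(void)
{
	struct host_data hd = { FP_STATE_HOST_OWNED, true };

	vcpu_load_fp(&hd);
	hyp_handle_fpsimd_trap(&hd);
	printf("fp owner after trap: %d\n", hd.fp_owner);	/* 2 == guest-owned */
	return 0;
}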
diff --git a/queue-6.13/mm-shmem-fix-potential-data-corruption-during-shmem-.patch b/queue-6.13/mm-shmem-fix-potential-data-corruption-during-shmem-.patch
deleted file mode 100644 (file)
index 20cf63e..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-From e6c84d5eac2493d64d745516b28b3abc7ce439b5 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 25 Feb 2025 17:52:55 +0800
-Subject: mm: shmem: fix potential data corruption during shmem swapin
-
-From: Baolin Wang <baolin.wang@linux.alibaba.com>
-
-[ Upstream commit 058313515d5aab10d0a01dd634f92ed4a4e71d4c ]
-
-Alex and Kairui reported some issues (system hang or data corruption) when
-swapping out or swapping in large shmem folios.  This is especially easy
-to reproduce when the tmpfs is mounted with the 'huge=within_size'
-parameter.  Thanks to Kairui's reproducer, the issue can be easily
-replicated.
-
-The root cause of the problem is that swap readahead may asynchronously
-swap in order 0 folios into the swap cache, while the shmem mapping can
-still store large swap entries.  Then an order 0 folio is inserted into
-the shmem mapping without splitting the large swap entry, which overwrites
-the original large swap entry, leading to data corruption.
-
-When getting a folio from the swap cache, we should split the large swap
-entry stored in the shmem mapping if the orders do not match, to fix this
-issue.
-
-Link: https://lkml.kernel.org/r/2fe47c557e74e9df5fe2437ccdc6c9115fa1bf70.1740476943.git.baolin.wang@linux.alibaba.com
-Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
-Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
-Reported-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
-Reported-by: Kairui Song <ryncsn@gmail.com>
-Closes: https://lore.kernel.org/all/1738717785.im3r5g2vxc.none@localhost/
-Tested-by: Kairui Song <kasong@tencent.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: Lance Yang <ioworker0@gmail.com>
-Cc: Matthew Wilcow <willy@infradead.org>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/shmem.c | 31 +++++++++++++++++++++++++++----
- 1 file changed, 27 insertions(+), 4 deletions(-)
-
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 6d30139d3967d..faf17a7578b71 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2189,7 +2189,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       struct folio *folio = NULL;
-       bool skip_swapcache = false;
-       swp_entry_t swap;
--      int error, nr_pages;
-+      int error, nr_pages, order, split_order;
-       VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
-       swap = radix_to_swp_entry(*foliop);
-@@ -2208,10 +2208,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       /* Look it up and read it in.. */
-       folio = swap_cache_get_folio(swap, NULL, 0);
-+      order = xa_get_order(&mapping->i_pages, index);
-       if (!folio) {
--              int order = xa_get_order(&mapping->i_pages, index);
-               bool fallback_order0 = false;
--              int split_order;
-               /* Or update major stats only when swapin succeeds?? */
-               if (fault_type) {
-@@ -2275,6 +2274,29 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-                       error = -ENOMEM;
-                       goto failed;
-               }
-+      } else if (order != folio_order(folio)) {
-+              /*
-+               * Swap readahead may swap in order 0 folios into swapcache
-+               * asynchronously, while the shmem mapping can still store
-+               * large swap entries. In such cases, we should split the
-+               * large swap entry to prevent possible data corruption.
-+               */
-+              split_order = shmem_split_large_entry(inode, index, swap, gfp);
-+              if (split_order < 0) {
-+                      error = split_order;
-+                      goto failed;
-+              }
-+
-+              /*
-+               * If the large swap entry has already been split, it is
-+               * necessary to recalculate the new swap entry based on
-+               * the old order alignment.
-+               */
-+              if (split_order > 0) {
-+                      pgoff_t offset = index - round_down(index, 1 << split_order);
-+
-+                      swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
-+              }
-       }
- alloced:
-@@ -2282,7 +2304,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       folio_lock(folio);
-       if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
-           folio->swap.val != swap.val ||
--          !shmem_confirm_swap(mapping, index, swap)) {
-+          !shmem_confirm_swap(mapping, index, swap) ||
-+          xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {
-               error = -EEXIST;
-               goto unlock;
-       }
--- 
-2.39.5
-
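The subtle part of the fix above is the per-page swap entry recalculation: once the large entry has been split, the entry for the faulting page must be re-derived from that page's position inside the old large folio. Below is a self-contained userspace sketch of just that arithmetic; the swp_entry_t encoding and SWP_TYPE_SHIFT are simplified stand-ins, not the kernel's real layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef unsigned long pgoff_t;
typedef struct { uint64_t val; } swp_entry_t;

#define SWP_TYPE_SHIFT 58	/* stand-in split between type and offset bits */

static swp_entry_t swp_entry(unsigned int type, uint64_t offset)
{
	return (swp_entry_t){ ((uint64_t)type << SWP_TYPE_SHIFT) | offset };
}

static unsigned int swp_type(swp_entry_t e) { return e.val >> SWP_TYPE_SHIFT; }
static uint64_t swp_offset(swp_entry_t e) { return e.val & ((1ULL << SWP_TYPE_SHIFT) - 1); }
static pgoff_t round_down_pow2(pgoff_t x, pgoff_t align) { return x & ~(align - 1); }

int main(void)
{
	/* A 16-page (order-4) swap entry covers indices 32..47 of the mapping;
	 * the fault is at index 37, i.e. five pages into the old large entry. */
	int split_order = 4;
	pgoff_t index = 37;
	swp_entry_t swap = swp_entry(1, 1024);	/* entry stored for the whole folio */

	/* After the large entry is split, recompute the per-page entry by adding
	 * the page's position within the old folio to the swap offset. */
	pgoff_t offset = index - round_down_pow2(index, (pgoff_t)1 << split_order);
	swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);

	assert(offset == 5);
	assert(swp_offset(swap) == 1029);
	printf("type=%u offset=%llu\n", swp_type(swap),
	       (unsigned long long)swp_offset(swap));
	return 0;
}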
diff --git a/queue-6.13/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch b/queue-6.13/mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch
deleted file mode 100644 (file)
index 1d1ac62..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-From 91c40c9fb0e939508d814e1ac302011d8e8213eb Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 8 Jan 2025 10:16:49 +0800
-Subject: mm: shmem: skip swapcache for swapin of synchronous swap device
-
-From: Baolin Wang <baolin.wang@linux.alibaba.com>
-
-[ Upstream commit 1dd44c0af4fa1e80a4e82faa10cbf5d22da40362 ]
-
-With fast swap devices (such as zram), swapin latency is crucial to
-applications.  For shmem swapin, similar to anonymous memory swapin, we
-can skip the swapcache operation to improve swapin latency.  Testing 1G
-shmem sequential swapin without THP enabled, I observed approximately a 6%
-performance improvement: (Note: I repeated 5 times and took the mean data
-for each test)
-
-w/o patch      w/ patch        changes
-534.8ms                501ms           +6.3%
-
-In addition, currently, we always split the large swap entry stored in the
-shmem mapping during shmem large folio swapin, which is not perfect,
-especially with a fast swap device.  We should swap in the whole large
-folio instead of splitting the precious large folios to take advantage of
-the large folios and improve the swapin latency if the swap device is
-a synchronous device, which is similar to anonymous memory mTHP swapin.
-Testing 1G shmem sequential swapin with 64K mTHP and 2M mTHP, I observed
-obvious performance improvement:
-
-mTHP=64K
-w/o patch      w/ patch        changes
-550.4ms                169.6ms         +69%
-
-mTHP=2M
-w/o patch      w/ patch        changes
-542.8ms                126.8ms         +77%
-
-Note that skipping swapcache requires attention to concurrent swapin
-scenarios.  Fortunately, swapcache_prepare() and
-shmem_add_to_page_cache() can help identify concurrent swapin and large
-swap entry split scenarios, and return -EEXIST for retry.
-
-[akpm@linux-foundation.org: use IS_ENABLED(), tweak comment grammar]
-Link: https://lkml.kernel.org/r/3d9f3bd3bc6ec953054baff5134f66feeaae7c1e.1736301701.git.baolin.wang@linux.alibaba.com
-Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Kairui Song <kasong@tencent.com>
-Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Ryan Roberts <ryan.roberts@arm.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Stable-dep-of: 058313515d5a ("mm: shmem: fix potential data corruption during shmem swapin")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/shmem.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++---
- 1 file changed, 105 insertions(+), 5 deletions(-)
-
-diff --git a/mm/shmem.c b/mm/shmem.c
-index e10d6e0924620..6d30139d3967d 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -1903,6 +1903,65 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
-       return ERR_PTR(error);
- }
-+static struct folio *shmem_swap_alloc_folio(struct inode *inode,
-+              struct vm_area_struct *vma, pgoff_t index,
-+              swp_entry_t entry, int order, gfp_t gfp)
-+{
-+      struct shmem_inode_info *info = SHMEM_I(inode);
-+      struct folio *new;
-+      void *shadow;
-+      int nr_pages;
-+
-+      /*
-+       * We have arrived here because our zones are constrained, so don't
-+       * limit chance of success with further cpuset and node constraints.
-+       */
-+      gfp &= ~GFP_CONSTRAINT_MASK;
-+      if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
-+              gfp_t huge_gfp = vma_thp_gfp_mask(vma);
-+
-+              gfp = limit_gfp_mask(huge_gfp, gfp);
-+      }
-+
-+      new = shmem_alloc_folio(gfp, order, info, index);
-+      if (!new)
-+              return ERR_PTR(-ENOMEM);
-+
-+      nr_pages = folio_nr_pages(new);
-+      if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
-+                                         gfp, entry)) {
-+              folio_put(new);
-+              return ERR_PTR(-ENOMEM);
-+      }
-+
-+      /*
-+       * Prevent parallel swapin from proceeding with the swap cache flag.
-+       *
-+       * Of course there is another possible concurrent scenario as well,
-+       * that is to say, the swap cache flag of a large folio has already
-+       * been set by swapcache_prepare(), while another thread may have
-+       * already split the large swap entry stored in the shmem mapping.
-+       * In this case, shmem_add_to_page_cache() will help identify the
-+       * concurrent swapin and return -EEXIST.
-+       */
-+      if (swapcache_prepare(entry, nr_pages)) {
-+              folio_put(new);
-+              return ERR_PTR(-EEXIST);
-+      }
-+
-+      __folio_set_locked(new);
-+      __folio_set_swapbacked(new);
-+      new->swap = entry;
-+
-+      mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
-+      shadow = get_shadow_from_swap_cache(entry);
-+      if (shadow)
-+              workingset_refault(new, shadow);
-+      folio_add_lru(new);
-+      swap_read_folio(new, NULL);
-+      return new;
-+}
-+
- /*
-  * When a page is moved from swapcache to shmem filecache (either by the
-  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
-@@ -2006,7 +2065,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
- }
- static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
--                                       struct folio *folio, swp_entry_t swap)
-+                                       struct folio *folio, swp_entry_t swap,
-+                                       bool skip_swapcache)
- {
-       struct address_space *mapping = inode->i_mapping;
-       swp_entry_t swapin_error;
-@@ -2022,7 +2082,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
-       nr_pages = folio_nr_pages(folio);
-       folio_wait_writeback(folio);
--      delete_from_swap_cache(folio);
-+      if (!skip_swapcache)
-+              delete_from_swap_cache(folio);
-       /*
-        * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
-        * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
-@@ -2126,6 +2187,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       struct shmem_inode_info *info = SHMEM_I(inode);
-       struct swap_info_struct *si;
-       struct folio *folio = NULL;
-+      bool skip_swapcache = false;
-       swp_entry_t swap;
-       int error, nr_pages;
-@@ -2147,6 +2209,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       /* Look it up and read it in.. */
-       folio = swap_cache_get_folio(swap, NULL, 0);
-       if (!folio) {
-+              int order = xa_get_order(&mapping->i_pages, index);
-+              bool fallback_order0 = false;
-               int split_order;
-               /* Or update major stats only when swapin succeeds?? */
-@@ -2156,6 +2220,33 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-                       count_memcg_event_mm(fault_mm, PGMAJFAULT);
-               }
-+              /*
-+               * If uffd is active for the vma, we need per-page fault
-+               * fidelity to maintain the uffd semantics, then fallback
-+               * to swapin order-0 folio, as well as for zswap case.
-+               */
-+              if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
-+                                !zswap_never_enabled()))
-+                      fallback_order0 = true;
-+
-+              /* Skip swapcache for synchronous device. */
-+              if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
-+                      folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
-+                      if (!IS_ERR(folio)) {
-+                              skip_swapcache = true;
-+                              goto alloced;
-+                      }
-+
-+                      /*
-+                       * Fallback to swapin order-0 folio unless the swap entry
-+                       * already exists.
-+                       */
-+                      error = PTR_ERR(folio);
-+                      folio = NULL;
-+                      if (error == -EEXIST)
-+                              goto failed;
-+              }
-+
-               /*
-                * Now swap device can only swap in order 0 folio, then we
-                * should split the large swap entry stored in the pagecache
-@@ -2186,9 +2277,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-               }
-       }
-+alloced:
-       /* We have to do this with folio locked to prevent races */
-       folio_lock(folio);
--      if (!folio_test_swapcache(folio) ||
-+      if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
-           folio->swap.val != swap.val ||
-           !shmem_confirm_swap(mapping, index, swap)) {
-               error = -EEXIST;
-@@ -2224,7 +2316,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       if (sgp == SGP_WRITE)
-               folio_mark_accessed(folio);
--      delete_from_swap_cache(folio);
-+      if (skip_swapcache) {
-+              folio->swap.val = 0;
-+              swapcache_clear(si, swap, nr_pages);
-+      } else {
-+              delete_from_swap_cache(folio);
-+      }
-       folio_mark_dirty(folio);
-       swap_free_nr(swap, nr_pages);
-       put_swap_device(si);
-@@ -2235,8 +2332,11 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-       if (!shmem_confirm_swap(mapping, index, swap))
-               error = -EEXIST;
-       if (error == -EIO)
--              shmem_set_folio_swapin_error(inode, index, folio, swap);
-+              shmem_set_folio_swapin_error(inode, index, folio, swap,
-+                                           skip_swapcache);
- unlock:
-+      if (skip_swapcache)
-+              swapcache_clear(si, swap, folio_nr_pages(folio));
-       if (folio) {
-               folio_unlock(folio);
-               folio_put(folio);
--- 
-2.39.5
-
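The behavioural change in this patch reduces to one decision: bypass the swapcache only when the swap device is synchronous, and fall back to an order-0 swapin whenever userfaultfd or zswap needs per-page behaviour. The sketch below models that decision on its own; struct swapin_ctx and should_skip_swapcache() are invented for the example, and SWP_SYNCHRONOUS_IO is a stand-in for the kernel flag of the same name.

#include <stdbool.h>
#include <stdio.h>

#define SWP_SYNCHRONOUS_IO	(1u << 0)	/* stand-in value */

struct swapin_ctx {
	unsigned int si_flags;	/* swap_info_struct::flags in the kernel */
	bool uffd_armed;	/* userfaultfd registered on the faulting VMA */
	bool zswap_enabled;	/* zswap has ever been enabled */
	int order;		/* order of the swap entry in the shmem mapping */
};

/* Mirrors the control flow added to shmem_swapin_folio(): uffd and zswap
 * force an order-0 fallback to keep per-page semantics, and only a
 * synchronous device may then bypass the swapcache. */
static bool should_skip_swapcache(const struct swapin_ctx *c)
{
	bool fallback_order0 = false;

	if (c->order > 0 && (c->uffd_armed || c->zswap_enabled))
		fallback_order0 = true;

	return !fallback_order0 && (c->si_flags & SWP_SYNCHRONOUS_IO);
}

int main(void)
{
	struct swapin_ctx zram_mthp = { SWP_SYNCHRONOUS_IO, false, false, 4 };
	struct swapin_ctx zram_uffd = { SWP_SYNCHRONOUS_IO, true,  false, 4 };
	struct swapin_ctx slow_disk = { 0,                  false, false, 0 };

	printf("zram + mTHP entry: skip=%d\n", should_skip_swapcache(&zram_mthp));	/* 1 */
	printf("zram + uffd armed: skip=%d\n", should_skip_swapcache(&zram_uffd));	/* 0 */
	printf("slow disk:         skip=%d\n", should_skip_swapcache(&slow_disk));	/* 0 */
	return 0;
}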
index 7306d5c8ca7fe12ee9ecf02a17ca802aa6945f5e..0397be04709bc3d791a3baaaeeef51ac10ccd5f0 100644 (file)
@@ -1,11 +1,3 @@
-kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
-kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
-kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
-kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch
-kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
-kvm-arm64-refactor-exit-handlers.patch
-kvm-arm64-mark-some-header-functions-as-inline.patch
-kvm-arm64-eagerly-switch-zcr_el-1-2.patch
 mm-fix-kernel-bug-when-userfaultfd_move-encounters-swapcache.patch
 userfaultfd-fix-pte-unmapping-stack-allocated-pte-copies.patch
 mm-slab-kvfree_rcu-switch-to-wq_mem_reclaim-wq.patch
@@ -237,8 +229,6 @@ x86-vmware-parse-mp-tables-for-sev-snp-enabled-guest.patch
 i2c-ali1535-fix-an-error-handling-path-in-ali1535_pr.patch
 i2c-ali15x3-fix-an-error-handling-path-in-ali15x3_pr.patch
 i2c-sis630-fix-an-error-handling-path-in-sis630_prob.patch
-mm-shmem-skip-swapcache-for-swapin-of-synchronous-sw.patch
-mm-shmem-fix-potential-data-corruption-during-shmem-.patch
 mm-hugetlb-wait-for-hugetlb-folios-to-be-freed.patch
 smb3-add-support-for-iakerb.patch
 smb-client-fix-match_session-bug-preventing-session-.patch