]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop the kvm patches from older kernels, I went too fast...
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 16 Jun 2020 13:25:37 +0000 (15:25 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 16 Jun 2020 13:25:37 +0000 (15:25 +0200)
queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch [deleted file]
queue-4.19/series
queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch [deleted file]
queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch [deleted file]
queue-5.4/series
queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch [deleted file]
queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch [deleted file]
queue-5.6/series

diff --git a/queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
deleted file mode 100644 (file)
index e086445..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
-From: Marc Zyngier <maz@kernel.org>
-Date: Tue, 9 Jun 2020 08:50:29 +0100
-Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
-
-From: Marc Zyngier <maz@kernel.org>
-
-commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
-
-On a VHE system, the EL1 state is left in the CPU most of the time,
-and only syncronized back to memory when vcpu_put() is called (most
-of the time on preemption).
-
-Which means that when injecting an exception, we'd better have a way
-to either:
-(1) write directly to the EL1 sysregs
-(2) synchronize the state back to memory, and do the changes there
-
-For an AArch64, we already do (1), so we are safe. Unfortunately,
-doing the same thing for AArch32 would be pretty invasive. Instead,
-we can easily implement (2) by calling the put/load architectural
-backends, and keep preemption disabled. We can then reload the
-state back into EL1.
-
-Cc: stable@vger.kernel.org
-Reported-by: James Morse <james.morse@arm.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- virt/kvm/arm/aarch32.c |   28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/virt/kvm/arm/aarch32.c
-+++ b/virt/kvm/arm/aarch32.c
-@@ -44,6 +44,26 @@ static const u8 return_offsets[8][2] = {
-       [7] = { 4, 4 },         /* FIQ, unused */
- };
-+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-+{
-+      preempt_disable();
-+      if (vcpu->arch.sysregs_loaded_on_cpu) {
-+              kvm_arch_vcpu_put(vcpu);
-+              return true;
-+      }
-+
-+      preempt_enable();
-+      return false;
-+}
-+
-+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-+{
-+      if (loaded) {
-+              kvm_arch_vcpu_load(vcpu, smp_processor_id());
-+              preempt_enable();
-+      }
-+}
-+
- /*
-  * When an exception is taken, most CPSR fields are left unchanged in the
-  * handler. However, some are explicitly overridden (e.g. M[4:0]).
-@@ -166,7 +186,10 @@ static void prepare_fault32(struct kvm_v
- void kvm_inject_undef32(struct kvm_vcpu *vcpu)
- {
-+      bool loaded = pre_fault_synchronize(vcpu);
-+
-       prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
-+      post_fault_synchronize(vcpu, loaded);
- }
- /*
-@@ -179,6 +202,9 @@ static void inject_abt32(struct kvm_vcpu
-       u32 vect_offset;
-       u32 *far, *fsr;
-       bool is_lpae;
-+      bool loaded;
-+
-+      loaded = pre_fault_synchronize(vcpu);
-       if (is_pabt) {
-               vect_offset = 12;
-@@ -202,6 +228,8 @@ static void inject_abt32(struct kvm_vcpu
-               /* no need to shuffle FS[4] into DFSR[10] as its 0 */
-               *fsr = DFSR_FSC_EXTABT_nLPAE;
-       }
-+
-+      post_fault_synchronize(vcpu, loaded);
- }
- void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
index 18e741246003450da8f702b79d66ae1e94a112ed..d43acab5cbc67b9fd01df5c357591934d77b4123 100644 (file)
@@ -89,4 +89,3 @@ mmc-sdhci-msm-clear-tuning-done-flag-while-hs400-tuning.patch
 arm-dts-at91-sama5d2_ptc_ek-fix-sdmmc0-node-description.patch
 mmc-sdio-fix-potential-null-pointer-error-in-mmc_sdio_init_card.patch
 xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch
-kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
diff --git a/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch b/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
deleted file mode 100644 (file)
index 5d89434..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001
-From: Marc Zyngier <maz@kernel.org>
-Date: Wed, 3 Jun 2020 18:24:01 +0100
-Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
-
-From: Marc Zyngier <maz@kernel.org>
-
-commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream.
-
-When using the PtrAuth feature in a guest, we need to save the host's
-keys before allowing the guest to program them. For that, we dump
-them in a per-CPU data structure (the so called host context).
-
-But both call sites that do this are in preemptible context,
-which may end up in disaster should the vcpu thread get preempted
-before reentering the guest.
-
-Instead, save the keys eagerly on each vcpu_load(). This has an
-increased overhead, but is at least safe.
-
-Cc: stable@vger.kernel.org
-Reviewed-by: Mark Rutland <mark.rutland@arm.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/arm64/include/asm/kvm_emulate.h |    6 ------
- arch/arm64/kvm/handle_exit.c         |   19 ++-----------------
- virt/kvm/arm/arm.c                   |   18 +++++++++++++++++-
- 3 files changed, 19 insertions(+), 24 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_emulate.h
-+++ b/arch/arm64/include/asm/kvm_emulate.h
-@@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(
-       vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
- }
--static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
--{
--      if (vcpu_has_ptrauth(vcpu))
--              vcpu_ptrauth_disable(vcpu);
--}
--
- static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
- {
-       return vcpu->arch.vsesr_el2;
---- a/arch/arm64/kvm/handle_exit.c
-+++ b/arch/arm64/kvm/handle_exit.c
-@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v
-       return 1;
- }
--#define __ptrauth_save_key(regs, key)                                         \
--({                                                                            \
--      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
--      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
--})
--
- /*
-  * Handle the guest trying to use a ptrauth instruction, or trying to access a
-  * ptrauth register.
-  */
- void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
- {
--      struct kvm_cpu_context *ctxt;
--
--      if (vcpu_has_ptrauth(vcpu)) {
-+      if (vcpu_has_ptrauth(vcpu))
-               vcpu_ptrauth_enable(vcpu);
--              ctxt = vcpu->arch.host_cpu_context;
--              __ptrauth_save_key(ctxt->sys_regs, APIA);
--              __ptrauth_save_key(ctxt->sys_regs, APIB);
--              __ptrauth_save_key(ctxt->sys_regs, APDA);
--              __ptrauth_save_key(ctxt->sys_regs, APDB);
--              __ptrauth_save_key(ctxt->sys_regs, APGA);
--      } else {
-+      else
-               kvm_inject_undefined(vcpu);
--      }
- }
- /*
---- a/virt/kvm/arm/arm.c
-+++ b/virt/kvm/arm/arm.c
-@@ -354,6 +354,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *
-       return kvm_vgic_vcpu_init(vcpu);
- }
-+#define __ptrauth_save_key(regs, key)                                         \
-+({                                                                            \
-+      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
-+      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
-+})
-+
- void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- {
-       int *last_ran;
-@@ -386,7 +392,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
-       else
-               vcpu_set_wfe_traps(vcpu);
--      vcpu_ptrauth_setup_lazy(vcpu);
-+      if (vcpu_has_ptrauth(vcpu)) {
-+              struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
-+
-+              __ptrauth_save_key(ctxt->sys_regs, APIA);
-+              __ptrauth_save_key(ctxt->sys_regs, APIB);
-+              __ptrauth_save_key(ctxt->sys_regs, APDA);
-+              __ptrauth_save_key(ctxt->sys_regs, APDB);
-+              __ptrauth_save_key(ctxt->sys_regs, APGA);
-+
-+              vcpu_ptrauth_disable(vcpu);
-+      }
- }
- void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
deleted file mode 100644 (file)
index 3fba683..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
-From: Marc Zyngier <maz@kernel.org>
-Date: Tue, 9 Jun 2020 08:50:29 +0100
-Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
-
-From: Marc Zyngier <maz@kernel.org>
-
-commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
-
-On a VHE system, the EL1 state is left in the CPU most of the time,
-and only syncronized back to memory when vcpu_put() is called (most
-of the time on preemption).
-
-Which means that when injecting an exception, we'd better have a way
-to either:
-(1) write directly to the EL1 sysregs
-(2) synchronize the state back to memory, and do the changes there
-
-For an AArch64, we already do (1), so we are safe. Unfortunately,
-doing the same thing for AArch32 would be pretty invasive. Instead,
-we can easily implement (2) by calling the put/load architectural
-backends, and keep preemption disabled. We can then reload the
-state back into EL1.
-
-Cc: stable@vger.kernel.org
-Reported-by: James Morse <james.morse@arm.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- virt/kvm/arm/aarch32.c |   28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/virt/kvm/arm/aarch32.c
-+++ b/virt/kvm/arm/aarch32.c
-@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
-       [7] = { 4, 4 },         /* FIQ, unused */
- };
-+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-+{
-+      preempt_disable();
-+      if (vcpu->arch.sysregs_loaded_on_cpu) {
-+              kvm_arch_vcpu_put(vcpu);
-+              return true;
-+      }
-+
-+      preempt_enable();
-+      return false;
-+}
-+
-+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-+{
-+      if (loaded) {
-+              kvm_arch_vcpu_load(vcpu, smp_processor_id());
-+              preempt_enable();
-+      }
-+}
-+
- /*
-  * When an exception is taken, most CPSR fields are left unchanged in the
-  * handler. However, some are explicitly overridden (e.g. M[4:0]).
-@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v
- void kvm_inject_undef32(struct kvm_vcpu *vcpu)
- {
-+      bool loaded = pre_fault_synchronize(vcpu);
-+
-       prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
-+      post_fault_synchronize(vcpu, loaded);
- }
- /*
-@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu
-       u32 vect_offset;
-       u32 *far, *fsr;
-       bool is_lpae;
-+      bool loaded;
-+
-+      loaded = pre_fault_synchronize(vcpu);
-       if (is_pabt) {
-               vect_offset = 12;
-@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu
-               /* no need to shuffle FS[4] into DFSR[10] as its 0 */
-               *fsr = DFSR_FSC_EXTABT_nLPAE;
-       }
-+
-+      post_fault_synchronize(vcpu, loaded);
- }
- void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
index 5952feb9dc83fbaf23628926817150fe9fc2bf3d..857f1bad4ef9a8ec9df5e01f31e9d896a25f5037 100644 (file)
@@ -130,5 +130,3 @@ mmc-sdio-fix-potential-null-pointer-error-in-mmc_sdio_init_card.patch
 mmc-sdio-fix-several-potential-memory-leaks-in-mmc_sdio_init_card.patch
 block-floppy-fix-contended-case-in-floppy_queue_rq.patch
 xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch
-kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
-kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
diff --git a/queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch b/queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
deleted file mode 100644 (file)
index d0cd6a3..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001
-From: Marc Zyngier <maz@kernel.org>
-Date: Wed, 3 Jun 2020 18:24:01 +0100
-Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
-
-From: Marc Zyngier <maz@kernel.org>
-
-commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream.
-
-When using the PtrAuth feature in a guest, we need to save the host's
-keys before allowing the guest to program them. For that, we dump
-them in a per-CPU data structure (the so called host context).
-
-But both call sites that do this are in preemptible context,
-which may end up in disaster should the vcpu thread get preempted
-before reentering the guest.
-
-Instead, save the keys eagerly on each vcpu_load(). This has an
-increased overhead, but is at least safe.
-
-Cc: stable@vger.kernel.org
-Reviewed-by: Mark Rutland <mark.rutland@arm.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/arm64/include/asm/kvm_emulate.h |    6 ------
- arch/arm64/kvm/handle_exit.c         |   19 ++-----------------
- virt/kvm/arm/arm.c                   |   18 +++++++++++++++++-
- 3 files changed, 19 insertions(+), 24 deletions(-)
-
---- a/arch/arm64/include/asm/kvm_emulate.h
-+++ b/arch/arm64/include/asm/kvm_emulate.h
-@@ -111,12 +111,6 @@ static inline void vcpu_ptrauth_disable(
-       vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
- }
--static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
--{
--      if (vcpu_has_ptrauth(vcpu))
--              vcpu_ptrauth_disable(vcpu);
--}
--
- static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
- {
-       return vcpu->arch.vsesr_el2;
---- a/arch/arm64/kvm/handle_exit.c
-+++ b/arch/arm64/kvm/handle_exit.c
-@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v
-       return 1;
- }
--#define __ptrauth_save_key(regs, key)                                         \
--({                                                                            \
--      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
--      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
--})
--
- /*
-  * Handle the guest trying to use a ptrauth instruction, or trying to access a
-  * ptrauth register.
-  */
- void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
- {
--      struct kvm_cpu_context *ctxt;
--
--      if (vcpu_has_ptrauth(vcpu)) {
-+      if (vcpu_has_ptrauth(vcpu))
-               vcpu_ptrauth_enable(vcpu);
--              ctxt = vcpu->arch.host_cpu_context;
--              __ptrauth_save_key(ctxt->sys_regs, APIA);
--              __ptrauth_save_key(ctxt->sys_regs, APIB);
--              __ptrauth_save_key(ctxt->sys_regs, APDA);
--              __ptrauth_save_key(ctxt->sys_regs, APDB);
--              __ptrauth_save_key(ctxt->sys_regs, APGA);
--      } else {
-+      else
-               kvm_inject_undefined(vcpu);
--      }
- }
- /*
---- a/virt/kvm/arm/arm.c
-+++ b/virt/kvm/arm/arm.c
-@@ -332,6 +332,12 @@ void kvm_arch_vcpu_unblocking(struct kvm
-       preempt_enable();
- }
-+#define __ptrauth_save_key(regs, key)                                         \
-+({                                                                            \
-+      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
-+      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
-+})
-+
- void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- {
-       int *last_ran;
-@@ -365,7 +371,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
-       else
-               vcpu_set_wfx_traps(vcpu);
--      vcpu_ptrauth_setup_lazy(vcpu);
-+      if (vcpu_has_ptrauth(vcpu)) {
-+              struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
-+
-+              __ptrauth_save_key(ctxt->sys_regs, APIA);
-+              __ptrauth_save_key(ctxt->sys_regs, APIB);
-+              __ptrauth_save_key(ctxt->sys_regs, APDA);
-+              __ptrauth_save_key(ctxt->sys_regs, APDB);
-+              __ptrauth_save_key(ctxt->sys_regs, APGA);
-+
-+              vcpu_ptrauth_disable(vcpu);
-+      }
- }
- void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
deleted file mode 100644 (file)
index 3fba683..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
-From: Marc Zyngier <maz@kernel.org>
-Date: Tue, 9 Jun 2020 08:50:29 +0100
-Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
-
-From: Marc Zyngier <maz@kernel.org>
-
-commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
-
-On a VHE system, the EL1 state is left in the CPU most of the time,
-and only syncronized back to memory when vcpu_put() is called (most
-of the time on preemption).
-
-Which means that when injecting an exception, we'd better have a way
-to either:
-(1) write directly to the EL1 sysregs
-(2) synchronize the state back to memory, and do the changes there
-
-For an AArch64, we already do (1), so we are safe. Unfortunately,
-doing the same thing for AArch32 would be pretty invasive. Instead,
-we can easily implement (2) by calling the put/load architectural
-backends, and keep preemption disabled. We can then reload the
-state back into EL1.
-
-Cc: stable@vger.kernel.org
-Reported-by: James Morse <james.morse@arm.com>
-Signed-off-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- virt/kvm/arm/aarch32.c |   28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/virt/kvm/arm/aarch32.c
-+++ b/virt/kvm/arm/aarch32.c
-@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
-       [7] = { 4, 4 },         /* FIQ, unused */
- };
-+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-+{
-+      preempt_disable();
-+      if (vcpu->arch.sysregs_loaded_on_cpu) {
-+              kvm_arch_vcpu_put(vcpu);
-+              return true;
-+      }
-+
-+      preempt_enable();
-+      return false;
-+}
-+
-+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-+{
-+      if (loaded) {
-+              kvm_arch_vcpu_load(vcpu, smp_processor_id());
-+              preempt_enable();
-+      }
-+}
-+
- /*
-  * When an exception is taken, most CPSR fields are left unchanged in the
-  * handler. However, some are explicitly overridden (e.g. M[4:0]).
-@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v
- void kvm_inject_undef32(struct kvm_vcpu *vcpu)
- {
-+      bool loaded = pre_fault_synchronize(vcpu);
-+
-       prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
-+      post_fault_synchronize(vcpu, loaded);
- }
- /*
-@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu
-       u32 vect_offset;
-       u32 *far, *fsr;
-       bool is_lpae;
-+      bool loaded;
-+
-+      loaded = pre_fault_synchronize(vcpu);
-       if (is_pabt) {
-               vect_offset = 12;
-@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu
-               /* no need to shuffle FS[4] into DFSR[10] as its 0 */
-               *fsr = DFSR_FSC_EXTABT_nLPAE;
-       }
-+
-+      post_fault_synchronize(vcpu, loaded);
- }
- void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
index c17920beb8ff233533265f939ad6c07b31a11cbf..319416ad58899d5f3fd629f68b649017e50bf870 100644 (file)
@@ -157,5 +157,3 @@ block-floppy-fix-contended-case-in-floppy_queue_rq.patch
 xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch
 platform-x86-sony-laptop-snc-calls-should-handle-buffer-types.patch
 platform-x86-sony-laptop-make-resuming-thermal-profile-safer.patch
-kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
-kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch