5.4-stable patches
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 16 Jun 2020 15:28:18 +0000 (17:28 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 16 Jun 2020 15:28:18 +0000 (17:28 +0200)
added patches:
kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch

queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch [new file with mode: 0644]
queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch b/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
new file mode 100644 (file)
index 0000000..bce0c9f
--- /dev/null
@@ -0,0 +1,133 @@
+From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Wed, 3 Jun 2020 18:24:01 +0100
+Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream.
+
+When using the PtrAuth feature in a guest, we need to save the host's
+keys before allowing the guest to program them. For that, we dump
+them in a per-CPU data structure (the so-called host context).
+
+But both call sites that do this are in preemptible context,
+which may end in disaster should the vcpu thread get preempted
+before reentering the guest.
+
+Instead, save the keys eagerly on each vcpu_load(). This has
+increased overhead, but is at least safe.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/arm/include/asm/kvm_emulate.h   |    3 ++-
+ arch/arm64/include/asm/kvm_emulate.h |    6 ------
+ arch/arm64/kvm/handle_exit.c         |   19 ++-----------------
+ virt/kvm/arm/arm.c                   |   22 +++++++++++++++++++++-
+ 4 files changed, 25 insertions(+), 25 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -363,6 +363,7 @@ static inline unsigned long vcpu_data_ho
+       }
+ }
+
+-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
++static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
++static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
+
+ #endif /* __ARM_KVM_EMULATE_H__ */
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(
+       vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+ }
+
+-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
+-{
+-      if (vcpu_has_ptrauth(vcpu))
+-              vcpu_ptrauth_disable(vcpu);
+-}
+-
+ static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+ {
+       return vcpu->arch.vsesr_el2;
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v
+       return 1;
+ }
+
+-#define __ptrauth_save_key(regs, key)                                         \
+-({                                                                            \
+-      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
+-      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
+-})
+-
+ /*
+  * Handle the guest trying to use a ptrauth instruction, or trying to access a
+  * ptrauth register.
+  */
+ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+ {
+-      struct kvm_cpu_context *ctxt;
+-
+-      if (vcpu_has_ptrauth(vcpu)) {
++      if (vcpu_has_ptrauth(vcpu))
+               vcpu_ptrauth_enable(vcpu);
+-              ctxt = vcpu->arch.host_cpu_context;
+-              __ptrauth_save_key(ctxt->sys_regs, APIA);
+-              __ptrauth_save_key(ctxt->sys_regs, APIB);
+-              __ptrauth_save_key(ctxt->sys_regs, APDA);
+-              __ptrauth_save_key(ctxt->sys_regs, APDB);
+-              __ptrauth_save_key(ctxt->sys_regs, APGA);
+-      } else {
++      else
+               kvm_inject_undefined(vcpu);
+-      }
+ }
+
+ /*
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -354,6 +354,16 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *
+       return kvm_vgic_vcpu_init(vcpu);
+ }
+
++#ifdef CONFIG_ARM64
++#define __ptrauth_save_key(regs, key)                                         \
++({                                                                            \
++      regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
++      regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
++})
++#else
++#define  __ptrauth_save_key(regs, key)        do { } while (0)
++#endif
++
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+       int *last_ran;
+@@ -386,7 +396,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+       else
+               vcpu_set_wfe_traps(vcpu);
+
+-      vcpu_ptrauth_setup_lazy(vcpu);
++      if (vcpu_has_ptrauth(vcpu)) {
++              struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
++
++              __ptrauth_save_key(ctxt->sys_regs, APIA);
++              __ptrauth_save_key(ctxt->sys_regs, APIB);
++              __ptrauth_save_key(ctxt->sys_regs, APDA);
++              __ptrauth_save_key(ctxt->sys_regs, APDB);
++              __ptrauth_save_key(ctxt->sys_regs, APGA);
++
++              vcpu_ptrauth_disable(vcpu);
++      }
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
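For readers following the first patch: __ptrauth_save_key() builds both the
sysreg-array index and the SYS_* register encoding by token pasting. As a
rough sketch (assuming the 5.4 sysreg naming), a call such as
__ptrauth_save_key(ctxt->sys_regs, APIA) expands to approximately:

	/* APIA ## KEYLO_EL1 pastes to APIAKEYLO_EL1, likewise for KEYHI */
	ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
	ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);

The generic vcpu_load() path invokes kvm_arch_vcpu_load() between get_cpu()
and put_cpu(), so after this patch the host keys are read with preemption
disabled, on the same physical CPU that will enter the guest; that is the
guarantee the old, preemptible kvm_arm_vcpu_ptrauth_trap() call site could
not provide.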
diff --git a/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
new file mode 100644 (file)
index 0000000..c3e9bce
--- /dev/null
@@ -0,0 +1,112 @@
+From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 9 Jun 2020 08:50:29 +0100
+Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
+
+On a VHE system, the EL1 state is left in the CPU most of the time,
+and only synchronized back to memory when vcpu_put() is called (most
+of the time on preemption).
+
+This means that, when injecting an exception, we need a way to
+either:
+(1) write directly to the EL1 sysregs
+(2) synchronize the state back to memory, and do the changes there
+
+For AArch64, we already do (1), so we are safe. Unfortunately,
+doing the same thing for AArch32 would be pretty invasive. Instead,
+we can easily implement (2) by calling the put/load architectural
+backends while keeping preemption disabled, and then reload the
+state back into EL1.
+
+Cc: stable@vger.kernel.org
+Reported-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_host.h   |    2 ++
+ arch/arm64/include/asm/kvm_host.h |    2 ++
+ virt/kvm/arm/aarch32.c            |   28 ++++++++++++++++++++++++++++
+ 3 files changed, 32 insertions(+)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -421,4 +421,6 @@ static inline bool kvm_arm_vcpu_is_final
+       return true;
+ }
+
++#define kvm_arm_vcpu_loaded(vcpu)     (false)
++
+ #endif /* __ARM_KVM_HOST_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -679,4 +679,6 @@ bool kvm_arm_vcpu_is_finalized(struct kv
+ #define kvm_arm_vcpu_sve_finalized(vcpu) \
+       ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+
++#define kvm_arm_vcpu_loaded(vcpu)     ((vcpu)->arch.sysregs_loaded_on_cpu)
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
+       [7] = { 4, 4 },         /* FIQ, unused */
+ };
+
++static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
++{
++      preempt_disable();
++      if (kvm_arm_vcpu_loaded(vcpu)) {
++              kvm_arch_vcpu_put(vcpu);
++              return true;
++      }
++
++      preempt_enable();
++      return false;
++}
++
++static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
++{
++      if (loaded) {
++              kvm_arch_vcpu_load(vcpu, smp_processor_id());
++              preempt_enable();
++      }
++}
++
+ /*
+  * When an exception is taken, most CPSR fields are left unchanged in the
+  * handler. However, some are explicitly overridden (e.g. M[4:0]).
+@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v
+
+ void kvm_inject_undef32(struct kvm_vcpu *vcpu)
+ {
++      bool loaded = pre_fault_synchronize(vcpu);
++
+       prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
++      post_fault_synchronize(vcpu, loaded);
+ }
+
+ /*
+@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu
+       u32 vect_offset;
+       u32 *far, *fsr;
+       bool is_lpae;
++      bool loaded;
++
++      loaded = pre_fault_synchronize(vcpu);
+
+       if (is_pabt) {
+               vect_offset = 12;
+@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu
+               /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+               *fsr = DFSR_FSC_EXTABT_nLPAE;
+       }
++
++      post_fault_synchronize(vcpu, loaded);
+ }
+
+ void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
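Taken together, pre_fault_synchronize() and post_fault_synchronize() bracket
each AArch32 injection path, as in the kvm_inject_undef32() and inject_abt32()
hunks above. A minimal sketch of the calling pattern (names as in the patch;
the middle comment stands in for the actual fault bookkeeping):

	bool loaded;

	/* preempt_disable(); if EL1 state is live on the CPU, put it to memory */
	loaded = pre_fault_synchronize(vcpu);

	/* ... modify the in-memory copy of the guest's AArch32 state ... */

	/* if it was loaded, pull the state back onto this CPU; preempt_enable() */
	post_fault_synchronize(vcpu, loaded);

Note the asymmetry: pre_fault_synchronize() re-enables preemption itself when
nothing was resident (including 32-bit arm, where kvm_arm_vcpu_loaded() is
hard-wired to false), and otherwise leaves it disabled until the state has
been reloaded, so the window in which the CPU-resident copy is stale is never
preemptible.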
diff --git a/queue-5.4/series b/queue-5.4/series
index 857f1bad4ef9a8ec9df5e01f31e9d896a25f5037..8bf93916d9ff716ceff7dfa0002ab0d3089023ce 100644 (file)
--- a/queue-5.4/series
@@ -130,3 +130,5 @@ mmc-sdio-fix-potential-null-pointer-error-in-mmc_sdio_init_card.patch
 mmc-sdio-fix-several-potential-memory-leaks-in-mmc_sdio_init_card.patch
 block-floppy-fix-contended-case-in-floppy_queue_rq.patch
 xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch
+kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
+kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch