+From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Wed, 3 Jun 2020 18:24:01 +0100
+Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream.
+
+When using the PtrAuth feature in a guest, we need to save the host's
+keys before allowing the guest to program them. For that, we dump
+them in a per-CPU data structure (the so-called host context).
+
+But both call sites that do this run in preemptible context, which
+can end in disaster should the vcpu thread be preempted before
+reentering the guest.
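+
+To make the hazard concrete (this is our reading of the race, not
+text from the original commit message), the lazy flow went roughly
+like this:
+
+	guest uses a ptrauth insn -> trap -> kvm_arm_vcpu_ptrauth_trap()
+	  -> host keys dumped into this CPU's host context  [preemptible]
+	<preemption + migration here: vcpu_load() on the new CPU repoints
+	 vcpu->arch.host_cpu_context at that CPU's structure>
+	guest reentered; on the next exit, hyp restores the "host" keys
+	from a context that never had them saved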
+
+Instead, save the keys eagerly on each vcpu_load(), which runs with
+preemption already disabled. This increases the overhead, but is at
+least safe.
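+
+For reference, with the macro as moved into arm.c below,
+__ptrauth_save_key(ctxt->sys_regs, APIA) expands via token pasting to:
+
+	ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
+	ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);
+
+and likewise for APIB/APDA/APDB/APGA. Because vcpu_load() invokes the
+arch hook between get_cpu()/put_cpu(), the per-CPU host context being
+written is guaranteed to belong to the CPU the vcpu will run on.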
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_emulate.h | 6 ------
+ arch/arm64/kvm/handle_exit.c | 19 ++-----------------
+ virt/kvm/arm/arm.c | 18 +++++++++++++++++-
+ 3 files changed, 19 insertions(+), 24 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(
+ vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+ }
+
+-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
+-{
+- if (vcpu_has_ptrauth(vcpu))
+- vcpu_ptrauth_disable(vcpu);
+-}
+-
+ static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.vsesr_el2;
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v
+ return 1;
+ }
+
+-#define __ptrauth_save_key(regs, key) \
+-({ \
+- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+-})
+-
+ /*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_cpu_context *ctxt;
+-
+- if (vcpu_has_ptrauth(vcpu)) {
++ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_enable(vcpu);
+- ctxt = vcpu->arch.host_cpu_context;
+- __ptrauth_save_key(ctxt->sys_regs, APIA);
+- __ptrauth_save_key(ctxt->sys_regs, APIB);
+- __ptrauth_save_key(ctxt->sys_regs, APDA);
+- __ptrauth_save_key(ctxt->sys_regs, APDB);
+- __ptrauth_save_key(ctxt->sys_regs, APGA);
+- } else {
++ else
+ kvm_inject_undefined(vcpu);
+- }
+ }
+
+ /*
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -332,6 +332,12 @@ void kvm_arch_vcpu_unblocking(struct kvm
+ preempt_enable();
+ }
+
++#define __ptrauth_save_key(regs, key) \
++({ \
++ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
++ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
++})
++
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ int *last_ran;
+@@ -365,7 +371,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+ else
+ vcpu_set_wfx_traps(vcpu);
+
+- vcpu_ptrauth_setup_lazy(vcpu);
++ if (vcpu_has_ptrauth(vcpu)) {
++ struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
++
++ __ptrauth_save_key(ctxt->sys_regs, APIA);
++ __ptrauth_save_key(ctxt->sys_regs, APIB);
++ __ptrauth_save_key(ctxt->sys_regs, APDA);
++ __ptrauth_save_key(ctxt->sys_regs, APDB);
++ __ptrauth_save_key(ctxt->sys_regs, APGA);
++
++ vcpu_ptrauth_disable(vcpu);
++ }
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 9 Jun 2020 08:50:29 +0100
+Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
+
+On a VHE system, the EL1 state is left in the CPU most of the time,
+and only synchronized back to memory when vcpu_put() is called (most
+of the time on preemption).
+
+This means that when injecting an exception, we need a way to either:
+(1) write directly to the EL1 sysregs
+(2) synchronize the state back to memory, and do the changes there
+
+For AArch64, we already do (1), so we are safe. Unfortunately, doing
+the same thing for AArch32 would be pretty invasive. Instead, we can
+easily implement (2) by calling the put/load architectural backends
+with preemption disabled, and then reload the state back into EL1,
+as sketched below.
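+
+The pre_fault_synchronize()/post_fault_synchronize() helpers added
+below implement (2). A rough sketch of the resulting pattern in an
+injection path (illustrative only; the real hunks follow):
+
+	bool loaded = pre_fault_synchronize(vcpu);
+	/* if the sysregs were live on the CPU, kvm_arch_vcpu_put() has
+	 * pushed them back to memory and preemption stays disabled */
+
+	/* ... modify the in-memory copy of the AArch32 state ... */
+
+	post_fault_synchronize(vcpu, loaded);
+	/* if we did the put, reload via kvm_arch_vcpu_load() and
+	 * re-enable preemption */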
+
+Cc: stable@vger.kernel.org
+Reported-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/aarch32.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
+ [7] = { 4, 4 }, /* FIQ, unused */
+ };
+
++static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
++{
++ preempt_disable();
++ if (vcpu->arch.sysregs_loaded_on_cpu) {
++ kvm_arch_vcpu_put(vcpu);
++ return true;
++ }
++
++ preempt_enable();
++ return false;
++}
++
++static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
++{
++ if (loaded) {
++ kvm_arch_vcpu_load(vcpu, smp_processor_id());
++ preempt_enable();
++ }
++}
++
+ /*
+ * When an exception is taken, most CPSR fields are left unchanged in the
+ * handler. However, some are explicitly overridden (e.g. M[4:0]).
+@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v
+
+ void kvm_inject_undef32(struct kvm_vcpu *vcpu)
+ {
++ bool loaded = pre_fault_synchronize(vcpu);
++
+ prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
++ post_fault_synchronize(vcpu, loaded);
+ }
+
+ /*
+@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu
+ u32 vect_offset;
+ u32 *far, *fsr;
+ bool is_lpae;
++ bool loaded;
++
++ loaded = pre_fault_synchronize(vcpu);
+
+ if (is_pabt) {
+ vect_offset = 12;
+@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu
+ /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+ *fsr = DFSR_FSC_EXTABT_nLPAE;
+ }
++
++ post_fault_synchronize(vcpu, loaded);
+ }
+
+ void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)