git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 28 Sep 2020 17:55:36 +0000 (19:55 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 28 Sep 2020 17:55:36 +0000 (19:55 +0200)
added patches:
kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch

queue-4.19/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch b/queue-4.19/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
new file mode 100644
index 0000000..9b22cd1
--- /dev/null
+++ b/queue-4.19/kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch
@@ -0,0 +1,164 @@
+From c4ad98e4b72cb5be30ea282fce935248f2300e62 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 15 Sep 2020 11:42:17 +0100
+Subject: KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit c4ad98e4b72cb5be30ea282fce935248f2300e62 upstream.
+
+KVM currently assumes that an instruction abort can never be a write.
+This is in general true, except when the abort is triggered by
+a S1PTW on instruction fetch that tries to update the S1 page tables
+(to set AF, for example).
+
+This can happen if the page tables have been paged out and brought
+back in without seeing a direct write to them (they are thus marked
+read only), and the fault handling code will make the PT executable(!)
+instead of writable. The guest gets stuck forever.
+
+In these conditions, the permission fault must be considered as
+a write so that the Stage-1 update can take place. This is essentially
+the I-side equivalent of the problem fixed by 60e21a0ef54c ("arm64: KVM:
+Take S1 walks into account when determining S2 write faults").
+
+Update kvm_is_write_fault() to return true on IABT+S1PTW, and introduce
+kvm_vcpu_trap_is_exec_fault() that only returns true for instruction
+aborts not caused by a S1PTW. Additionally, kvm_vcpu_dabt_iss1tw() is
+renamed to kvm_vcpu_abt_iss1tw(), as the above makes it plain that it
+isn't specific to data aborts.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Will Deacon <will@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200915104218.1284701-2-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/arm/include/asm/kvm_emulate.h   |   11 ++++++++---
+ arch/arm64/include/asm/kvm_emulate.h |   21 ++++++++++++++++++---
+ arch/arm64/kvm/hyp/switch.c          |    2 +-
+ virt/kvm/arm/mmio.c                  |    2 +-
+ virt/kvm/arm/mmu.c                   |    2 +-
+ 5 files changed, 29 insertions(+), 9 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(s
+       return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+@@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32
+       return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+ }
+ 
+-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
++static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++      return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -303,7 +303,7 @@ static inline int kvm_vcpu_dabt_get_rd(c
+       return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(
+ static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+-              kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++              kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+ 
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt
+       return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++      return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+@@ -372,7 +377,17 @@ static inline bool kvm_vcpu_dabt_isextab
+ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+ {
+       u32 esr = kvm_vcpu_get_hsr(vcpu);
+-      return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
++}
++
++static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
++{
++      if (kvm_vcpu_abt_iss1tw(vcpu))
++              return true;
++
++      if (kvm_vcpu_trap_is_iabt(vcpu))
++              return false;
++
++      return kvm_vcpu_dabt_iswrite(vcpu);
+ }
+ 
+ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -430,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(
+                       kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+                       kvm_vcpu_dabt_isvalid(vcpu) &&
+                       !kvm_vcpu_dabt_isextabt(vcpu) &&
+-                      !kvm_vcpu_dabt_iss1tw(vcpu);
++                      !kvm_vcpu_abt_iss1tw(vcpu);
+ 
+               if (valid) {
+                       int ret = __vgic_v2_perform_cpuif_access(vcpu);
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *v
+       bool sign_extend;
+       bool sixty_four;
+ 
+-      if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++      if (kvm_vcpu_abt_iss1tw(vcpu)) {
+               /* page table accesses IO mem: tell guest to fix its TTBR */
+               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               return 1;
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1496,7 +1496,7 @@ static int user_mem_abort(struct kvm_vcp
+       unsigned long flags = 0;
+ 
+       write_fault = kvm_is_write_fault(vcpu);
+-      exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++      exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+       VM_BUG_ON(write_fault && exec_fault);
+ 
+       if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
diff --git a/queue-4.19/series b/queue-4.19/series
index 4d86656f34f67cbf8054a8e29d321dd65bb5a663..8eaee54adc627b2d80807b90ac1047ca430a6ed7 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -241,3 +241,4 @@ s390-dasd-fix-zero-write-for-fba-devices.patch
 kprobes-fix-to-check-probe-enabled-before-disarm_kprobe_ftrace.patch
 mm-thp-swap-fix-allocating-cluster-for-swapfile-by-mistake.patch
 s390-zcrypt-fix-zcrypt_perdev_reqcnt-ioctl.patch
+kvm-arm64-assume-write-fault-on-s1ptw-permission-fault-on-instruction-fetch.patch