KVM: arm64: Add kvm_extable for vaxorcism code
author    James Morse <james.morse@arm.com>
          Fri, 21 Aug 2020 14:07:05 +0000 (15:07 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 5 Sep 2020 09:24:02 +0000 (11:24 +0200)
commit e9ee186bb735bfc17fa81dbc9aebf268aee5b41e upstream.

KVM has a one instruction window where it will allow an SError exception
to be consumed by the hypervisor without treating it as a hypervisor bug.
This is used to consume asynchronous external aborts that were caused by
the guest.

As we are about to add another location that survives unexpected exceptions,
generalise this code to make it behave like the host's extable.

KVM's version has to be mapped to EL2 to be accessible on nVHE systems.

The SError vaxorcism code is a one instruction window, so it has two entries
in the extable: one for each of the labels that bracket the window. Because
the KVM code is copied for VHE and nVHE, we end up with four entries, half of
which correspond to code that isn't mapped.
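
For reference, each extable entry holds a pair of self-relative 32-bit
offsets rather than absolute addresses, so the table needs no relocation and
remains valid under the EL2 mapping. A minimal sketch of the decode, assuming
the host's entry layout from arch/arm64/include/asm/extable.h (the helper
names here are illustrative, not from this patch):

    struct exception_table_entry {
            int insn;   /* offset from &entry->insn to the insn that may fault */
            int fixup;  /* offset from &entry->fixup to the fixup code */
    };

    /* Illustrative: recover the absolute addresses, as the lookup below does. */
    static unsigned long ex_insn_addr(const struct exception_table_entry *e)
    {
            return (unsigned long)&e->insn + e->insn;
    }

    static unsigned long ex_fixup_addr(const struct exception_table_entry *e)
    {
            return (unsigned long)&e->fixup + e->fixup;
    }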

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/switch.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 352aaebf419803794e13dbb3eecbecc99e01dba1..7f09543b7c9a133eed06fc059d9d8b2c61133443 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -146,6 +146,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
        kern_hyp_va     \vcpu
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+       .pushsection    __kvm_ex_table, "a"
+       .align          3
+       .long           (\from - .), (\to - .)
+       .popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
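
Note that the macro's .align 3 plus two .long words emit exactly one 8-byte
record per fixup, which is what lets the C side overlay the section with
struct exception_table_entry. A hedged, self-contained layout check
(illustrative only, not part of the patch):

    #include <assert.h>

    /* Mirrors arch/arm64's exception_table_entry: two self-relative offsets. */
    struct exception_table_entry {
            int insn;
            int fixup;
    };

    /* .align 3 pads to an 8-byte boundary; the two .long words fill one entry. */
    static_assert(sizeof(struct exception_table_entry) == 8,
                  "_kvm_extable must emit one 8-byte entry per fixup");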
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5423ffe0a987602f45bf15026e229bf5f5ae698e..1417a9042d1359987f44fc7668d4de4584ded55c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -21,6 +21,13 @@ ENTRY(_text)
 
 jiffies = jiffies_64;
 
+
+#define HYPERVISOR_EXTABLE                                     \
+       . = ALIGN(SZ_8);                                        \
+       __start___kvm_ex_table = .;                             \
+       *(__kvm_ex_table)                                       \
+       __stop___kvm_ex_table = .;
+
 #define HYPERVISOR_TEXT                                        \
        /*                                              \
         * Align to 4 KB so that                        \
@@ -36,6 +43,7 @@ jiffies = jiffies_64;
        __hyp_idmap_text_end = .;                       \
        __hyp_text_start = .;                           \
        *(.hyp.text)                                    \
+       HYPERVISOR_EXTABLE                              \
        __hyp_text_end = .;
 
 #define IDMAP_TEXT                                     \
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 90186cf6473e0223ab965b24d5df0d1f7e677a5f..c2e6da3564082a9ec2e2274494d9e3bc43133a1d 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -198,20 +198,23 @@ alternative_endif
        // This is our single instruction exception window. A pending
        // SError is guaranteed to occur at the earliest when we unmask
        // it, and at the latest just after the ISB.
-       .global abort_guest_exit_start
 abort_guest_exit_start:
 
        isb
 
-       .global abort_guest_exit_end
 abort_guest_exit_end:
 
        msr     daifset, #4     // Mask aborts
+       ret
+
+       _kvm_extable    abort_guest_exit_start, 9997f
+       _kvm_extable    abort_guest_exit_end, 9997f
+9997:
+       msr     daifset, #4     // Mask aborts
+       mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-       // If the exception took place, restore the EL1 exception
-       // context so that we can report some information.
-       // Merge the exception code with the SError pending bit.
-       tbz     x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+       // restore the EL1 exception context so that we can report some
+       // information. Merge the exception code with the SError pending bit.
        msr     elr_el2, x2
        msr     esr_el2, x3
        msr     spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 9c5cfb04170ee531c2f3e3fd80a8ab77f6fe4174..ca2e34063e59df747a51755e47eee814055cea7e 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+       /* x0 and x1 were saved in the vector entry */
+       stp     x2, x3,   [sp, #-16]!
+       stp     x4, x5,   [sp, #-16]!
+       stp     x6, x7,   [sp, #-16]!
+       stp     x8, x9,   [sp, #-16]!
+       stp     x10, x11, [sp, #-16]!
+       stp     x12, x13, [sp, #-16]!
+       stp     x14, x15, [sp, #-16]!
+       stp     x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+       ldp     x16, x17, [sp], #16
+       ldp     x14, x15, [sp], #16
+       ldp     x12, x13, [sp], #16
+       ldp     x10, x11, [sp], #16
+       ldp     x8, x9,   [sp], #16
+       ldp     x6, x7,   [sp], #16
+       ldp     x4, x5,   [sp], #16
+       ldp     x2, x3,   [sp], #16
+       ldp     x0, x1,   [sp], #16
+.endm
+
        .text
        .pushsection    .hyp.text, "ax"
 
@@ -156,27 +180,14 @@ el2_sync:
 
 
 el2_error:
-       ldp     x0, x1, [sp], #16
+       save_caller_saved_regs_vect
+       stp     x29, x30, [sp, #-16]!
+
+       bl      kvm_unexpected_el2_exception
+
+       ldp     x29, x30, [sp], #16
+       restore_caller_saved_regs_vect
 
-       /*
-        * Only two possibilities:
-        * 1) Either we come from the exit path, having just unmasked
-        *    PSTATE.A: change the return code to an EL2 fault, and
-        *    carry on, as we're already in a sane state to handle it.
-        * 2) Or we come from anywhere else, and that's a bug: we panic.
-        *
-        * For (1), x0 contains the original return code and x1 doesn't
-        * contain anything meaningful at that stage. We can reuse them
-        * as temp registers.
-        * For (2), who cares?
-        */
-       mrs     x0, elr_el2
-       adr     x1, abort_guest_exit_start
-       cmp     x0, x1
-       adr     x1, abort_guest_exit_end
-       ccmp    x0, x1, #4, ne
-       b.ne    __hyp_panic
-       mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
        eret
        sb
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 9270b14157b55b14f0a0a8765acc905e2b97f864..ad44e4cee886bfa214a71bbe7bd18bebd600faf9 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -14,6 +14,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -24,6 +25,9 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -934,3 +938,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 
        unreachable();
 }
+
+asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
+{
+       unsigned long addr, fixup;
+       struct kvm_cpu_context *host_ctxt;
+       struct exception_table_entry *entry, *end;
+       unsigned long elr_el2 = read_sysreg(elr_el2);
+
+       entry = hyp_symbol_addr(__start___kvm_ex_table);
+       end = hyp_symbol_addr(__stop___kvm_ex_table);
+       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+       while (entry < end) {
+               addr = (unsigned long)&entry->insn + entry->insn;
+               fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+               if (addr != elr_el2) {
+                       entry++;
+                       continue;
+               }
+
+               write_sysreg(fixup, elr_el2);
+               return;
+       }
+
+       hyp_panic(host_ctxt);
+}
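
One subtlety in the lookup above: the table bounds are taken via
hyp_symbol_addr() rather than a plain address-of, because under nVHE the hyp
text runs at a different VA from the kernel, and an absolute reference could
resolve to the unmapped kernel-VA alias. The existing macro
(arch/arm64/include/asm/kvm_asm.h) is approximately the following, generating
the address PC-relative so it is correct wherever the code executes:

    #define hyp_symbol_addr(s)                                      \
            ({                                                      \
                    typeof(s) *addr;                                \
                    /* adrp/add form a PC-relative address of s */  \
                    asm("adrp  %0, %1\n"                            \
                        "add   %0, %0, :lo12:%1\n"                  \
                        : "=r" (addr) : "S" (&s));                  \
                    addr;                                           \
            })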