--- /dev/null
+From 7d9e8f71b989230bc613d121ca38507d34ada849 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 18 Jan 2017 17:23:41 +0000
+Subject: arm64: avoid returning from bad_mode
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 7d9e8f71b989230bc613d121ca38507d34ada849 upstream.
+
+Generally, taking an unexpected exception should be a fatal event, and
+bad_mode is intended to cater for this. However, it should be possible
+to contain unexpected synchronous exceptions from EL0 without bringing
+the kernel down, by sending a SIGILL to the task.
+
+We tried to apply this approach in commit 9955ac47f4ba1c95 ("arm64:
+don't kill the kernel on a bad esr from el0"), by sending a signal for
+any bad_mode call resulting from an EL0 exception.
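+
+For reference, after that change bad_mode reported the exception and
+then called arm64_notify_die(), which only dies for kernel-mode regs;
+for user-mode regs it queues a SIGILL and returns, so bad_mode itself
+returned to its caller. A minimal sketch of that helper, reconstructed
+from the traps.c of that era (so treat the details as approximate):
+
+	void arm64_notify_die(const char *str, struct pt_regs *regs,
+			      struct siginfo *info, int err)
+	{
+		if (user_mode(regs)) {
+			current->thread.fault_address = 0;
+			current->thread.fault_code = err;
+			/* queue the signal and return to the caller */
+			force_sig_info(info->si_signo, info, current);
+		} else {
+			/* kernel context: this does not return */
+			die(str, regs, err);
+		}
+	}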
+
+However, this also applies to other unexpected exceptions, such as
+SError and FIQ. The entry paths for these exceptions branch to bad_mode
+without configuring the link register, and have no kernel_exit. Thus, if
+we take one of these exceptions from EL0, bad_mode will eventually
+return to the address held in the original user link register.
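+
+Concretely, those entry paths go through the inv_entry macro in
+entry.S, which reaches bad_mode with a plain branch rather than a
+branch-and-link (quoted approximately from the entry.S of that era):
+
+	.macro	inv_entry, el, reason, regsize = 64
+	kernel_entry \el, \regsize
+	mov	x0, sp
+	mov	x1, #\reason
+	mrs	x2, esr_el1
+	b	bad_mode		// plain branch: lr is never set up
+	.endm
+
+kernel_entry saves x30 to the pt_regs frame but leaves the register
+itself untouched, and nothing on this path writes it afterwards, so the
+return address bad_mode eventually consumes is still the task's
+user-mode x30, and the jump through it happens while still at EL1.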
+
+Fix this by introducing a new bad_el0_sync handler to cater
+for the recoverable case, and restoring bad_mode to its original state,
+whereby it calls panic() and never returns. The recoverable case
+branches to bad_el0_sync with a bl, and returns to userspace via the
+usual ret_to_user mechanism.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Fixes: 9955ac47f4ba1c95 ("arm64: don't kill the kernel on a bad esr from el0")
+Reported-by: Mark Salter <msalter@redhat.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/entry.S | 2 +-
+ arch/arm64/kernel/traps.c | 28 ++++++++++++++++++++++++----
+ 2 files changed, 25 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -562,7 +562,7 @@ el0_inv:
+ mov x0, sp
+ mov x1, #BAD_SYNC
+ mov x2, x25
+- bl bad_mode
++ bl bad_el0_sync
+ b ret_to_user
+ ENDPROC(el0_sync)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -434,16 +434,33 @@ const char *esr_get_class_string(u32 esr
+ }
+
+ /*
+- * bad_mode handles the impossible case in the exception vector.
++ * bad_mode handles the impossible case in the exception vector. This is always
++ * fatal.
+ */
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ {
+- siginfo_t info;
+- void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+ handler[reason], esr, esr_get_class_string(esr));
++
++ die("Oops - bad mode", regs, 0);
++ local_irq_disable();
++ panic("bad mode");
++}
++
++/*
++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
++ * exceptions taken from EL0. Unlike bad_mode, this returns.
++ */
++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
++{
++ siginfo_t info;
++ void __user *pc = (void __user *)instruction_pointer(regs);
++ console_verbose();
++
++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
++ smp_processor_id(), esr, esr_get_class_string(esr));
+ __show_regs(regs);
+
+ info.si_signo = SIGILL;
+@@ -451,7 +468,10 @@ asmlinkage void bad_mode(struct pt_regs
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = pc;
+
+- arm64_notify_die("Oops - bad mode", regs, &info, 0);
++ current->thread.fault_address = 0;
++ current->thread.fault_code = 0;
++
++ force_sig_info(info.si_signo, &info, current);
+ }
+
+ void __pte_error(const char *file, int line, unsigned long val)