arm64: errata: Add workaround for Cortex-A76 erratum #1463225
author Will Deacon <will.deacon@arm.com>
Mon, 29 Apr 2019 12:03:57 +0000 (13:03 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 May 2019 13:44:47 +0000 (06:44 -0700)
commit 969f5ea627570e91c9d54403287ee3ed657f58fe upstream.

Revisions of the Cortex-A76 CPU prior to r4p0 are affected by an erratum
that can prevent interrupts from being taken when single-stepping.

This patch implements a software workaround to prevent userspace from
effectively being able to disable interrupts.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
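
For context, the scenario the erratum concerns is a debugger single-stepping a
tracee across an SVC instruction. A minimal userspace sketch of that pattern
(illustrative only, not part of the patch; on an affected core without this
workaround, the step across the SVC could leave interrupts unrecognised on
that CPU):

	/* Illustrative only: step a child one instruction at a time, SVC included. */
	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t child = fork();

		if (child == 0) {
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);		/* let the parent attach */
			getpid();		/* executes an SVC while being stepped */
			_exit(0);
		}

		for (;;) {
			int status;

			waitpid(child, &status, 0);
			if (WIFEXITED(status))
				break;
			/* resume the tracee for exactly one instruction */
			if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) == -1)
				break;
		}
		return 0;
	}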
Documentation/arm64/silicon-errata.txt
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/syscall.c
arch/arm64/mm/fault.c

index ddb8ce5333ba9c11ddc25c0f9c8bb3f39682b7e3..7a7e271be3f11b365d3a23913795d8dd21efbaae 100644 (file)
@@ -61,6 +61,7 @@ stable kernels.
 | ARM            | Cortex-A76      | #1188873        | ARM64_ERRATUM_1188873       |
 | ARM            | Cortex-A76      | #1165522        | ARM64_ERRATUM_1165522       |
 | ARM            | Cortex-A76      | #1286807        | ARM64_ERRATUM_1286807       |
+| ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
index a4168d36612772a7668ee0a7d9e83afef40a729d..4535b2b48fd9e4fd0de241c2379944c257aca5e5 100644 (file)
@@ -518,6 +518,24 @@ config ARM64_ERRATUM_1286807
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_1463225
+       bool "Cortex-A76: Software Step might prevent interrupt recognition"
+       default y
+       help
+         This option adds a workaround for Arm Cortex-A76 erratum 1463225.
+
+         On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
+         of a system call instruction (SVC) can prevent recognition of
+         subsequent interrupts when software stepping is disabled in the
+         exception handler of the system call and either kernel debugging
+         is enabled or VHE is in use.
+
+         Work around the erratum by triggering a dummy step exception
+         when handling a system call from a task that is being stepped
+         in a VHE configuration of the kernel.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index 82e9099834ae3679791dd7e7434e1987a169b418..99db8de8373478d6c75ea557cc9fe4d35499ec33 100644 (file)
@@ -60,7 +60,8 @@
 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF         39
 #define ARM64_HAS_GENERIC_AUTH_ARCH            40
 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF         41
+#define ARM64_WORKAROUND_1463225               42
 
-#define ARM64_NCAPS                            42
+#define ARM64_NCAPS                            43
 
 #endif /* __ASM_CPUCAPS_H */
index 9950bb0cbd52167c6b3b76e9d122291d70264df4..87019cd73f220be3e6ee76f5e540185480657a16 100644 (file)
@@ -464,6 +464,22 @@ out_printmsg:
 }
 #endif /* CONFIG_ARM64_SSBD */
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+                              int scope)
+{
+       u32 midr = read_cpuid_id();
+       /* Cortex-A76 r0p0 - r3p1 */
+       struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+       return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 {
@@ -738,6 +754,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_1165522,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+       {
+               .desc = "ARM erratum 1463225",
+               .capability = ARM64_WORKAROUND_1463225,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = has_cortex_a76_erratum_1463225,
+       },
 #endif
        {
        }
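
The r0p0 - r3p1 window passed to MIDR_RANGE() above lives in the variant and
revision fields of MIDR_EL1. As a standalone illustration of how those fields
map onto the rXpY naming (the bit layout is architectural; the program below
is only a sketch, not kernel code):

	/* Illustrative only: decode the fields the range check operates on. */
	#include <stdint.h>
	#include <stdio.h>

	#define MIDR_PARTNUM(midr)	(((midr) >> 4) & 0xfff)
	#define MIDR_VARIANT(midr)	(((midr) >> 20) & 0xf)	/* the X in rXpY */
	#define MIDR_REVISION(midr)	((midr) & 0xf)		/* the Y in rXpY */

	int main(void)
	{
		uint32_t midr = 0x411fd0b1;	/* e.g. Cortex-A76 (part 0xd0b), r1p1 */

		printf("part %#x, r%up%u\n", (unsigned)MIDR_PARTNUM(midr),
		       (unsigned)MIDR_VARIANT(midr), (unsigned)MIDR_REVISION(midr));
		return 0;
	}

is_midr_in_range() bounds (variant, revision) between r0p0 and r3p1 inclusive,
and the is_kernel_in_hyp_mode() check restricts the workaround to VHE
configurations, matching the Kconfig help text.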
index 5610ac01c1ec0212fe5bb4082c58b5e1f9f4dcb2..871c739f060acab87cd98f2d679e75bccd8bfa89 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
 int syscall_trace_enter(struct pt_regs *regs);
 void syscall_trace_exit(struct pt_regs *regs);
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+       u32 reg, val;
+
+       if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+               return;
+
+       if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+               return;
+
+       __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+       reg = read_sysreg(mdscr_el1);
+       val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+       write_sysreg(val, mdscr_el1);
+       asm volatile("msr daifclr, #8");
+       isb();
+
+       /* We will have taken a single-step exception by this point */
+
+       write_sysreg(reg, mdscr_el1);
+       __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                           const syscall_fn_t syscall_table[])
 {
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        regs->orig_x0 = regs->regs[0];
        regs->syscallno = scno;
 
+       cortex_a76_erratum_1463225_svc_handler();
        local_daif_restore(DAIF_PROCCTX);
        user_exit();
 
index ef46925096f0f2b58ea08a976e159352a756c3b2..d3bdef0b2f60170a4628633aad6b373d8c68fb8e 100644 (file)
@@ -824,14 +824,47 @@ void __init hook_debug_fault_code(int nr,
        debug_fault_info[nr].name       = name;
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+       if (user_mode(regs))
+               return 0;
+
+       if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+               return 0;
+
+       /*
+        * We've taken a dummy step exception from the kernel to ensure
+        * that interrupts are re-enabled on the syscall path. Return back
+        * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+        * masked so that we can safely restore the mdscr and get on with
+        * handling the syscall.
+        */
+       regs->pstate |= PSR_D_BIT;
+       return 1;
+}
+#else
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+       return 0;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
-                                             unsigned int esr,
-                                             struct pt_regs *regs)
+                                              unsigned int esr,
+                                              struct pt_regs *regs)
 {
        const struct fault_info *inf = esr_to_debug_fault_info(esr);
        unsigned long pc = instruction_pointer(regs);
        int rv;
 
+       if (cortex_a76_erratum_1463225_debug_handler(regs))
+               return 0;
+
        /*
         * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
         * already disabled to preserve the last enabled/disabled addresses.
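
Taken together, the two halves form a small handshake: the SVC path raises the
per-CPU flag and forces a step exception; cortex_a76_erratum_1463225_debug_handler()
recognises the flag, sets PSTATE.D in the saved regs so debug exceptions stay
masked after the exception return, and makes do_debug_exception() bail out
early; the SVC path then restores mdscr_el1 and drops the flag. A standalone
mock of that handshake (stand-in types and hypothetical names; only the
PSR_D_BIT value matches the kernel's):

	/* Illustrative mock only -- not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	#define PSR_D_BIT	0x00000200UL	/* PSTATE.D is bit 9 */

	struct mock_regs { uint64_t pstate; };

	static int in_erratum_wa;	/* stands in for the per-CPU flag */

	static int erratum_debug_handler(struct mock_regs *regs)
	{
		if (!in_erratum_wa)
			return 0;
		/* keep debug masked after the return so mdscr can be restored */
		regs->pstate |= PSR_D_BIT;
		return 1;
	}

	int main(void)
	{
		struct mock_regs regs = { .pstate = 0x5 };	/* EL1h */

		in_erratum_wa = 1;			/* SVC handler raises the flag */
		if (erratum_debug_handler(&regs))	/* the dummy step lands here */
			printf("dummy step swallowed, pstate=%#llx\n",
			       (unsigned long long)regs.pstate);
		in_erratum_wa = 0;			/* SVC handler clears it */
		return 0;
	}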