1 From foo@baz Wed Feb 14 14:44:54 CET 2018
2 From: Will Deacon <will.deacon@arm.com>
3 Date: Wed, 3 Jan 2018 12:46:21 +0000
4 Subject: [Variant 2/Spectre-v2] arm64: Implement branch predictor hardening for affected Cortex-A CPUs
6 From: Will Deacon <will.deacon@arm.com>
9 Commit aa6acde65e03 upstream.
11 Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing
12 and can theoretically be attacked by malicious code.
14 This patch implements a PSCI-based mitigation for these CPUs when available.
15 The call into firmware will invalidate the branch predictor state, preventing
16 any malicious entries from affecting other victim contexts.
18 Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
19 Signed-off-by: Will Deacon <will.deacon@arm.com>
20 Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
21 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
22 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
24 arch/arm64/kernel/bpi.S | 24 +++++++++++++++++++++++
25 arch/arm64/kernel/cpu_errata.c | 42 +++++++++++++++++++++++++++++++++++++++++
26 2 files changed, 66 insertions(+)
28 --- a/arch/arm64/kernel/bpi.S
29 +++ b/arch/arm64/kernel/bpi.S
30 @@ -53,3 +53,27 @@ ENTRY(__bp_harden_hyp_vecs_start)
31 vectors __kvm_hyp_vector
33 ENTRY(__bp_harden_hyp_vecs_end)
34 +ENTRY(__psci_hyp_bp_inval_start)
35 + sub sp, sp, #(8 * 18)
36 + stp x16, x17, [sp, #(16 * 0)]
37 + stp x14, x15, [sp, #(16 * 1)]
38 + stp x12, x13, [sp, #(16 * 2)]
39 + stp x10, x11, [sp, #(16 * 3)]
40 + stp x8, x9, [sp, #(16 * 4)]
41 + stp x6, x7, [sp, #(16 * 5)]
42 + stp x4, x5, [sp, #(16 * 6)]
43 + stp x2, x3, [sp, #(16 * 7)]
44 + stp x0, x1, [sp, #(16 * 8)]
45 + mov x0, #0x84000000
46 + smc #0
47 + ldp x16, x17, [sp, #(16 * 0)]
48 + ldp x14, x15, [sp, #(16 * 1)]
49 + ldp x12, x13, [sp, #(16 * 2)]
50 + ldp x10, x11, [sp, #(16 * 3)]
51 + ldp x8, x9, [sp, #(16 * 4)]
52 + ldp x6, x7, [sp, #(16 * 5)]
53 + ldp x4, x5, [sp, #(16 * 6)]
54 + ldp x2, x3, [sp, #(16 * 7)]
55 + ldp x0, x1, [sp, #(16 * 8)]
56 + add sp, sp, #(8 * 18)
57 +ENTRY(__psci_hyp_bp_inval_end)
58 --- a/arch/arm64/kernel/cpu_errata.c
59 +++ b/arch/arm64/kernel/cpu_errata.c
60 @@ -67,6 +67,8 @@ static int cpu_enable_trap_ctr_access(vo
61 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
64 +extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
66 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
67 const char *hyp_vecs_end)
68 {
69 @@ -108,6 +110,9 @@ static void __install_bp_hardening_cb(bp
70 spin_unlock(&bp_lock);
71 }
72 #else
73 +#define __psci_hyp_bp_inval_start NULL
74 +#define __psci_hyp_bp_inval_end NULL
75 +
76 static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
77 const char *hyp_vecs_start,
78 const char *hyp_vecs_end)
79 @@ -132,6 +137,21 @@ static void install_bp_hardening_cb(con
81 __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
82 }
83 +
84 +#include <linux/psci.h>
85 +
86 +static int enable_psci_bp_hardening(void *data)
87 +{
88 + const struct arm64_cpu_capabilities *entry = data;
89 +
90 + if (psci_ops.get_version)
91 + install_bp_hardening_cb(entry,
92 + (bp_hardening_cb_t)psci_ops.get_version,
93 + __psci_hyp_bp_inval_start,
94 + __psci_hyp_bp_inval_end);
95 +
96 + return 0;
97 +}
98 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
100 #define MIDR_RANGE(model, min, max) \
101 @@ -282,6 +302,28 @@ const struct arm64_cpu_capabilities arm6
102 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
103 },
104 #endif
105 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
106 + {
107 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
108 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
109 + .enable = enable_psci_bp_hardening,
110 + },
111 + {
112 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
113 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
114 + .enable = enable_psci_bp_hardening,
115 + },
116 + {
117 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
118 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
119 + .enable = enable_psci_bp_hardening,
120 + },
121 + {
122 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
123 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
124 + .enable = enable_psci_bp_hardening,