From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Will Deacon <will.deacon@arm.com>
Date: Wed, 3 Jan 2018 12:46:21 +0000
Subject: [Variant 2/Spectre-v2] arm64: Implement branch predictor hardening for affected Cortex-A CPUs

From: Will Deacon <will.deacon@arm.com>


Commit aa6acde65e03 upstream.

Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing
and can theoretically be attacked by malicious code.

This patch implements a PSCI-based mitigation for these CPUs when available.
The call into firmware will invalidate the branch predictor state, preventing
any malicious entries from affecting other victim contexts.

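For reference, the callback installed by this change is simply psci_ops.get_version,
so firing the per-CPU hardening hook amounts to a PSCI_VERSION call into firmware
(function ID 0x84000000), which on affected implementations also invalidates the
branch predictor. A minimal sketch of how such a per-CPU callback could be consumed
on entry from a less privileged context is shown below; the hook name
arm64_apply_bp_hardening and its exact shape are illustrative assumptions, not part
of this diff:

  /*
   * Sketch only: relies on the struct bp_hardening_data / bp_hardening_data
   * per-CPU variable referenced by the diff below; the function name here
   * is hypothetical.
   */
  static inline void arm64_apply_bp_hardening(void)
  {
  	struct bp_hardening_data *d;

  	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
  		return;

  	d = this_cpu_ptr(&bp_hardening_data);
  	if (d->fn)
  		d->fn();	/* here: psci_ops.get_version, i.e. the same
  				 * PSCI_VERSION call (0x84000000) that the
  				 * __psci_hyp_bp_inval_start stub issues via SMC */
  }

On the KVM side the same invalidation has to happen on exits from a guest, which is
why the bpi.S hunk adds the __psci_hyp_bp_inval_start/__psci_hyp_bp_inval_end stub:
it preserves x0-x17, issues the PSCI_VERSION SMC, restores the registers, and is
copied into a hardened hyp vector slot by the __copy_hyp_vect_bpi() path visible in
the cpu_errata.c hunks.
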
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/kernel/bpi.S        |   24 ++++++++++++++++++++++++
 arch/arm64/kernel/cpu_errata.c |   42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -53,3 +53,27 @@ ENTRY(__bp_harden_hyp_vecs_start)
 	vectors __kvm_hyp_vector
 	.endr
 ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+	sub	sp, sp, #(8 * 18)
+	stp	x16, x17, [sp, #(16 * 0)]
+	stp	x14, x15, [sp, #(16 * 1)]
+	stp	x12, x13, [sp, #(16 * 2)]
+	stp	x10, x11, [sp, #(16 * 3)]
+	stp	x8, x9, [sp, #(16 * 4)]
+	stp	x6, x7, [sp, #(16 * 5)]
+	stp	x4, x5, [sp, #(16 * 6)]
+	stp	x2, x3, [sp, #(16 * 7)]
+	stp	x0, x1, [sp, #(16 * 8)]
+	mov	x0, #0x84000000
+	smc	#0
+	ldp	x16, x17, [sp, #(16 * 0)]
+	ldp	x14, x15, [sp, #(16 * 1)]
+	ldp	x12, x13, [sp, #(16 * 2)]
+	ldp	x10, x11, [sp, #(16 * 3)]
+	ldp	x8, x9, [sp, #(16 * 4)]
+	ldp	x6, x7, [sp, #(16 * 5)]
+	ldp	x4, x5, [sp, #(16 * 6)]
+	ldp	x2, x3, [sp, #(16 * 7)]
+	ldp	x0, x1, [sp, #(16 * 8)]
+	add	sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -67,6 +67,8 @@ static int cpu_enable_trap_ctr_access(vo
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 #ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
@@ -108,6 +110,9 @@ static void __install_bp_hardening_cb(bp
 	spin_unlock(&bp_lock);
 }
 #else
+#define __psci_hyp_bp_inval_start	NULL
+#define __psci_hyp_bp_inval_end		NULL
+
 static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
@@ -132,6 +137,21 @@ static void install_bp_hardening_cb(con
 
 	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
 }
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	if (psci_ops.get_version)
+		install_bp_hardening_cb(entry,
+				       (bp_hardening_cb_t)psci_ops.get_version,
+				       __psci_hyp_bp_inval_start,
+				       __psci_hyp_bp_inval_end);
+
+	return 0;
+}
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 #define MIDR_RANGE(model, min, max) \
@@ -282,6 +302,28 @@ const struct arm64_cpu_capabilities arm6
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 	},
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_psci_bp_hardening,
+	},
+#endif
 	{
 	}
 };