From 38ae241a4c470941c75d4cce01b0d01ff329c52e Mon Sep 17 00:00:00 2001
From: James Morse <james.morse@arm.com>
Date: Wed, 22 May 2019 18:47:05 +0100
Subject: KVM: arm/arm64: Move cc/it checks under hyp's Makefile to avoid
 instrumentation

[ Upstream commit 623e1528d4090bd1abaf93ec46f047dee9a6fb32 ]

KVM has helpers to handle the condition codes of trapped aarch32
instructions. These are marked __hyp_text and used from HYP, but they
aren't built by the 'hyp' Makefile, which has all the runes to avoid ASAN
and KCOV instrumentation.

Move this code to a new hyp/aarch32.c to avoid a hyp-panic when starting
an aarch32 guest on a host built with the ASAN/KCOV debug options.

Fixes: 021234ef3752f ("KVM: arm64: Make kvm_condition_valid32() accessible from EL2")
Fixes: 8cebe750c4d9a ("arm64: KVM: Make kvm_skip_instr32 available to HYP")
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/kvm/hyp/Makefile | 1 +
 arch/arm64/kvm/hyp/Makefile | 1 +
 virt/kvm/arm/aarch32.c | 121 --------------------------------
 virt/kvm/arm/hyp/aarch32.c | 136 ++++++++++++++++++++++++++++++++++++
 4 files changed, 138 insertions(+), 121 deletions(-)
 create mode 100644 virt/kvm/arm/hyp/aarch32.c

diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index d2b5ec9c4b92..ba88b1eca93c 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 82d1904328ad..ea710f674cb6 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,6 +10,7 @@ KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 5abbe9b3c652..6880236974b8 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -25,127 +25,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
-/*
- * stolen from arch/arm/kernel/opcodes.c
- *
- * condition code lookup table
- * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- *
- * bit position in short is condition code: NZCV
- */
-static const unsigned short cc_map[16] = {
- 0xF0F0, /* EQ == Z set */
- 0x0F0F, /* NE */
- 0xCCCC, /* CS == C set */
- 0x3333, /* CC */
- 0xFF00, /* MI == N set */
- 0x00FF, /* PL */
- 0xAAAA, /* VS == V set */
- 0x5555, /* VC */
- 0x0C0C, /* HI == C set && Z clear */
- 0xF3F3, /* LS == C clear || Z set */
- 0xAA55, /* GE == (N==V) */
- 0x55AA, /* LT == (N!=V) */
- 0x0A05, /* GT == (!Z && (N==V)) */
- 0xF5FA, /* LE == (Z || (N!=V)) */
- 0xFFFF, /* AL always */
- 0 /* NV */
-};
-
-/*
- * Check if a trapped instruction should have been executed or not.
- */
-bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr;
- u32 cpsr_cond;
- int cond;
-
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_hsr(vcpu) >> 30)
- return true;
-
- /* Is condition field valid? */
- cond = kvm_vcpu_get_condition(vcpu);
- if (cond == 0xE)
- return true;
-
- cpsr = *vcpu_cpsr(vcpu);
-
- if (cond < 0) {
- /* This can happen in Thumb mode: examine IT state. */
- unsigned long it;
-
- it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
- /* it == 0 => unconditional. */
- if (it == 0)
- return true;
-
- /* The cond for this insn works out as the top 4 bits. */
- cond = (it >> 4);
- }
-
- cpsr_cond = cpsr >> 28;
-
- if (!((cc_map[cond] >> cpsr_cond) & 1))
- return false;
-
- return true;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu: The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
- unsigned long itbits, cond;
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & PSR_AA32_T_BIT);
-
- if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
- return;
-
- cond = (cpsr & 0xe000) >> 13;
- itbits = (cpsr & 0x1c00) >> (10 - 2);
- itbits |= (cpsr & (0x3 << 25)) >> 25;
-
- /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
- if ((itbits & 0x7) == 0)
- itbits = cond = 0;
- else
- itbits = (itbits << 1) & 0x1f;
-
- cpsr &= ~PSR_AA32_IT_MASK;
- cpsr |= cond << 13;
- cpsr |= (itbits & 0x1c) << (10 - 2);
- cpsr |= (itbits & 0x3) << 25;
- *vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- bool is_thumb;
-
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
- if (is_thumb && !is_wide_instr)
- *vcpu_pc(vcpu) += 2;
- else
- *vcpu_pc(vcpu) += 4;
- kvm_adjust_itstate(vcpu);
-}
-
 /*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
new file mode 100644
index 000000000000..d31f267961e7
--- /dev/null
+++ b/virt/kvm/arm/hyp/aarch32.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hyp portion of the (not much of an) Emulation layer for 32bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+
+/*
+ * stolen from arch/arm/kernel/opcodes.c
+ *
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
+};
+
+/*
+ * Check if a trapped instruction should have been executed or not.
+ */
+bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr;
+ u32 cpsr_cond;
+ int cond;
+
+ /* Top two bits non-zero? Unconditional. */
+ if (kvm_vcpu_get_hsr(vcpu) >> 30)
+ return true;
+
+ /* Is condition field valid? */
+ cond = kvm_vcpu_get_condition(vcpu);
+ if (cond == 0xE)
+ return true;
+
+ cpsr = *vcpu_cpsr(vcpu);
+
+ if (cond < 0) {
+ /* This can happen in Thumb mode: examine IT state. */
+ unsigned long it;
+
+ it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+ /* it == 0 => unconditional. */
+ if (it == 0)
+ return true;
+
+ /* The cond for this insn works out as the top 4 bits. */
+ cond = (it >> 4);
+ }
+
+ cpsr_cond = cpsr >> 28;
+
+ if (!((cc_map[cond] >> cpsr_cond) & 1))
+ return false;
+
+ return true;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu: The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long itbits, cond;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_arm = !(cpsr & PSR_AA32_T_BIT);
+
+ if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~PSR_AA32_IT_MASK;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ *vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ bool is_thumb;
+
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
+ if (is_thumb && !is_wide_instr)
+ *vcpu_pc(vcpu) += 2;
+ else
+ *vcpu_pc(vcpu) += 4;
+ kvm_adjust_itstate(vcpu);
+}
--
2.20.1
