1 From 4c37e3642dc8e0ecc0be4cee4eb636c1ca9441fc Mon Sep 17 00:00:00 2001
2 From: James Morse <james.morse@arm.com>
3 Date: Wed, 22 May 2019 18:47:05 +0100
4 Subject: KVM: arm/arm64: Move cc/it checks under hyp's Makefile to avoid
 instrumentation
7 [ Upstream commit 623e1528d4090bd1abaf93ec46f047dee9a6fb32 ]
9 KVM has helpers to handle the condition codes of trapped aarch32
10 instructions. These are marked __hyp_text and used from HYP, but they
11 aren't built by the 'hyp' Makefile, which has all the runes to avoid ASAN
12 and KCOV instrumentation.
14 Move this code to a new hyp/aarch32.c to avoid a hyp-panic when starting
15 an aarch32 guest on a host built with the ASAN/KCOV debug options.
17 Fixes: 021234ef3752f ("KVM: arm64: Make kvm_condition_valid32() accessible from EL2")
18 Fixes: 8cebe750c4d9a ("arm64: KVM: Make kvm_skip_instr32 available to HYP")
19 Signed-off-by: James Morse <james.morse@arm.com>
20 Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
21 Signed-off-by: Sasha Levin <sashal@kernel.org>
23 arch/arm/kvm/hyp/Makefile | 1 +
24 arch/arm64/kvm/hyp/Makefile | 1 +
25 virt/kvm/arm/aarch32.c | 121 --------------------------------
26 virt/kvm/arm/hyp/aarch32.c | 136 ++++++++++++++++++++++++++++++++++++
27 4 files changed, 138 insertions(+), 121 deletions(-)
28 create mode 100644 virt/kvm/arm/hyp/aarch32.c
30 diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
31 index d2b5ec9c4b92..ba88b1eca93c 100644
32 --- a/arch/arm/kvm/hyp/Makefile
33 +++ b/arch/arm/kvm/hyp/Makefile
34 @@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
36 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
37 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
38 +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
40 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
41 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
42 diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
43 index 2fabc2dc1966..feef06fc7c5a 100644
44 --- a/arch/arm64/kvm/hyp/Makefile
45 +++ b/arch/arm64/kvm/hyp/Makefile
46 @@ -10,6 +10,7 @@ KVM=../../../../virt/kvm
48 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
49 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
50 +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
52 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
53 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
54 diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
55 index 5abbe9b3c652..6880236974b8 100644
56 --- a/virt/kvm/arm/aarch32.c
57 +++ b/virt/kvm/arm/aarch32.c
59 #include <asm/kvm_emulate.h>
60 #include <asm/kvm_hyp.h>
63 - * stolen from arch/arm/kernel/opcodes.c
65 - * condition code lookup table
66 - * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
68 - * bit position in short is condition code: NZCV
70 -static const unsigned short cc_map[16] = {
71 - 0xF0F0, /* EQ == Z set */
73 - 0xCCCC, /* CS == C set */
75 - 0xFF00, /* MI == N set */
77 - 0xAAAA, /* VS == V set */
79 - 0x0C0C, /* HI == C set && Z clear */
80 - 0xF3F3, /* LS == C clear || Z set */
81 - 0xAA55, /* GE == (N==V) */
82 - 0x55AA, /* LT == (N!=V) */
83 - 0x0A05, /* GT == (!Z && (N==V)) */
84 - 0xF5FA, /* LE == (Z || (N!=V)) */
85 - 0xFFFF, /* AL always */
90 - * Check if a trapped instruction should have been executed or not.
92 -bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
98 - /* Top two bits non-zero? Unconditional. */
99 - if (kvm_vcpu_get_hsr(vcpu) >> 30)
102 - /* Is condition field valid? */
103 - cond = kvm_vcpu_get_condition(vcpu);
107 - cpsr = *vcpu_cpsr(vcpu);
110 - /* This can happen in Thumb mode: examine IT state. */
113 - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
115 - /* it == 0 => unconditional. */
119 - /* The cond for this insn works out as the top 4 bits. */
123 - cpsr_cond = cpsr >> 28;
125 - if (!((cc_map[cond] >> cpsr_cond) & 1))
132 - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
133 - * @vcpu: The VCPU pointer
135 - * When exceptions occur while instructions are executed in Thumb IF-THEN
136 - * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
137 - * to do this little bit of work manually. The fields map like this:
139 - * IT[7:0] -> CPSR[26:25],CPSR[15:10]
141 -static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
143 - unsigned long itbits, cond;
144 - unsigned long cpsr = *vcpu_cpsr(vcpu);
145 - bool is_arm = !(cpsr & PSR_AA32_T_BIT);
147 - if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
150 - cond = (cpsr & 0xe000) >> 13;
151 - itbits = (cpsr & 0x1c00) >> (10 - 2);
152 - itbits |= (cpsr & (0x3 << 25)) >> 25;
154 - /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
155 - if ((itbits & 0x7) == 0)
158 - itbits = (itbits << 1) & 0x1f;
160 - cpsr &= ~PSR_AA32_IT_MASK;
161 - cpsr |= cond << 13;
162 - cpsr |= (itbits & 0x1c) << (10 - 2);
163 - cpsr |= (itbits & 0x3) << 25;
164 - *vcpu_cpsr(vcpu) = cpsr;
168 - * kvm_skip_instr - skip a trapped instruction and proceed to the next
169 - * @vcpu: The vcpu pointer
171 -void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
175 - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
176 - if (is_thumb && !is_wide_instr)
177 - *vcpu_pc(vcpu) += 2;
179 - *vcpu_pc(vcpu) += 4;
180 - kvm_adjust_itstate(vcpu);
184 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
186 diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
188 index 000000000000..d31f267961e7
190 +++ b/virt/kvm/arm/hyp/aarch32.c
192 +// SPDX-License-Identifier: GPL-2.0
194 + * Hyp portion of the (not much of an) Emulation layer for 32bit guests.
196 + * Copyright (C) 2012,2013 - ARM Ltd
197 + * Author: Marc Zyngier <marc.zyngier@arm.com>
199 + * based on arch/arm/kvm/emulate.c
200 + * Copyright (C) 2012 - Virtual Open Systems and Columbia University
201 + * Author: Christoffer Dall <c.dall@virtualopensystems.com>
204 +#include <linux/kvm_host.h>
205 +#include <asm/kvm_emulate.h>
206 +#include <asm/kvm_hyp.h>
209 + * stolen from arch/arm/kernel/opcodes.c
211 + * condition code lookup table
212 + * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
214 + * bit position in short is condition code: NZCV
216 +static const unsigned short cc_map[16] = {
217 + 0xF0F0, /* EQ == Z set */
219 + 0xCCCC, /* CS == C set */
221 + 0xFF00, /* MI == N set */
223 + 0xAAAA, /* VS == V set */
225 + 0x0C0C, /* HI == C set && Z clear */
226 + 0xF3F3, /* LS == C clear || Z set */
227 + 0xAA55, /* GE == (N==V) */
228 + 0x55AA, /* LT == (N!=V) */
229 + 0x0A05, /* GT == (!Z && (N==V)) */
230 + 0xF5FA, /* LE == (Z || (N!=V)) */
231 + 0xFFFF, /* AL always */
236 + * Check if a trapped instruction should have been executed or not.
238 +bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
240 + unsigned long cpsr;
244 + /* Top two bits non-zero? Unconditional. */
245 + if (kvm_vcpu_get_hsr(vcpu) >> 30)
248 + /* Is condition field valid? */
249 + cond = kvm_vcpu_get_condition(vcpu);
253 + cpsr = *vcpu_cpsr(vcpu);
256 + /* This can happen in Thumb mode: examine IT state. */
259 + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
261 + /* it == 0 => unconditional. */
265 + /* The cond for this insn works out as the top 4 bits. */
269 + cpsr_cond = cpsr >> 28;
271 + if (!((cc_map[cond] >> cpsr_cond) & 1))
278 + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
279 + * @vcpu: The VCPU pointer
281 + * When exceptions occur while instructions are executed in Thumb IF-THEN
282 + * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
283 + * to do this little bit of work manually. The fields map like this:
285 + * IT[7:0] -> CPSR[26:25],CPSR[15:10]
287 +static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
289 + unsigned long itbits, cond;
290 + unsigned long cpsr = *vcpu_cpsr(vcpu);
291 + bool is_arm = !(cpsr & PSR_AA32_T_BIT);
293 + if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
296 + cond = (cpsr & 0xe000) >> 13;
297 + itbits = (cpsr & 0x1c00) >> (10 - 2);
298 + itbits |= (cpsr & (0x3 << 25)) >> 25;
300 + /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
301 + if ((itbits & 0x7) == 0)
304 + itbits = (itbits << 1) & 0x1f;
306 + cpsr &= ~PSR_AA32_IT_MASK;
307 + cpsr |= cond << 13;
308 + cpsr |= (itbits & 0x1c) << (10 - 2);
309 + cpsr |= (itbits & 0x3) << 25;
310 + *vcpu_cpsr(vcpu) = cpsr;
314 + * kvm_skip_instr - skip a trapped instruction and proceed to the next
315 + * @vcpu: The vcpu pointer
317 +void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
321 + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
322 + if (is_thumb && !is_wide_instr)
323 + *vcpu_pc(vcpu) += 2;
325 + *vcpu_pc(vcpu) += 4;
326 + kvm_adjust_itstate(vcpu);