]>
Commit | Line | Data |
---|---|---|
623e1528 JM |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Hyp portion of the (not much of an) Emulation layer for 32bit guests. | |
4 | * | |
5 | * Copyright (C) 2012,2013 - ARM Ltd | |
6 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
7 | * | |
8 | * based on arch/arm/kvm/emulate.c | |
9 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
10 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
11 | */ | |
12 | ||
13 | #include <linux/kvm_host.h> | |
14 | #include <asm/kvm_emulate.h> | |
15 | #include <asm/kvm_hyp.h> | |
16 | ||
/*
 * stolen from arch/arm/kernel/opcodes.c
 *
 * condition code lookup table
 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
 *
 * bit position in short is condition code: NZCV
 *
 * Each 16-bit entry is a truth table: bit i is set iff the condition
 * holds when the NZCV flags (CPSR[31:28]) equal i. The caller indexes
 * the entry with the current flags and tests the resulting bit.
 */
static const unsigned short cc_map[16] = {
	0xF0F0,			/* EQ == Z set            */
	0x0F0F,			/* NE                     */
	0xCCCC,			/* CS == C set            */
	0x3333,			/* CC                     */
	0xFF00,			/* MI == N set            */
	0x00FF,			/* PL                     */
	0xAAAA,			/* VS == V set            */
	0x5555,			/* VC                     */
	0x0C0C,			/* HI == C set && Z clear */
	0xF3F3,			/* LS == C clear || Z set */
	0xAA55,			/* GE == (N==V)           */
	0x55AA,			/* LT == (N!=V)           */
	0x0A05,			/* GT == (!Z && (N==V))   */
	0xF5FA,			/* LE == (Z || (N!=V))    */
	0xFFFF,			/* AL always              */
	0			/* NV                     */
};
43 | ||
44 | /* | |
45 | * Check if a trapped instruction should have been executed or not. | |
46 | */ | |
47 | bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) | |
48 | { | |
49 | unsigned long cpsr; | |
50 | u32 cpsr_cond; | |
51 | int cond; | |
52 | ||
53 | /* Top two bits non-zero? Unconditional. */ | |
54 | if (kvm_vcpu_get_hsr(vcpu) >> 30) | |
55 | return true; | |
56 | ||
57 | /* Is condition field valid? */ | |
58 | cond = kvm_vcpu_get_condition(vcpu); | |
59 | if (cond == 0xE) | |
60 | return true; | |
61 | ||
62 | cpsr = *vcpu_cpsr(vcpu); | |
63 | ||
64 | if (cond < 0) { | |
65 | /* This can happen in Thumb mode: examine IT state. */ | |
66 | unsigned long it; | |
67 | ||
68 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | |
69 | ||
70 | /* it == 0 => unconditional. */ | |
71 | if (it == 0) | |
72 | return true; | |
73 | ||
74 | /* The cond for this insn works out as the top 4 bits. */ | |
75 | cond = (it >> 4); | |
76 | } | |
77 | ||
78 | cpsr_cond = cpsr >> 28; | |
79 | ||
80 | if (!((cc_map[cond] >> cpsr_cond) & 1)) | |
81 | return false; | |
82 | ||
83 | return true; | |
84 | } | |
85 | ||
/**
 * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu: The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_AA32_T_BIT);

	/* Nothing to do in ARM state, or outside an IT block. */
	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
		return;

	/*
	 * Split ITSTATE back out of the CPSR:
	 *   cond   = IT[7:5] from CPSR[15:13]
	 *   itbits = IT[4:0] from CPSR[12:10] (bits 4:2) and
	 *            CPSR[26:25] (bits 1:0)
	 */
	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;	/* IT block finished */
	else
		itbits = (itbits << 1) & 0x1f;

	/* Re-pack the advanced ITSTATE into the CPSR fields. */
	cpsr &= ~PSR_AA32_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
121 | ||
122 | /** | |
123 | * kvm_skip_instr - skip a trapped instruction and proceed to the next | |
124 | * @vcpu: The vcpu pointer | |
125 | */ | |
126 | void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) | |
127 | { | |
0225fd5e | 128 | u32 pc = *vcpu_pc(vcpu); |
623e1528 JM |
129 | bool is_thumb; |
130 | ||
131 | is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); | |
132 | if (is_thumb && !is_wide_instr) | |
0225fd5e | 133 | pc += 2; |
623e1528 | 134 | else |
0225fd5e MZ |
135 | pc += 4; |
136 | ||
137 | *vcpu_pc(vcpu) = pc; | |
138 | ||
623e1528 JM |
139 | kvm_adjust_itstate(vcpu); |
140 | } |