/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 *
 * Indexed by vector offset / 4; the two columns give the amount added
 * to the faulting PC to form the return address (LR), for ARM and
 * Thumb state respectively.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

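/*
 * prepare_fault32 - set up the 32bit CPU state for taking an exception
 *
 * Switches the vcpu into @mode with IRQs masked, banks the old CPSR
 * into the mode's SPSR, computes the return address in LR, and points
 * PC at @vect_offset within the active vector table.
 */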
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

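	/*
	 * Build the new CPSR: mask IRQs, and honour SCTLR.TE (take
	 * exceptions in Thumb state) and SCTLR.EE (exception endianness).
	 */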
	cpsr = mode | PSR_AA32_I_BIT;

	if (sctlr & (1 << 30))
		cpsr |= PSR_AA32_T_BIT;
	if (sctlr & (1 << 25))
		cpsr |= PSR_AA32_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, new_spsr_value);
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/*
	 * Branch to the exception vector: SCTLR.V selects the Hivecs
	 * range at 0xffff0000; otherwise the base comes from VBAR (the
	 * guest always sees the Security Extensions as implemented).
	 */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

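/* Inject an Undefined Instruction exception; offset 4 is the Undef vector. */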
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode. @is_pabt selects a prefetch
 * abort over a data abort; @addr is the faulting address, recorded in
 * IFAR or DFAR accordingly.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !is_pabt, i.e. a data abort */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* long-descriptor FSR format */
	else
		*fsr = 0x14;		/* short-descriptor FSR format */
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}