/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
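
/*
 * The accessors used below (vcpu_cpsr(), vcpu_pc(), vcpu_reg32() and
 * vcpu_cp15()) come from the headers above: they expose the guest's
 * AArch32 CPSR, PC, general purpose registers and emulated CP15 state.
 */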

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
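
/*
 * return_offsets is indexed by the exception vector offset divided by 4
 * and by the Thumb bit of the interrupted context; the value is added to
 * the faulting PC to form the LR of the new mode on exception entry.
 */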
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | PSR_AA32_I_BIT;

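	/*
	 * SCTLR.TE (bit 30) selects Thumb state for exception entry and
	 * SCTLR.EE (bit 25) selects the exception-entry data endianness,
	 * so mirror both into the new CPSR.
	 */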
	if (sctlr & (1 << 30))
		cpsr |= PSR_AA32_T_BIT;
	if (sctlr & (1 << 25))
		cpsr |= PSR_AA32_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, new_spsr_value);
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: high vectors at 0xffff0000 */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
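
/*
 * 32-bit exception vector offsets: 0x0 reset, 0x4 undefined instruction,
 * 0x8 SVC, 0xc prefetch abort, 0x10 data abort, 0x14 HVC, 0x18 IRQ,
 * 0x1c FIQ. The injection helpers below pass these as vect_offset.
 */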
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !iabt */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;
	else
		*fsr = 0x14;
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}
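
/*
 * Usage: these 32-bit injection helpers are reached from the arm64 fault
 * injection path when the vCPU's EL1 is AArch32. Roughly (a sketch only;
 * helper names may differ between kernel versions):
 *
 *	void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 *	{
 *		if (vcpu_el1_is_32bit(vcpu))
 *			kvm_inject_dabt32(vcpu, addr);
 *		else
 *			inject_abt64(vcpu, false, addr);
 *	}
 */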