/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
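
// Layout note: CPU_XREG_OFFSET(n) points at regs[n] of the context's
// saved user_pt_regs, where regs[31] is immediately followed by the
// 'sp' field; CPU_SP_EL0_OFFSET (one 8-byte slot past x30) therefore
// lands on that slot, which is used here to stash SP_EL0.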

	.text
	.pushsection	.hyp.text, "ax"

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm
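
// (SP_EL0 needs switching because the host kernel keeps its 'current'
// task pointer there, while the guest owns its own EL0 stack pointer.)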

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
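// (Reached by an ordinary function call from the world-switch code,
// e.g. kvm_vcpu_run_vhe(); the u64 return value is the exit code
// handed back to the run loop.)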
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x29: guest context

	// Store the host regs
	save_callee_saved_regs x1
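	// (x0-x17 are caller-saved under the AAPCS, so the host's values
	// are already dead here; only x18-x30 and SP_EL0 need preserving.)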

	// Save the host's sp_el0
	save_sp_el0 x1, x2

	// Now the host state is stored. If we have a pending RAS SError,
	// it must affect the host. If any asynchronous exception is pending
	// we defer the guest entry. The DSB isn't necessary before v8.2 as
	// any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
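	// (Returning ARM_EXCEPTION_IRQ makes the world-switch loop treat
	// this as an ordinary interrupt exit, so the host takes the
	// pending exception as soon as it unmasks interrupts.)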
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb
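	// ('sb' is the ARMv8.5 speculation barrier, with a DSB+ISB
	// fallback on older cores; it prevents straight-line speculation
	// past the eret into the exit path below.)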

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
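	// (Presumably: PSTATE.PAN is not guaranteed to be set on the
	// exception entry to EL2, so re-set it here so the host never
	// runs with PAN clear.)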

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1
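	// (They were pushed there by the EL2 exception vector before it
	// branched here, freeing x0/x1 for the early exit path.)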

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0 x1, x2

	get_host_ctxt	x2, x3

	// Macro ptrauth_switch_to_host format:
	// 	ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

	// Restore the host's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without unmasking SError or issuing an isb: the ESB instruction
	// already consumed any pending guest error when we took the
	// exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
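	// (x1 points at the guest context, i.e. VCPU_CONTEXT bytes into
	// the vcpu, so the negative adjustment rebases the store onto the
	// vcpu's fault.disr_el1 field.)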
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
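	// (Stash the EL2 exception context and the exit code: if the
	// SError fires in the window below, taking it will overwrite
	// ELR_EL2, ESR_EL2 and SPSR_EL2.)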

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:
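	// (The EL2 SError vector compares ELR_EL2 against these labels to
	// recognise an abort taken inside this window and return here with
	// ARM_EXIT_WITH_SERROR_BIT set; see the hyp exception vectors.)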

	msr	daifset, #4	// Mask aborts

	// If the exception took place, restore the guest-exit exception
	// context (ELR/ESR/SPSR_EL2 were clobbered by taking the abort)
	// so that we can report some information. Merge the exception
	// code with the SError pending bit.
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)