1 /*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <linux/linkage.h>
19
20 #include <asm/alternative.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/assembler.h>
23 #include <asm/cpufeature.h>
24 #include <asm/debug-monitors.h>
25 #include <asm/esr.h>
26 #include <asm/fpsimdmacros.h>
27 #include <asm/kvm.h>
28 #include <asm/kvm_arm.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_mmu.h>
31 #include <asm/memory.h>
32
33 #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
34 #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
35 #define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
36 #define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
37
38 .text
39 .pushsection .hyp.text, "ax"
40 .align PAGE_SHIFT
41
42 .macro save_common_regs
43 // x2: base address for cpu context
44 // x3: tmp register
45
46 add x3, x2, #CPU_XREG_OFFSET(19)
47 stp x19, x20, [x3]
48 stp x21, x22, [x3, #16]
49 stp x23, x24, [x3, #32]
50 stp x25, x26, [x3, #48]
51 stp x27, x28, [x3, #64]
52 stp x29, lr, [x3, #80]
53
54 mrs x19, sp_el0
55 mrs x20, elr_el2 // pc before entering el2
56 mrs x21, spsr_el2 // pstate before entering el2
57
58 stp x19, x20, [x3, #96]
59 str x21, [x3, #112]
60
61 mrs x22, sp_el1
62 mrs x23, elr_el1
63 mrs x24, spsr_el1
64
65 str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
66 str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
67 str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
68 .endm
69
70 .macro restore_common_regs
71 // x2: base address for cpu context
72 // x3: tmp register
73
74 ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
75 ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
76 ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
77
78 msr sp_el1, x22
79 msr elr_el1, x23
80 msr spsr_el1, x24
81
82 add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
83 ldp x19, x20, [x3]
84 ldr x21, [x3, #16]
85
86 msr sp_el0, x19
87 msr elr_el2, x20 // pc on return from el2
88 msr spsr_el2, x21 // pstate on return from el2
89
90 add x3, x2, #CPU_XREG_OFFSET(19)
91 ldp x19, x20, [x3]
92 ldp x21, x22, [x3, #16]
93 ldp x23, x24, [x3, #32]
94 ldp x25, x26, [x3, #48]
95 ldp x27, x28, [x3, #64]
96 ldp x29, lr, [x3, #80]
97 .endm
98
99 .macro save_host_regs
100 save_common_regs
101 .endm
102
103 .macro restore_host_regs
104 restore_common_regs
105 .endm
106
107 .macro save_fpsimd
108 // x2: cpu context address
109 // x3, x4: tmp regs
110 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
111 fpsimd_save x3, 4
112 .endm
113
114 .macro restore_fpsimd
115 // x2: cpu context address
116 // x3, x4: tmp regs
117 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
118 fpsimd_restore x3, 4
119 .endm
120
121 .macro save_guest_regs
122 // x0 is the vcpu address
123 // x1 is the return code, do not corrupt!
124 // x2 is the cpu context
125 // x3 is a tmp register
126 // Guest's x0-x3 are on the stack
127
128 // Compute base to save registers
129 add x3, x2, #CPU_XREG_OFFSET(4)
130 stp x4, x5, [x3]
131 stp x6, x7, [x3, #16]
132 stp x8, x9, [x3, #32]
133 stp x10, x11, [x3, #48]
134 stp x12, x13, [x3, #64]
135 stp x14, x15, [x3, #80]
136 stp x16, x17, [x3, #96]
137 str x18, [x3, #112]
138
139 pop x6, x7 // x2, x3
140 pop x4, x5 // x0, x1
141
142 add x3, x2, #CPU_XREG_OFFSET(0)
143 stp x4, x5, [x3]
144 stp x6, x7, [x3, #16]
145
146 save_common_regs
147 .endm
148
149 .macro restore_guest_regs
150 // x0 is the vcpu address.
151 // x2 is the cpu context
152 // x3 is a tmp register
153
154 // Prepare x0-x3 for later restore
155 add x3, x2, #CPU_XREG_OFFSET(0)
156 ldp x4, x5, [x3]
157 ldp x6, x7, [x3, #16]
158 push x4, x5 // Push x0-x3 on the stack
159 push x6, x7
160
161 // x4-x18
162 ldp x4, x5, [x3, #32]
163 ldp x6, x7, [x3, #48]
164 ldp x8, x9, [x3, #64]
165 ldp x10, x11, [x3, #80]
166 ldp x12, x13, [x3, #96]
167 ldp x14, x15, [x3, #112]
168 ldp x16, x17, [x3, #128]
169 ldr x18, [x3, #144]
170
171 // x19-x29, lr, sp*, elr*, spsr*
172 restore_common_regs
173
174 // Last bits of the 64bit state
175 pop x2, x3
176 pop x0, x1
177
178 // Do not touch any register after this!
179 .endm
180
181 /*
182 * Macros to perform system register save/restore.
183 *
184 * Ordering here is absolutely critical, and must be kept consistent
185 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
186 * and in kvm_asm.h.
187 *
188 * In other words, don't touch any of these unless you know what
189 * you are doing.
190 */
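/*
 * Hedged illustration of that coupling (the authoritative list lives in
 * asm/kvm_asm.h): the context's sysreg array is indexed by constants
 * declared in the same order as the mrs sequence below, roughly
 *
 *	MPIDR_EL1, CSSELR_EL1, SCTLR_EL1, ACTLR_EL1, CPACR_EL1,
 *	TTBR0_EL1, TTBR1_EL1, TCR_EL1, ESR_EL1, ...
 *
 * so each stp deposits the two values read by the mrs pair above it into
 * the matching slots. Reordering either side silently corrupts guest or
 * host state.
 */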
191 .macro save_sysregs
192 // x2: base address for cpu context
193 // x3: tmp register
194
195 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
196
197 mrs x4, vmpidr_el2
198 mrs x5, csselr_el1
199 mrs x6, sctlr_el1
200 mrs x7, actlr_el1
201 mrs x8, cpacr_el1
202 mrs x9, ttbr0_el1
203 mrs x10, ttbr1_el1
204 mrs x11, tcr_el1
205 mrs x12, esr_el1
206 mrs x13, afsr0_el1
207 mrs x14, afsr1_el1
208 mrs x15, far_el1
209 mrs x16, mair_el1
210 mrs x17, vbar_el1
211 mrs x18, contextidr_el1
212 mrs x19, tpidr_el0
213 mrs x20, tpidrro_el0
214 mrs x21, tpidr_el1
215 mrs x22, amair_el1
216 mrs x23, cntkctl_el1
217 mrs x24, par_el1
218 mrs x25, mdscr_el1
219
220 stp x4, x5, [x3]
221 stp x6, x7, [x3, #16]
222 stp x8, x9, [x3, #32]
223 stp x10, x11, [x3, #48]
224 stp x12, x13, [x3, #64]
225 stp x14, x15, [x3, #80]
226 stp x16, x17, [x3, #96]
227 stp x18, x19, [x3, #112]
228 stp x20, x21, [x3, #128]
229 stp x22, x23, [x3, #144]
230 stp x24, x25, [x3, #160]
231 .endm
232
233 .macro save_debug
234 // x2: base address for cpu context
235 // x3: tmp register
236
237 mrs x26, id_aa64dfr0_el1
238 ubfx x24, x26, #12, #4 // Extract BRPs
239 ubfx x25, x26, #20, #4 // Extract WRPs
240 mov w26, #15
241 sub w24, w26, w24 // How many BPs to skip
242 sub w25, w26, w25 // How many WPs to skip
243
244 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
245
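	// Descriptive note: x24 holds how many of the 16 breakpoint
	// register slots this CPU does not implement. Each mrs (and each
	// str further down) is a single 4-byte instruction, so branching
	// to 1f + x24*4 skips exactly the unimplemented registers.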
246 adr x26, 1f
247 add x26, x26, x24, lsl #2
248 br x26
249 1:
250 mrs x20, dbgbcr15_el1
251 mrs x19, dbgbcr14_el1
252 mrs x18, dbgbcr13_el1
253 mrs x17, dbgbcr12_el1
254 mrs x16, dbgbcr11_el1
255 mrs x15, dbgbcr10_el1
256 mrs x14, dbgbcr9_el1
257 mrs x13, dbgbcr8_el1
258 mrs x12, dbgbcr7_el1
259 mrs x11, dbgbcr6_el1
260 mrs x10, dbgbcr5_el1
261 mrs x9, dbgbcr4_el1
262 mrs x8, dbgbcr3_el1
263 mrs x7, dbgbcr2_el1
264 mrs x6, dbgbcr1_el1
265 mrs x5, dbgbcr0_el1
266
267 adr x26, 1f
268 add x26, x26, x24, lsl #2
269 br x26
270
271 1:
272 str x20, [x3, #(15 * 8)]
273 str x19, [x3, #(14 * 8)]
274 str x18, [x3, #(13 * 8)]
275 str x17, [x3, #(12 * 8)]
276 str x16, [x3, #(11 * 8)]
277 str x15, [x3, #(10 * 8)]
278 str x14, [x3, #(9 * 8)]
279 str x13, [x3, #(8 * 8)]
280 str x12, [x3, #(7 * 8)]
281 str x11, [x3, #(6 * 8)]
282 str x10, [x3, #(5 * 8)]
283 str x9, [x3, #(4 * 8)]
284 str x8, [x3, #(3 * 8)]
285 str x7, [x3, #(2 * 8)]
286 str x6, [x3, #(1 * 8)]
287 str x5, [x3, #(0 * 8)]
288
289 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
290
291 adr x26, 1f
292 add x26, x26, x24, lsl #2
293 br x26
294 1:
295 mrs x20, dbgbvr15_el1
296 mrs x19, dbgbvr14_el1
297 mrs x18, dbgbvr13_el1
298 mrs x17, dbgbvr12_el1
299 mrs x16, dbgbvr11_el1
300 mrs x15, dbgbvr10_el1
301 mrs x14, dbgbvr9_el1
302 mrs x13, dbgbvr8_el1
303 mrs x12, dbgbvr7_el1
304 mrs x11, dbgbvr6_el1
305 mrs x10, dbgbvr5_el1
306 mrs x9, dbgbvr4_el1
307 mrs x8, dbgbvr3_el1
308 mrs x7, dbgbvr2_el1
309 mrs x6, dbgbvr1_el1
310 mrs x5, dbgbvr0_el1
311
312 adr x26, 1f
313 add x26, x26, x24, lsl #2
314 br x26
315
316 1:
317 str x20, [x3, #(15 * 8)]
318 str x19, [x3, #(14 * 8)]
319 str x18, [x3, #(13 * 8)]
320 str x17, [x3, #(12 * 8)]
321 str x16, [x3, #(11 * 8)]
322 str x15, [x3, #(10 * 8)]
323 str x14, [x3, #(9 * 8)]
324 str x13, [x3, #(8 * 8)]
325 str x12, [x3, #(7 * 8)]
326 str x11, [x3, #(6 * 8)]
327 str x10, [x3, #(5 * 8)]
328 str x9, [x3, #(4 * 8)]
329 str x8, [x3, #(3 * 8)]
330 str x7, [x3, #(2 * 8)]
331 str x6, [x3, #(1 * 8)]
332 str x5, [x3, #(0 * 8)]
333
334 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
335
336 adr x26, 1f
337 add x26, x26, x25, lsl #2
338 br x26
339 1:
340 mrs x20, dbgwcr15_el1
341 mrs x19, dbgwcr14_el1
342 mrs x18, dbgwcr13_el1
343 mrs x17, dbgwcr12_el1
344 mrs x16, dbgwcr11_el1
345 mrs x15, dbgwcr10_el1
346 mrs x14, dbgwcr9_el1
347 mrs x13, dbgwcr8_el1
348 mrs x12, dbgwcr7_el1
349 mrs x11, dbgwcr6_el1
350 mrs x10, dbgwcr5_el1
351 mrs x9, dbgwcr4_el1
352 mrs x8, dbgwcr3_el1
353 mrs x7, dbgwcr2_el1
354 mrs x6, dbgwcr1_el1
355 mrs x5, dbgwcr0_el1
356
357 adr x26, 1f
358 add x26, x26, x25, lsl #2
359 br x26
360
361 1:
362 str x20, [x3, #(15 * 8)]
363 str x19, [x3, #(14 * 8)]
364 str x18, [x3, #(13 * 8)]
365 str x17, [x3, #(12 * 8)]
366 str x16, [x3, #(11 * 8)]
367 str x15, [x3, #(10 * 8)]
368 str x14, [x3, #(9 * 8)]
369 str x13, [x3, #(8 * 8)]
370 str x12, [x3, #(7 * 8)]
371 str x11, [x3, #(6 * 8)]
372 str x10, [x3, #(5 * 8)]
373 str x9, [x3, #(4 * 8)]
374 str x8, [x3, #(3 * 8)]
375 str x7, [x3, #(2 * 8)]
376 str x6, [x3, #(1 * 8)]
377 str x5, [x3, #(0 * 8)]
378
379 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
380
381 adr x26, 1f
382 add x26, x26, x25, lsl #2
383 br x26
384 1:
385 mrs x20, dbgwvr15_el1
386 mrs x19, dbgwvr14_el1
387 mrs x18, dbgwvr13_el1
388 mrs x17, dbgwvr12_el1
389 mrs x16, dbgwvr11_el1
390 mrs x15, dbgwvr10_el1
391 mrs x14, dbgwvr9_el1
392 mrs x13, dbgwvr8_el1
393 mrs x12, dbgwvr7_el1
394 mrs x11, dbgwvr6_el1
395 mrs x10, dbgwvr5_el1
396 mrs x9, dbgwvr4_el1
397 mrs x8, dbgwvr3_el1
398 mrs x7, dbgwvr2_el1
399 mrs x6, dbgwvr1_el1
400 mrs x5, dbgwvr0_el1
401
402 adr x26, 1f
403 add x26, x26, x25, lsl #2
404 br x26
405
406 1:
407 str x20, [x3, #(15 * 8)]
408 str x19, [x3, #(14 * 8)]
409 str x18, [x3, #(13 * 8)]
410 str x17, [x3, #(12 * 8)]
411 str x16, [x3, #(11 * 8)]
412 str x15, [x3, #(10 * 8)]
413 str x14, [x3, #(9 * 8)]
414 str x13, [x3, #(8 * 8)]
415 str x12, [x3, #(7 * 8)]
416 str x11, [x3, #(6 * 8)]
417 str x10, [x3, #(5 * 8)]
418 str x9, [x3, #(4 * 8)]
419 str x8, [x3, #(3 * 8)]
420 str x7, [x3, #(2 * 8)]
421 str x6, [x3, #(1 * 8)]
422 str x5, [x3, #(0 * 8)]
423
424 mrs x21, mdccint_el1
425 str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
426 .endm
427
428 .macro restore_sysregs
429 // x2: base address for cpu context
430 // x3: tmp register
431
432 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
433
434 ldp x4, x5, [x3]
435 ldp x6, x7, [x3, #16]
436 ldp x8, x9, [x3, #32]
437 ldp x10, x11, [x3, #48]
438 ldp x12, x13, [x3, #64]
439 ldp x14, x15, [x3, #80]
440 ldp x16, x17, [x3, #96]
441 ldp x18, x19, [x3, #112]
442 ldp x20, x21, [x3, #128]
443 ldp x22, x23, [x3, #144]
444 ldp x24, x25, [x3, #160]
445
446 msr vmpidr_el2, x4
447 msr csselr_el1, x5
448 msr sctlr_el1, x6
449 msr actlr_el1, x7
450 msr cpacr_el1, x8
451 msr ttbr0_el1, x9
452 msr ttbr1_el1, x10
453 msr tcr_el1, x11
454 msr esr_el1, x12
455 msr afsr0_el1, x13
456 msr afsr1_el1, x14
457 msr far_el1, x15
458 msr mair_el1, x16
459 msr vbar_el1, x17
460 msr contextidr_el1, x18
461 msr tpidr_el0, x19
462 msr tpidrro_el0, x20
463 msr tpidr_el1, x21
464 msr amair_el1, x22
465 msr cntkctl_el1, x23
466 msr par_el1, x24
467 msr mdscr_el1, x25
468 .endm
469
470 .macro restore_debug
471 // x2: base address for cpu context
472 // x3: tmp register
473
474 mrs x26, id_aa64dfr0_el1
475 ubfx x24, x26, #12, #4 // Extract BRPs
476 ubfx x25, x26, #20, #4 // Extract WRPs
477 mov w26, #15
478 sub w24, w26, w24 // How many BPs to skip
479 sub w25, w26, w25 // How many WPs to skip
480
481 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
482
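	// Same computed-branch trick as in save_debug: skip the loads and
	// msr writes for breakpoint registers that are not implemented
	// (x24), and further down for missing watchpoint registers (x25).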
483 adr x26, 1f
484 add x26, x26, x24, lsl #2
485 br x26
486 1:
487 ldr x20, [x3, #(15 * 8)]
488 ldr x19, [x3, #(14 * 8)]
489 ldr x18, [x3, #(13 * 8)]
490 ldr x17, [x3, #(12 * 8)]
491 ldr x16, [x3, #(11 * 8)]
492 ldr x15, [x3, #(10 * 8)]
493 ldr x14, [x3, #(9 * 8)]
494 ldr x13, [x3, #(8 * 8)]
495 ldr x12, [x3, #(7 * 8)]
496 ldr x11, [x3, #(6 * 8)]
497 ldr x10, [x3, #(5 * 8)]
498 ldr x9, [x3, #(4 * 8)]
499 ldr x8, [x3, #(3 * 8)]
500 ldr x7, [x3, #(2 * 8)]
501 ldr x6, [x3, #(1 * 8)]
502 ldr x5, [x3, #(0 * 8)]
503
504 adr x26, 1f
505 add x26, x26, x24, lsl #2
506 br x26
507 1:
508 msr dbgbcr15_el1, x20
509 msr dbgbcr14_el1, x19
510 msr dbgbcr13_el1, x18
511 msr dbgbcr12_el1, x17
512 msr dbgbcr11_el1, x16
513 msr dbgbcr10_el1, x15
514 msr dbgbcr9_el1, x14
515 msr dbgbcr8_el1, x13
516 msr dbgbcr7_el1, x12
517 msr dbgbcr6_el1, x11
518 msr dbgbcr5_el1, x10
519 msr dbgbcr4_el1, x9
520 msr dbgbcr3_el1, x8
521 msr dbgbcr2_el1, x7
522 msr dbgbcr1_el1, x6
523 msr dbgbcr0_el1, x5
524
525 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
526
527 adr x26, 1f
528 add x26, x26, x24, lsl #2
529 br x26
530 1:
531 ldr x20, [x3, #(15 * 8)]
532 ldr x19, [x3, #(14 * 8)]
533 ldr x18, [x3, #(13 * 8)]
534 ldr x17, [x3, #(12 * 8)]
535 ldr x16, [x3, #(11 * 8)]
536 ldr x15, [x3, #(10 * 8)]
537 ldr x14, [x3, #(9 * 8)]
538 ldr x13, [x3, #(8 * 8)]
539 ldr x12, [x3, #(7 * 8)]
540 ldr x11, [x3, #(6 * 8)]
541 ldr x10, [x3, #(5 * 8)]
542 ldr x9, [x3, #(4 * 8)]
543 ldr x8, [x3, #(3 * 8)]
544 ldr x7, [x3, #(2 * 8)]
545 ldr x6, [x3, #(1 * 8)]
546 ldr x5, [x3, #(0 * 8)]
547
548 adr x26, 1f
549 add x26, x26, x24, lsl #2
550 br x26
551 1:
552 msr dbgbvr15_el1, x20
553 msr dbgbvr14_el1, x19
554 msr dbgbvr13_el1, x18
555 msr dbgbvr12_el1, x17
556 msr dbgbvr11_el1, x16
557 msr dbgbvr10_el1, x15
558 msr dbgbvr9_el1, x14
559 msr dbgbvr8_el1, x13
560 msr dbgbvr7_el1, x12
561 msr dbgbvr6_el1, x11
562 msr dbgbvr5_el1, x10
563 msr dbgbvr4_el1, x9
564 msr dbgbvr3_el1, x8
565 msr dbgbvr2_el1, x7
566 msr dbgbvr1_el1, x6
567 msr dbgbvr0_el1, x5
568
569 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
570
571 adr x26, 1f
572 add x26, x26, x25, lsl #2
573 br x26
574 1:
575 ldr x20, [x3, #(15 * 8)]
576 ldr x19, [x3, #(14 * 8)]
577 ldr x18, [x3, #(13 * 8)]
578 ldr x17, [x3, #(12 * 8)]
579 ldr x16, [x3, #(11 * 8)]
580 ldr x15, [x3, #(10 * 8)]
581 ldr x14, [x3, #(9 * 8)]
582 ldr x13, [x3, #(8 * 8)]
583 ldr x12, [x3, #(7 * 8)]
584 ldr x11, [x3, #(6 * 8)]
585 ldr x10, [x3, #(5 * 8)]
586 ldr x9, [x3, #(4 * 8)]
587 ldr x8, [x3, #(3 * 8)]
588 ldr x7, [x3, #(2 * 8)]
589 ldr x6, [x3, #(1 * 8)]
590 ldr x5, [x3, #(0 * 8)]
591
592 adr x26, 1f
593 add x26, x26, x25, lsl #2
594 br x26
595 1:
596 msr dbgwcr15_el1, x20
597 msr dbgwcr14_el1, x19
598 msr dbgwcr13_el1, x18
599 msr dbgwcr12_el1, x17
600 msr dbgwcr11_el1, x16
601 msr dbgwcr10_el1, x15
602 msr dbgwcr9_el1, x14
603 msr dbgwcr8_el1, x13
604 msr dbgwcr7_el1, x12
605 msr dbgwcr6_el1, x11
606 msr dbgwcr5_el1, x10
607 msr dbgwcr4_el1, x9
608 msr dbgwcr3_el1, x8
609 msr dbgwcr2_el1, x7
610 msr dbgwcr1_el1, x6
611 msr dbgwcr0_el1, x5
612
613 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
614
615 adr x26, 1f
616 add x26, x26, x25, lsl #2
617 br x26
618 1:
619 ldr x20, [x3, #(15 * 8)]
620 ldr x19, [x3, #(14 * 8)]
621 ldr x18, [x3, #(13 * 8)]
622 ldr x17, [x3, #(12 * 8)]
623 ldr x16, [x3, #(11 * 8)]
624 ldr x15, [x3, #(10 * 8)]
625 ldr x14, [x3, #(9 * 8)]
626 ldr x13, [x3, #(8 * 8)]
627 ldr x12, [x3, #(7 * 8)]
628 ldr x11, [x3, #(6 * 8)]
629 ldr x10, [x3, #(5 * 8)]
630 ldr x9, [x3, #(4 * 8)]
631 ldr x8, [x3, #(3 * 8)]
632 ldr x7, [x3, #(2 * 8)]
633 ldr x6, [x3, #(1 * 8)]
634 ldr x5, [x3, #(0 * 8)]
635
636 adr x26, 1f
637 add x26, x26, x25, lsl #2
638 br x26
639 1:
640 msr dbgwvr15_el1, x20
641 msr dbgwvr14_el1, x19
642 msr dbgwvr13_el1, x18
643 msr dbgwvr12_el1, x17
644 msr dbgwvr11_el1, x16
645 msr dbgwvr10_el1, x15
646 msr dbgwvr9_el1, x14
647 msr dbgwvr8_el1, x13
648 msr dbgwvr7_el1, x12
649 msr dbgwvr6_el1, x11
650 msr dbgwvr5_el1, x10
651 msr dbgwvr4_el1, x9
652 msr dbgwvr3_el1, x8
653 msr dbgwvr2_el1, x7
654 msr dbgwvr1_el1, x6
655 msr dbgwvr0_el1, x5
656
657 ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
658 msr mdccint_el1, x21
659 .endm
660
661 .macro skip_32bit_state tmp, target
662 // Skip 32bit state if not needed
663 mrs \tmp, hcr_el2
664 tbnz \tmp, #HCR_RW_SHIFT, \target
665 .endm
666
667 .macro skip_tee_state tmp, target
668 // Skip ThumbEE state if not needed
669 mrs \tmp, id_pfr0_el1
670 tbz \tmp, #12, \target
671 .endm
672
673 .macro skip_debug_state tmp, target
674 ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
675 tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
676 .endm
677
678 .macro compute_debug_state target
679 // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
680 // is set, we do a full save/restore cycle and disable trapping.
681 add x25, x0, #VCPU_CONTEXT
682
683 // Check the state of MDSCR_EL1
684 ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
685 and x26, x25, #DBG_MDSCR_KDE
686 and x25, x25, #DBG_MDSCR_MDE
687 adds xzr, x25, x26
688 b.eq 9998f // Nothing to see there
689
690 	// If any interesting bit was set, we must set the flag
691 mov x26, #KVM_ARM64_DEBUG_DIRTY
692 str x26, [x0, #VCPU_DEBUG_FLAGS]
693 b 9999f // Don't skip restore
694
695 9998:
696 // Otherwise load the flags from memory in case we recently
697 // trapped
698 skip_debug_state x25, \target
699 9999:
700 .endm
701
702 .macro save_guest_32bit_state
703 skip_32bit_state x3, 1f
704
705 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
706 mrs x4, spsr_abt
707 mrs x5, spsr_und
708 mrs x6, spsr_irq
709 mrs x7, spsr_fiq
710 stp x4, x5, [x3]
711 stp x6, x7, [x3, #16]
712
713 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
714 mrs x4, dacr32_el2
715 mrs x5, ifsr32_el2
716 mrs x6, fpexc32_el2
717 stp x4, x5, [x3]
718 str x6, [x3, #16]
719
720 skip_debug_state x8, 2f
721 mrs x7, dbgvcr32_el2
722 str x7, [x3, #24]
723 2:
724 skip_tee_state x8, 1f
725
726 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
727 mrs x4, teecr32_el1
728 mrs x5, teehbr32_el1
729 stp x4, x5, [x3]
730 1:
731 .endm
732
733 .macro restore_guest_32bit_state
734 skip_32bit_state x3, 1f
735
736 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
737 ldp x4, x5, [x3]
738 ldp x6, x7, [x3, #16]
739 msr spsr_abt, x4
740 msr spsr_und, x5
741 msr spsr_irq, x6
742 msr spsr_fiq, x7
743
744 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
745 ldp x4, x5, [x3]
746 ldr x6, [x3, #16]
747 msr dacr32_el2, x4
748 msr ifsr32_el2, x5
749 msr fpexc32_el2, x6
750
751 skip_debug_state x8, 2f
752 ldr x7, [x3, #24]
753 msr dbgvcr32_el2, x7
754 2:
755 skip_tee_state x8, 1f
756
757 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
758 ldp x4, x5, [x3]
759 msr teecr32_el1, x4
760 msr teehbr32_el1, x5
761 1:
762 .endm
763
764 .macro activate_traps
765 ldr x2, [x0, #VCPU_HCR_EL2]
766 msr hcr_el2, x2
767 mov x2, #CPTR_EL2_TTA
768 msr cptr_el2, x2
769
770 mov x2, #(1 << 15) // Trap CP15 Cr=15
771 msr hstr_el2, x2
772
773 mrs x2, mdcr_el2
774 and x2, x2, #MDCR_EL2_HPMN_MASK
775 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
776 orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
777
778 // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
779 // if not dirty.
780 ldr x3, [x0, #VCPU_DEBUG_FLAGS]
781 tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
782 orr x2, x2, #MDCR_EL2_TDA
783 1:
784 msr mdcr_el2, x2
785 .endm
786
787 .macro deactivate_traps
788 mov x2, #HCR_RW
789 msr hcr_el2, x2
790 msr cptr_el2, xzr
791 msr hstr_el2, xzr
792
793 mrs x2, mdcr_el2
794 and x2, x2, #MDCR_EL2_HPMN_MASK
795 msr mdcr_el2, x2
796 .endm
797
798 .macro activate_vm
799 ldr x1, [x0, #VCPU_KVM]
800 kern_hyp_va x1
801 ldr x2, [x1, #KVM_VTTBR]
802 msr vttbr_el2, x2
803 .endm
804
805 .macro deactivate_vm
806 msr vttbr_el2, xzr
807 .endm
808
809 /*
810 * Call into the vgic backend for state saving
811 */
812 .macro save_vgic_state
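	// The alternative_* macros emit the GICv2 call by default and let
	// the cpufeature framework patch in the GICv3 call at boot once
	// ARM64_HAS_SYSREG_GIC_CPUIF is detected (same pattern in
	// restore_vgic_state below).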
813 alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
814 bl __save_vgic_v2_state
815 alternative_else
816 bl __save_vgic_v3_state
817 alternative_endif
818 mrs x24, hcr_el2
819 mov x25, #HCR_INT_OVERRIDE
820 neg x25, x25
821 and x24, x24, x25
822 msr hcr_el2, x24
823 .endm
824
825 /*
826 * Call into the vgic backend for state restoring
827 */
828 .macro restore_vgic_state
829 mrs x24, hcr_el2
830 ldr x25, [x0, #VCPU_IRQ_LINES]
831 orr x24, x24, #HCR_INT_OVERRIDE
832 orr x24, x24, x25
833 msr hcr_el2, x24
834 alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
835 bl __restore_vgic_v2_state
836 alternative_else
837 bl __restore_vgic_v3_state
838 alternative_endif
839 .endm
840
841 .macro save_timer_state
842 // x0: vcpu pointer
843 ldr x2, [x0, #VCPU_KVM]
844 kern_hyp_va x2
845 ldr w3, [x2, #KVM_TIMER_ENABLED]
846 cbz w3, 1f
847
848 mrs x3, cntv_ctl_el0
849 and x3, x3, #3
850 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
851 bic x3, x3, #1 // Clear Enable
852 msr cntv_ctl_el0, x3
853
854 isb
855
856 mrs x3, cntv_cval_el0
857 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
858
859 1:
860 // Allow physical timer/counter access for the host
861 mrs x2, cnthctl_el2
862 orr x2, x2, #3
863 msr cnthctl_el2, x2
864
865 // Clear cntvoff for the host
866 msr cntvoff_el2, xzr
867 .endm
868
869 .macro restore_timer_state
870 // x0: vcpu pointer
871 // Disallow physical timer access for the guest
872 // Physical counter access is allowed
873 mrs x2, cnthctl_el2
874 orr x2, x2, #1
875 bic x2, x2, #2
876 msr cnthctl_el2, x2
877
878 ldr x2, [x0, #VCPU_KVM]
879 kern_hyp_va x2
880 ldr w3, [x2, #KVM_TIMER_ENABLED]
881 cbz w3, 1f
882
883 ldr x3, [x2, #KVM_TIMER_CNTVOFF]
884 msr cntvoff_el2, x3
885 ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
886 msr cntv_cval_el0, x2
887 isb
888
889 ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
890 and x2, x2, #3
891 msr cntv_ctl_el0, x2
892 1:
893 .endm
894
895 __save_sysregs:
896 save_sysregs
897 ret
898
899 __restore_sysregs:
900 restore_sysregs
901 ret
902
903 __save_debug:
904 save_debug
905 ret
906
907 __restore_debug:
908 restore_debug
909 ret
910
911 __save_fpsimd:
912 save_fpsimd
913 ret
914
915 __restore_fpsimd:
916 restore_fpsimd
917 ret
918
919 /*
920 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
921 *
922 * This is the world switch. The first half of the function
923 * deals with entering the guest, and anything from __kvm_vcpu_return
924 * to the end of the function deals with reentering the host.
925 * On the enter path, only x0 (vcpu pointer) must be preserved until
926 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
927 * code) must both be preserved until the epilogue.
928 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
929 */
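/*
 * Hedged sketch of the host-side entry (illustrative only; the actual
 * call site lives in the shared KVM/ARM code, e.g. kvm_arch_vcpu_ioctl_run()):
 *
 *	u64 exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * kvm_call_hyp() issues an HVC that lands in el1_sync below, which turns
 * the function pointer into a HYP VA and branches here with the vcpu
 * pointer in x0.
 */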
930 ENTRY(__kvm_vcpu_run)
931 kern_hyp_va x0
932 msr tpidr_el2, x0 // Save the vcpu register
933
934 // Host context
935 ldr x2, [x0, #VCPU_HOST_CONTEXT]
936 kern_hyp_va x2
937
938 save_host_regs
939 bl __save_fpsimd
940 bl __save_sysregs
941
942 compute_debug_state 1f
943 bl __save_debug
944 1:
945 activate_traps
946 activate_vm
947
948 restore_vgic_state
949 restore_timer_state
950
951 // Guest context
952 add x2, x0, #VCPU_CONTEXT
953
954 bl __restore_sysregs
955 bl __restore_fpsimd
956
957 skip_debug_state x3, 1f
958 bl __restore_debug
959 1:
960 restore_guest_32bit_state
961 restore_guest_regs
962
963 // That's it, no more messing around.
964 eret
965
966 __kvm_vcpu_return:
967 // Assume x0 is the vcpu pointer, x1 the return code
968 // Guest's x0-x3 are on the stack
969
970 // Guest context
971 add x2, x0, #VCPU_CONTEXT
972
973 save_guest_regs
974 bl __save_fpsimd
975 bl __save_sysregs
976
977 skip_debug_state x3, 1f
978 bl __save_debug
979 1:
980 save_guest_32bit_state
981
982 save_timer_state
983 save_vgic_state
984
985 deactivate_traps
986 deactivate_vm
987
988 // Host context
989 ldr x2, [x0, #VCPU_HOST_CONTEXT]
990 kern_hyp_va x2
991
992 bl __restore_sysregs
993 bl __restore_fpsimd
994
995 skip_debug_state x3, 1f
996 // Clear the dirty flag for the next run, as all the state has
997 // already been saved. Note that we nuke the whole 64bit word.
998 // If we ever add more flags, we'll have to be more careful...
999 str xzr, [x0, #VCPU_DEBUG_FLAGS]
1000 bl __restore_debug
1001 1:
1002 restore_host_regs
1003
1004 mov x0, x1
1005 ret
1006 END(__kvm_vcpu_run)
1007
1008 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
1009 ENTRY(__kvm_tlb_flush_vmid_ipa)
1010 dsb ishst
1011
1012 kern_hyp_va x0
1013 ldr x2, [x0, #KVM_VTTBR]
1014 msr vttbr_el2, x2
1015 isb
1016
1017 /*
1018 * We could do so much better if we had the VA as well.
1019 * Instead, we invalidate Stage-2 for this IPA, and the
1020 * whole of Stage-1. Weep...
1021 */
1022 lsr x1, x1, #12
1023 tlbi ipas2e1is, x1
1024 /*
1025 * We have to ensure completion of the invalidation at Stage-2,
1026 * since a table walk on another CPU could refill a TLB with a
1027 * complete (S1 + S2) walk based on the old Stage-2 mapping if
1028 * the Stage-1 invalidation happened first.
1029 */
1030 dsb ish
1031 tlbi vmalle1is
1032 dsb ish
1033 isb
1034
1035 msr vttbr_el2, xzr
1036 ret
1037 ENDPROC(__kvm_tlb_flush_vmid_ipa)
1038
1039 /**
1040 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
1041  * @kvm:	pointer to the kvm structure
1042  *
1043  * Invalidates all Stage 1 and Stage 2 TLB entries for the current VMID.
1044 */
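/*
 * Hedged usage sketch: the host reaches this entry point through the HVC
 * trampoline rather than by a direct branch, along the lines of:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 */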
1045 ENTRY(__kvm_tlb_flush_vmid)
1046 dsb ishst
1047
1048 kern_hyp_va x0
1049 ldr x2, [x0, #KVM_VTTBR]
1050 msr vttbr_el2, x2
1051 isb
1052
1053 tlbi vmalls12e1is
1054 dsb ish
1055 isb
1056
1057 msr vttbr_el2, xzr
1058 ret
1059 ENDPROC(__kvm_tlb_flush_vmid)
1060
1061 ENTRY(__kvm_flush_vm_context)
1062 dsb ishst
1063 tlbi alle1is
1064 ic ialluis
1065 dsb ish
1066 ret
1067 ENDPROC(__kvm_flush_vm_context)
1068
1069 __kvm_hyp_panic:
1070 // Guess the context by looking at VTTBR:
1071 // If zero, then we're already a host.
1072 	// Otherwise restore a minimal host context before panicking.
1073 mrs x0, vttbr_el2
1074 cbz x0, 1f
1075
1076 mrs x0, tpidr_el2
1077
1078 deactivate_traps
1079 deactivate_vm
1080
1081 ldr x2, [x0, #VCPU_HOST_CONTEXT]
1082 kern_hyp_va x2
1083
1084 bl __restore_sysregs
1085
1086 1: adr x0, __hyp_panic_str
1087 adr x1, 2f
1088 ldp x2, x3, [x1]
1089 sub x0, x0, x2
1090 add x0, x0, x3
1091 mrs x1, spsr_el2
1092 mrs x2, elr_el2
1093 mrs x3, esr_el2
1094 mrs x4, far_el2
1095 mrs x5, hpfar_el2
1096 mrs x6, par_el1
1097 mrs x7, tpidr_el2
1098
1099 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
1100 PSR_MODE_EL1h)
1101 msr spsr_el2, lr
1102 ldr lr, =panic
1103 msr elr_el2, lr
1104 eret
1105
1106 .align 3
1107 2: .quad HYP_PAGE_OFFSET
1108 .quad PAGE_OFFSET
1109 ENDPROC(__kvm_hyp_panic)
1110
1111 __hyp_panic_str:
1112 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
1113
1114 .align 2
1115
1116 /*
1117 * u64 kvm_call_hyp(void *hypfn, ...);
1118 *
1119  * This is not really a variadic function in the classic C sense, and care must
1120 * be taken when calling this to ensure parameters are passed in registers
1121 * only, since the stack will change between the caller and the callee.
1122 *
1123 * Call the function with the first argument containing a pointer to the
1124 * function you wish to call in Hyp mode, and subsequent arguments will be
1125 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
1126 * function pointer can be passed). The function being called must be mapped
1127  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). The return value
1128  * is passed back in x0.
1129 *
1130 * A function pointer with a value of 0 has a special meaning, and is
1131 * used to implement __hyp_get_vectors in the same way as in
1132  * arch/arm64/kernel/hyp-stub.S.
1133 */
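/*
 * Hedged usage sketch (argument and variable names are illustrative):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	u64 exit = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The hvc below traps to el1_sync, which sees VTTBR_EL2 == 0 (host
 * context), converts x0 to a HYP VA and branches to it after shuffling
 * x1-x3 down into x0-x2.
 */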
1134 ENTRY(kvm_call_hyp)
1135 hvc #0
1136 ret
1137 ENDPROC(kvm_call_hyp)
1138
1139 .macro invalid_vector label, target
1140 .align 2
1141 \label:
1142 b \target
1143 ENDPROC(\label)
1144 .endm
1145
1146 /* None of these should ever happen */
1147 invalid_vector el2t_sync_invalid, __kvm_hyp_panic
1148 invalid_vector el2t_irq_invalid, __kvm_hyp_panic
1149 invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
1150 invalid_vector el2t_error_invalid, __kvm_hyp_panic
1151 invalid_vector el2h_sync_invalid, __kvm_hyp_panic
1152 invalid_vector el2h_irq_invalid, __kvm_hyp_panic
1153 invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
1154 invalid_vector el2h_error_invalid, __kvm_hyp_panic
1155 invalid_vector el1_sync_invalid, __kvm_hyp_panic
1156 invalid_vector el1_irq_invalid, __kvm_hyp_panic
1157 invalid_vector el1_fiq_invalid, __kvm_hyp_panic
1158 invalid_vector el1_error_invalid, __kvm_hyp_panic
1159
1160 el1_sync: // Guest trapped into EL2
1161 push x0, x1
1162 push x2, x3
1163
1164 mrs x1, esr_el2
1165 lsr x2, x1, #ESR_ELx_EC_SHIFT
1166
1167 cmp x2, #ESR_ELx_EC_HVC64
1168 b.ne el1_trap
1169
1170 mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
1171 cbnz x3, el1_trap // called HVC
1172
1173 /* Here, we're pretty sure the host called HVC. */
1174 pop x2, x3
1175 pop x0, x1
1176
1177 /* Check for __hyp_get_vectors */
1178 cbnz x0, 1f
1179 mrs x0, vbar_el2
1180 b 2f
1181
1182 1: push lr, xzr
1183
1184 /*
1185 * Compute the function address in EL2, and shuffle the parameters.
1186 */
1187 kern_hyp_va x0
1188 mov lr, x0
1189 mov x0, x1
1190 mov x1, x2
1191 mov x2, x3
1192 blr lr
1193
1194 pop lr, xzr
1195 2: eret
1196
1197 el1_trap:
1198 /*
1199 * x1: ESR
1200 * x2: ESR_EC
1201 */
1202 cmp x2, #ESR_ELx_EC_DABT_LOW
1203 mov x0, #ESR_ELx_EC_IABT_LOW
1204 ccmp x2, x0, #4, ne
1205 b.ne 1f // Not an abort we care about
1206
1207 /* This is an abort. Check for permission fault */
1208 and x2, x1, #ESR_ELx_FSC_TYPE
1209 cmp x2, #FSC_PERM
1210 b.ne 1f // Not a permission fault
1211
1212 /*
1213 * Check for Stage-1 page table walk, which is guaranteed
1214 * to give a valid HPFAR_EL2.
1215 */
1216 tbnz x1, #7, 1f // S1PTW is set
1217
1218 /* Preserve PAR_EL1 */
1219 mrs x3, par_el1
1220 push x3, xzr
1221
1222 /*
1223 * Permission fault, HPFAR_EL2 is invalid.
1224 * Resolve the IPA the hard way using the guest VA.
1225 * Stage-1 translation already validated the memory access rights.
1226 * As such, we can use the EL1 translation regime, and don't have
1227 * to distinguish between EL0 and EL1 access.
1228 */
1229 mrs x2, far_el2
1230 at s1e1r, x2
1231 isb
1232
1233 /* Read result */
1234 mrs x3, par_el1
1235 pop x0, xzr // Restore PAR_EL1 from the stack
1236 msr par_el1, x0
1237 tbnz x3, #0, 3f // Bail out if we failed the translation
1238 ubfx x3, x3, #12, #36 // Extract IPA
1239 lsl x3, x3, #4 // and present it like HPFAR
1240 b 2f
1241
1242 1: mrs x3, hpfar_el2
1243 mrs x2, far_el2
1244
1245 2: mrs x0, tpidr_el2
1246 str w1, [x0, #VCPU_ESR_EL2]
1247 str x2, [x0, #VCPU_FAR_EL2]
1248 str x3, [x0, #VCPU_HPFAR_EL2]
1249
1250 mov x1, #ARM_EXCEPTION_TRAP
1251 b __kvm_vcpu_return
1252
1253 /*
1254 * Translation failed. Just return to the guest and
1255 * let it fault again. Another CPU is probably playing
1256 * behind our back.
1257 */
1258 3: pop x2, x3
1259 pop x0, x1
1260
1261 eret
1262
1263 el1_irq:
1264 push x0, x1
1265 push x2, x3
1266 mrs x0, tpidr_el2
1267 mov x1, #ARM_EXCEPTION_IRQ
1268 b __kvm_vcpu_return
1269
1270 .ltorg
1271
1272 .align 11
1273
1274 ENTRY(__kvm_hyp_vector)
1275 ventry el2t_sync_invalid // Synchronous EL2t
1276 ventry el2t_irq_invalid // IRQ EL2t
1277 ventry el2t_fiq_invalid // FIQ EL2t
1278 ventry el2t_error_invalid // Error EL2t
1279
1280 ventry el2h_sync_invalid // Synchronous EL2h
1281 ventry el2h_irq_invalid // IRQ EL2h
1282 ventry el2h_fiq_invalid // FIQ EL2h
1283 ventry el2h_error_invalid // Error EL2h
1284
1285 ventry el1_sync // Synchronous 64-bit EL1
1286 ventry el1_irq // IRQ 64-bit EL1
1287 ventry el1_fiq_invalid // FIQ 64-bit EL1
1288 ventry el1_error_invalid // Error 64-bit EL1
1289
1290 ventry el1_sync // Synchronous 32-bit EL1
1291 ventry el1_irq // IRQ 32-bit EL1
1292 ventry el1_fiq_invalid // FIQ 32-bit EL1
1293 ventry el1_error_invalid // Error 32-bit EL1
1294 ENDPROC(__kvm_hyp_vector)
1295
1296 .popsection