arch/powerpc/kernel/entry_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section ".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section ".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common)
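	/*
	 * Entry state, as implied by the register saves below (set up by the
	 * system call vector): r13 = PACA, r11/r12 = user NIP/MSR copied from
	 * SRR0/SRR1, r9 = user r13, r0 = syscall number, r3-r8 = syscall
	 * arguments, r1 = user stack pointer, r10 is free scratch.
	 */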
	mr r10,r1
	ld r1,PACAKSAVE(r13)
	std r10,0(r1)
	std r11,_NIP(r1)
	std r12,_MSR(r1)
	std r0,GPR0(r1)
	std r10,GPR1(r1)
	std r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld r2,PACATOC(r13)
	mfcr r12
	li r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std r3,GPR3(r1)
	std r4,GPR4(r1)
	std r5,GPR5(r1)
	std r6,GPR6(r1)
	std r7,GPR7(r1)
	std r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std r11,GPR9(r1)
	std r11,GPR10(r1)
	std r11,GPR11(r1)
	std r11,GPR12(r1)
	std r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std r11,_XER(r1)
	std r11,_CTR(r1)
	mflr r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
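	/* r11 is zero here, so rldimi writes a 0 into bit 28 only. */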
	rldimi r12,r11,28,(63-28)
	li r11,0xc00
	std r10,_LINK(r1)
	std r11,_TRAP(r1)
	std r12,_CCR(r1)
	std r3,ORIG_GPR3(r1)
	addi r10,r1,STACK_FRAME_OVERHEAD
	ld r11,exception_marker@toc(r2)
	std r11,-16(r10) /* "regshere" marker */

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li r11,IRQS_ALL_DISABLED
	li r12,PACA_IRQ_HARD_DIS
	stb r11,PACAIRQSOFTMASK(r13)
	stb r12,PACAIRQHAPPENED(r13)
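	/*
	 * PACA_IRQ_HARD_DIS matches the hardware state here: MSR[EE] was
	 * cleared when the system call interrupt was taken.
	 */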

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr r9,r0
	bl system_call_exception

.Lsyscall_exit:
	addi r4,r1,STACK_FRAME_OVERHEAD
	bl syscall_exit_prepare
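	/*
	 * A non-zero return from syscall_exit_prepare() means the full
	 * register image must be restored; see .Lsyscall_restore_regs below.
	 */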

	ld r2,_CCR(r1)
	ld r4,_NIP(r1)
	ld r5,_MSR(r1)
	ld r6,_LINK(r1)

BEGIN_FTR_SECTION
	stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	mtspr SPRN_SRR0,r4
	mtspr SPRN_SRR1,r5
	mtlr r6

	cmpdi r3,0
	bne .Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li r0,0
	li r4,0
	li r5,0
	li r6,0
	li r7,0
	li r8,0
	li r9,0
	li r10,0
	li r11,0
	li r12,0
	mtctr r0
	mtspr SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr r2
	ld r2,GPR2(r1)
	ld r3,GPR3(r1)
	ld r13,GPR13(r1)
	ld r1,GPR1(r1)
	RFI_TO_USER
	b . /* prevent speculative execution */

.Lsyscall_restore_regs:
	ld r3,_CTR(r1)
	ld r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr r3
	mtspr SPRN_XER,r4
	ld r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld r12,GPR12(r1)
	b .Lsyscall_restore_regs_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr r10
	li r9, 1
	rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li r9, MSR_RI
	andc r10, r10, r9
	mtmsrd r10, 1
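	/*
	 * RI is cleared because SRR0/SRR1 are about to be overwritten; an
	 * interrupt taken now would be unrecoverable. The L=1 form of mtmsrd
	 * only changes MSR[EE] and MSR[RI].
	 */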
	mtspr SPRN_SRR0, r11
	mtspr SPRN_SRR1, r12
	RFI_TO_USER
	b . /* prevent speculative execution */
#endif

_GLOBAL(ret_from_fork)
	bl schedule_tail
	REST_NVGPRS(r1)
	li r3,0
	b .Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl schedule_tail
	REST_NVGPRS(r1)
	mtlr r14
	mr r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr r12,r14
#endif
	blrl
	li r3,0
	b .Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
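	/*
	 * The low bit of the saved trap number flags frames whose
	 * non-volatile GPRs have not been saved; it is cleared below once
	 * they are (see FULL_REGS() in asm/ptrace.h).
	 */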
	ld r11,_TRAP(r1)
	andi. r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi r0,r11,1
	std r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH .long 0x4c400420
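/*
 * 0x4c400420 encodes a CTR-decrementing form of bcctr (BO=2), emitted as a
 * raw word; executed repeatedly below it displaces entries in the branch
 * predictor's count cache.
 */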

.macro nops number
	.rept \number
	nop
	.endr
.endm

	.balign 32
	.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr r9

	// Flush the link stack
	.rept 64
	bl .+4
	.endr
	b 1f
	nops 6

	.balign 32
	/* Restore LR */
1:	mtlr r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li r9,0x7fff
	mtctr r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops 3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops 7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align 7
_GLOBAL(_switch)
	mflr r0
	std r0,16(r1)
	stdu r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std r0,_NIP(r1) /* Return to switch caller */
	mfcr r23
	std r23,_CCR(r1)
	std r1,KSP(r3) /* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/*
 * Cancel all explicit user streams as they will have no use after context
 * switch, and stop the HW from creating streams itself.
 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi r6,r4,-THREAD /* Convert THREAD to 'current' */
	std r6,PACACURRENT(r13) /* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld r6, TASK_CANARY(r6)
	std r6, PACA_CANARY(r13)
#endif

	ld r8,KSP(r4) /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi r6,r8,28 /* get its ESID */
	clrrdi r9,r1,28 /* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi r6,r8,40 /* get its 1T ESID */
	clrrdi r9,r1,40 /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
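	/*
	 * If the new stack is in the bolted first kernel segment, or shares
	 * a segment with the current stack, its SLB entry is already present
	 * and no new bolted entry is needed.
	 */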
	clrldi. r0,r6,2 /* is new ESID c00000000? */
	cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
	cror eq,4*cr1+eq,eq
	beq 2f /* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld r7,KSP_VSID(r4) /* Get new stack's VSID */
	oris r0,r6,(SLB_ESID_V)@h
	ori r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li r9,MMU_SEGSIZE_1T /* insert B field */
	oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld r9,PACA_SLBSHADOWPTR(r13)
	li r12,0
	std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	li r12,SLBSHADOW_STACKVSID
	STDX_BE r7,r12,r9 /* Save VSID */
	li r12,SLBSHADOW_STACKESID
	STDX_BE r0,r12,r9 /* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie r6
BEGIN_FTR_SECTION
	slbie r6 /* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr r1,r8 /* start using new stack pointer */
	std r7,PACAKSAVE(r13)
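	/*
	 * PACAKSAVE is the stack pointer loaded by system_call_common and
	 * the interrupt entry code, so subsequent kernel entries from
	 * userspace will run on the new task's kernel stack.
	 */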

	ld r6,_CCR(r1)
	mtcrf 0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi r3,r3,-THREAD
	ld r7,_NIP(r1) /* Return to _switch caller in new task */
	mtlr r7
	addi r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_BOOK3S
/*
 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
 * touched, no exit work created, then this can be used.
 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	kuap_check_amr r3, r4
	ld r4,_MSR(r1)
	andi. r0,r4,MSR_PR
	bne .Lfast_user_interrupt_return
	kuap_restore_amr r3
	andi. r0,r4,MSR_RI
	li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+ .Lfast_kernel_interrupt_return
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl unrecoverable_exception
	b . /* should not get here */

	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	ld r4,_MSR(r1)
	andi. r0,r4,MSR_PR
	beq .Lkernel_interrupt_return
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl interrupt_exit_user_prepare
	cmpdi r3,0
	bne- .Lrestore_nvgprs

.Lfast_user_interrupt_return:
	ld r11,_NIP(r1)
	ld r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld r10,_PPR(r1)
	mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr SPRN_SRR0,r11
	mtspr SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
	ldarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
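	/*
	 * On CPUs where stcx. checks the reservation address, a stdcx. to
	 * the stack would not reliably cancel a reservation held elsewhere,
	 * so a dummy ldarx is used to move the reservation instead.
	 */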

	ld r3,_CCR(r1)
	ld r4,_LINK(r1)
	ld r5,_CTR(r1)
	ld r6,_XER(r1)
	li r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr r3
	mtlr r4
	mtctr r5
	mtspr SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b . /* prevent speculative execution */

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b .Lfast_user_interrupt_return

	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi r3,r1,STACK_FRAME_OVERHEAD
	bl interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpdi cr1,r3,0
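	/*
	 * A non-zero r3 from interrupt_exit_kernel_prepare() requests the
	 * "emulate stack store" fixup at 1: below; cr1 is used so the
	 * comparison survives the register restore.
	 */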
	ld r11,_NIP(r1)
	ld r12,_MSR(r1)
	mtspr SPRN_SRR0,r11
	mtspr SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
	ldarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld r3,_LINK(r1)
	ld r4,_CTR(r1)
	ld r5,_XER(r1)
	ld r6,_CCR(r1)
	li r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr r3
	mtctr r4
	mtspr SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne- cr1,1f /* emulate stack store */
	mtcr r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_KERNEL
	b . /* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr r6
	std r9,PACA_EXGEN+0(r13)
	addi r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std r9,0(r1) /* perform store component of stdu */
	ld r9,PACA_EXGEN+0(r13)

	RFI_TO_KERNEL
	b . /* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr r0
	std r0,16(r1)
	stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1) /* Save the TOC */
	SAVE_GPR(13, r1) /* Save paca */
	SAVE_NVGPRS(r1) /* Save the non-volatiles */

	mfcr r4
	std r4,_CCR(r1)
	mfctr r5
	std r5,_CTR(r1)
	mfspr r6,SPRN_XER
	std r6,_XER(r1)
	mfdar r7
	std r7,_DAR(r1)
	mfdsisr r8
	std r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li r0,0
	mtcr r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz r0,PACAIRQSOFTMASK(r13)
1:	tdeqi r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
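	/*
	 * The rldicl below rotates the MSR left by 48 so that EE (IBM bit 48)
	 * lands in bit 0 and is cleared by the mask; the rotldi by 16
	 * completes the 64-bit rotation, restoring the original layout.
	 */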
	mfmsr r6
	rldicl r7,r6,48,1
	rotldi r7,r7,16
	mtmsrd r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std r1,PACAR1(r13)
	std r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi r4,r4,2 /* convert to realmode address */
	mtlr r4

	li r0,0
	ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc r0,r6,r0

	li r9,1
	rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc r6,r0,r9
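	/*
	 * r0 is the current MSR with EE/SE/BE/RI cleared, used below to
	 * disable interrupts before SRR0/SRR1 are written. r6 is the MSR
	 * RTAS will run with: 32-bit (SF clear), MMU off (IR/DR clear),
	 * FP off, big-endian, interrupts and RI off.
	 */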

__enter_rtas:
	sync /* disable interrupts so SRR0/1 */
	mtmsrd r0 /* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld r5,RTASENTRY(r4) /* get the rtas->entry value */
	ld r4,RTASBASE(r4) /* get the rtas->base value */

	mtspr SPRN_SRR0,r5
	mtspr SPRN_SRR1,r6
	RFI_TO_KERNEL
	b . /* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr r6
	li r0,MSR_RI
	andc r6,r6,r0
	sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or r6,r6,r0
	sync
	mtmsrd r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi r4,r4,2 /* convert to realmode address */

	bcl 20,31,$+4
0:	mflr r3
	ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
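	/*
	 * bcl 20,31,$+4 is the standard "get current address" idiom; this
	 * form is treated specially so it does not disturb the link stack
	 * predictor. r3 now holds the address of rtas_restore_regs, read
	 * from the literal at 1: below.
	 */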

	ld r1,PACAR1(r4) /* Restore our SP */
	ld r4,PACASAVEDMSR(r4) /* Restore our MSR */

	mtspr SPRN_SRR0,r3
	mtspr SPRN_SRR1,r4
	RFI_TO_KERNEL
	b . /* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align 3
1:	.8byte rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1) /* Restore the TOC */
	REST_GPR(13, r1) /* Restore paca */
	REST_NVGPRS(r1) /* Restore the non-volatiles */

	GET_PACA(r13)

	ld r4,_CCR(r1)
	mtcr r4
	ld r5,_CTR(r1)
	mtctr r5
	ld r6,_XER(r1)
	mtspr SPRN_XER,r6
	ld r7,_DAR(r1)
	mtdar r7
	ld r8,_DSISR(r1)
	mtdsisr r8

	addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
	ld r0,16(r1) /* get return address */

	mtlr r0
	blr /* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr r0
	std r0,16(r1)
	stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_NVGPRS(r1)
	mfcr r10
	mfmsr r11
	std r10,_CCR(r1)
	std r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0 r4

	/* Setup our trampoline return addr in LR */
	bcl 20,31,$+4
0:	mflr r4
	addi r4,r4,(1f - 0b)
	mtlr r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm r11,r11,0,1,31
	mtsrr1 r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc r11,r11,r12
	mtsrr1 r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/*
	 * Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF.
	 */
	rldicl r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_NVGPRS(r1)
	ld r4,_CCR(r1)
	mtcr r4

	addi r1,r1,SWITCH_FRAME_SIZE
	ld r0,16(r1)
	mtlr r0
	blr