/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common)
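	/*
	 * Register conventions on entry, as consumed by the code below:
	 * r0 = syscall number, r3-r8 = syscall arguments, r9 = the user's
	 * r13 (r13 itself already holds the PACA), r11/r12 = SRR0/SRR1
	 * (user NIP and MSR), and r1 still points at the user stack.
	 */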
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
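	/*
	 * Worked out: with SH = 28 and MB = 63 - 28 = 35, the rldimi
	 * rotates r11 (zero at this point) left by 28 and inserts it
	 * under a one-bit mask covering only bit 35 (IBM numbering),
	 * i.e. bit 28 of the CR image -- exactly CR0.SO. All other CR
	 * bits in r12 are left untouched.
	 */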
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	li	r12,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r12,PACAIRQHAPPENED(r13)

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception
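	/*
	 * The C handler lives in arch/powerpc/kernel/syscall_64.c and has
	 * roughly this shape (a sketch, not the authoritative prototype):
	 *
	 *   long system_call_exception(long r3, long r4, long r5, long r6,
	 *                              long r7, long r8, unsigned long r0,
	 *                              struct pt_regs *regs);
	 *
	 * It returns the syscall's return value in r3, which flows directly
	 * into syscall_exit_prepare() below.
	 */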

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	syscall_exit_prepare
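	/*
	 * A non-zero return from syscall_exit_prepare() means the full
	 * register state saved on the stack must be restored (the
	 * .Lsyscall_restore_regs path below), e.g. when a tracer may have
	 * modified the registers; zero selects the fast path, which zeroes
	 * the remaining volatiles instead of reloading them.
	 */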

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	ld	r6,_LINK(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
	mtlr	r6

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

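/*
 * Kernel threads enter here with r14 = function to call and r15 = its
 * argument (see copy_thread() in arch/powerpc/kernel/process.c). Under
 * ELFv2 the callee derives its TOC pointer from r12 at its global entry
 * point, hence the extra mr r12,r14 below.
 */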
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420

.macro nops number
	.rept \number
	nop
	.endr
.endm
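/*
 * 0x4c400420 appears to encode bcctr 2,0,0, i.e. a bcctr with a
 * CTR-decrementing BO field -- a form the toolchain may refuse to
 * assemble, hence the raw .long.
 */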

.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6
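	/*
	 * Each bl .+4 above pushes a return address onto the hardware
	 * link stack (the return-address predictor); running 64 of them
	 * is intended to displace any stale, potentially attacker-
	 * influenced entries before returning to the caller.
	 */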

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Cancel all explicit user streams as they will have no use after
	 * context switch and will stop the HW from creating streams itself
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the erratum
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However, the rq lock release on this CPU, paired
	 * with the rq lock acquire on the new CPU before the stack becomes
	 * active there, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, AMR not set, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	bne	.Lfast_user_interrupt_return
	andi.	r0,r4,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */

	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpdi	cr1,r3,0
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_NVGPRS(r1)			/* Save the non-volatiles */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* It is never acceptable to get here with interrupts enabled;
	 * check that with the asm equivalent of WARN_ON.
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1
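	/*
	 * The two rotates above clear only MSR_EE: rotating left by 48
	 * moves bit 15 (EE) into the most significant position, where
	 * rldicl's mask (MB=1) drops it, and the second rotate by 16
	 * (48 + 16 = 64) puts every remaining bit back in place.
	 */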

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
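	/*
	 * At this point r0 holds the current MSR with EE, SE, BE and RI
	 * cleared (used by the mtmsrd in __enter_rtas below), and r6 is
	 * that value further stripped of SF, IR, DR, FP, FE0/FE1, RI and
	 * LE -- a 32-bit, real-mode, big-endian MSR that goes into SRR1
	 * for RTAS.
	 */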

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */
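	/*
	 * bcl 20,31,$+4 is the usual "find the current address" idiom: it
	 * branches to the very next instruction while setting LR, so the
	 * mflr yields the address of label 0, and the ld then picks up
	 * the 64-bit literal at label 1 (the address of rtas_restore_regs).
	 */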

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_NVGPRS(r1)			/* Restore the non-volatiles */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_NVGPRS(r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_NVGPRS(r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr