]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - arch/ia64/kernel/mca_asm.S
License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[thirdparty/kernel/linux.git] / arch / ia64 / kernel / mca_asm.S
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
fe77efb8
HS
2/*
3 * File: mca_asm.S
4 * Purpose: assembly portion of the IA64 MCA handling
5 *
6 * Mods by cfleck to integrate into kernel build
7 *
8 * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
9 * Added various stop bits to get a clean compile
10 *
11 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
12 * Added code to save INIT handoff state in pt_regs format,
13 * switch to temp kstack, switch modes, jump to C INIT handler
14 *
15 * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
16 * Before entering virtual mode code:
17 * 1. Check for TLB CPU error
18 * 2. Restore current thread pointer to kr6
19 * 3. Move stack ptr 16 bytes to conform to C calling convention
20 *
21 * 2004-11-12 Russ Anderson <rja@sgi.com>
22 * Added per cpu MCA/INIT stack save areas.
23 *
24 * 2005-12-08 Keith Owens <kaos@sgi.com>
25 * Use per cpu MCA/INIT stacks for all data.
26 */
1da177e4
LT
27#include <linux/threads.h>
28
29#include <asm/asmmacro.h>
30#include <asm/pgtable.h>
31#include <asm/processor.h>
32#include <asm/mca_asm.h>
33#include <asm/mca.h>
34
7f613c7d 35#include "entry.h"
1da177e4
LT
36
// GET_IA64_MCA_DATA(reg): leave in |reg| the physical address of this
// cpu's MCA save area.  GET_THIS_PADDR yields the physical address of
// the per-cpu ia64_mca_data slot; the ld8 then loads the pointer that
// is stored in that slot.  Usable in physical mode (no virtual refs).
37#define GET_IA64_MCA_DATA(reg)						\
38	GET_THIS_PADDR(reg, ia64_mca_data)				\
39	;;								\
40	ld8 reg=[reg]
41
// Entry points exported from this file: the TLB purge helper (re-used by
// cpu hotplug) and the SAL-registered MCA/INIT handoff entry points.
b8d8b883 42	.global ia64_do_tlb_purge
7f613c7d 43	.global ia64_os_mca_dispatch
07a6a4ae 44	.global ia64_os_init_on_kdump
7f613c7d
KO
45	.global ia64_os_init_dispatch_monarch
46	.global ia64_os_init_dispatch_slave
1da177e4
LT
47
48	.text
49	.align 16
50
7f613c7d
KO
51//StartMain////////////////////////////////////////////////////////////////////
52
b8d8b883
AR
53/*
54 * Just the TLB purge part is moved to a separate function
55 * so we can re-use the code for cpu hotplug code as well
56 * Caller should now setup b1, so we can branch once the
57 * tlb flush is complete.
58 */
1da177e4 59
// Runs in physical mode.  Clobbers r2, r16-r22, r24 and ar.lc.
// Caller must have loaded b1 with the (physical) resume address;
// control returns there via the final br.sptk.many b1.
b8d8b883 60ia64_do_tlb_purge:
1da177e4
LT
61#define O(member)	IA64_CPUINFO_##member##_OFFSET
62
877105cc 63	GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
1da177e4
LT
64	;;
65	addl r17=O(PTCE_STRIDE),r2
66	addl r2=O(PTCE_BASE),r2
67	;;
68	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
69	ld4 r19=[r2],4					// r19=ptce_count[0]
70	ld4 r21=[r17],4					// r21=ptce_stride[0]
71	;;
72	ld4 r20=[r2]					// r20=ptce_count[1]
73	ld4 r22=[r17]					// r22=ptce_stride[1]
74	mov r24=0
75	;;
76	adds r20=-1,r20
77	;;
78#undef O
79
// Nested loop over the PAL-reported purge grid: outer loop (label 2)
// runs ptce_count[0] times advancing by ptce_stride[0]; inner loop
// (label 3, counted in ar.lc) runs ptce_count[1] times advancing by
// ptce_stride[1], issuing ptc.e at each point to purge the entire TC.
802:
81	cmp.ltu p6,p7=r24,r19
82(p7)	br.cond.dpnt.few 4f
83	mov ar.lc=r20
843:
85	ptc.e r18
86	;;
87	add r18=r22,r18
88	br.cloop.sptk.few 3b
89	;;
90	add r18=r21,r18
91	add r24=1,r24
92	;;
93	br.sptk.few 2b
944:
95	srlz.i			// srlz.i implies srlz.d
96	;;
97
98	// Now purge addresses formerly mapped by TR registers
99	// 1. Purge ITR&DTR for kernel.
100	movl r16=KERNEL_START
101	mov r18=KERNEL_TR_PAGE_SHIFT<<2
102	;;
103	ptr.i r16, r18
104	ptr.d r16, r18
105	;;
106	srlz.i
107	;;
108	srlz.d
109	;;
1da177e4
LT
// NOTE(review): steps jump from 1 to 3 — there is no step 2 here; the
// numbering gap appears historical (matches the reload sequence below).
110	// 3. Purge ITR for PAL code.
111	GET_THIS_PADDR(r2, ia64_mca_pal_base)
112	;;
113	ld8 r16=[r2]
114	mov r18=IA64_GRANULE_SHIFT<<2
115	;;
116	ptr.i r16,r18
117	;;
118	srlz.i
119	;;
120	// 4. Purge DTR for stack.
121	mov r16=IA64_KR(CURRENT_STACK)
122	;;
123	shl r16=r16,IA64_GRANULE_SHIFT
124	movl r19=PAGE_OFFSET
125	;;
126	add r16=r19,r16
127	mov r18=IA64_GRANULE_SHIFT<<2
128	;;
129	ptr.d r16,r18
130	;;
131	srlz.i
132	;;
b8d8b883
AR
133	// Now branch away to caller.
134	br.sptk.many b1
135	;;
136
7f613c7d
KO
137//EndMain//////////////////////////////////////////////////////////////////////
138
139//StartMain////////////////////////////////////////////////////////////////////
b8d8b883 140
// OS_MCA entry point, registered with SAL; entered in physical mode.
// Sequence: save non-minstate state (ia64_state_save), optionally purge
// and reload the TLB when bit 60 of the processor state parameter is set
// (TODO confirm exact PSP bit semantics against the SAL spec), switch to
// the per-cpu MCA stack, set kernel registers, enter virtual mode and
// call the C handler ia64_mca_handler(), then undo everything and return
// to SAL via the saved return address in r12.
7f613c7d 141ia64_os_mca_dispatch:
7f613c7d
KO
142	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
143	LOAD_PHYSICAL(p0,r2,1f)			// return address
144	mov r19=1				// All MCA events are treated as monarch (for now)
145	br.sptk ia64_state_save			// save the state that is not in minstate
1461:
b8d8b883 147
7f613c7d
KO
148	GET_IA64_MCA_DATA(r2)
149	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
150	;;
d270acbc 151	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
b8d8b883 152	;;
7f613c7d 153	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
b8d8b883
AR
154	;;
155	tbit.nz p6,p7=r18,60
156(p7)	br.spnt done_tlb_purge_and_reload
157
158	// The following code purges TC and TR entries. Then reload all TC entries.
159	// Purge percpu data TC entries.
160begin_tlb_purge_and_reload:
// b1 = ia64_reload_tr (physical): ia64_do_tlb_purge branches back here.
161	movl r18=ia64_reload_tr;;
162	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
163	mov b1=r18;;
164	br.sptk.many ia64_do_tlb_purge;;
165
166ia64_reload_tr:
1da177e4
LT
167	// Finally reload the TR registers.
168	// 1. Reload DTR/ITR registers for kernel.
169	mov r18=KERNEL_TR_PAGE_SHIFT<<2
170	movl r17=KERNEL_START
171	;;
172	mov cr.itir=r18
173	mov cr.ifa=r17
174	mov r16=IA64_TR_KERNEL
175	mov r19=ip
176	movl r18=PAGE_KERNEL
177	;;
178	dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
179	;;
180	or r18=r17,r18
181	;;
182	itr.i itr[r16]=r18
183	;;
184	itr.d dtr[r16]=r18
185	;;
186	srlz.i
187	srlz.d
188	;;
1da177e4
LT
189	// 3. Reload ITR for PAL code.
190	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
191	;;
192	ld8 r18=[r2]			// load PAL PTE
193	;;
194	GET_THIS_PADDR(r2, ia64_mca_pal_base)
195	;;
196	ld8 r16=[r2]			// load PAL vaddr
197	mov r19=IA64_GRANULE_SHIFT<<2
198	;;
199	mov cr.itir=r19
200	mov cr.ifa=r16
201	mov r20=IA64_TR_PALCODE
202	;;
203	itr.i itr[r20]=r18
204	;;
205	srlz.i
206	;;
207	// 4. Reload DTR for stack.
208	mov r16=IA64_KR(CURRENT_STACK)
209	;;
210	shl r16=r16,IA64_GRANULE_SHIFT
211	movl r19=PAGE_OFFSET
212	;;
213	add r18=r19,r16
214	movl r20=PAGE_KERNEL
215	;;
216	add r16=r20,r16
217	mov r19=IA64_GRANULE_SHIFT<<2
218	;;
219	mov cr.itir=r19
220	mov cr.ifa=r18
221	mov r20=IA64_TR_CURRENT_STACK
222	;;
223	itr.d dtr[r20]=r16
96651896
XZ
// Flag ia64_mca_tr_reload=1 so the C side knows the TRs were reloaded.
224	GET_THIS_PADDR(r2, ia64_mca_tr_reload)
225	mov r18 = 1
1da177e4
LT
226	;;
227	srlz.d
96651896
XZ
228	;;
229	st8 [r2] =r18
230	;;
1da177e4
LT
231
232done_tlb_purge_and_reload:
233
7f613c7d
KO
234	// switch to per cpu MCA stack
235	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
236	LOAD_PHYSICAL(p0,r2,1f)			// return address
237	br.sptk ia64_new_stack
2381:
239
240	// everything saved, now we can set the kernel registers
241	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
242	LOAD_PHYSICAL(p0,r2,1f)			// return address
243	br.sptk ia64_set_kernel_registers
2441:
1da177e4 245
7f613c7d 246	// This must be done in physical mode
1da177e4
LT
247	GET_IA64_MCA_DATA(r2)
248	;;
// r7 carries the physical MCA data pointer across the mode switch; it
// is converted to virtual below via DATA_PA_TO_VA.
7f613c7d 249	mov r7=r2
1da177e4
LT
250
251	// Enter virtual mode from physical mode
252	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
7f613c7d
KO
253
254	// This code returns to SAL via SOS r2, in general SAL has no unwind
255	// data.  To get a clean termination when backtracing the C MCA/INIT
256	// handler, set a dummy return address of 0 in this routine.  That
257	// requires that ia64_os_mca_virtual_begin be a global function.
258ENTRY(ia64_os_mca_virtual_begin)
259	.prologue
260	.save rp,r0
261	.body
262
263	mov ar.rsc=3				// set eager mode for C handler
264	mov r2=r7				// see GET_IA64_MCA_DATA above
265	;;
1da177e4
LT
266
267	// Call virtual mode handler
// ia64_mca_handler(pt_regs *, switch_stack *, ia64_sal_os_state *),
// all three pointers into this cpu's MCA stack area.
7f613c7d
KO
268	alloc r14=ar.pfs,0,0,3,0
269	;;
270	DATA_PA_TO_VA(r2,r7)
271	;;
272	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
273	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
274	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
275	br.call.sptk.many    b0=ia64_mca_handler
276
1da177e4
LT
277	// Revert back to physical mode before going back to SAL
278	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
279ia64_os_mca_virtual_end:
280
7f613c7d
KO
281END(ia64_os_mca_virtual_begin)
282
283	// switch back to previous stack
284	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
285	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
286	LOAD_PHYSICAL(p0,r2,1f)			// return address
287	br.sptk ia64_old_stack
2881:
289
290	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
291	LOAD_PHYSICAL(p0,r2,1f)			// return address
292	br.sptk ia64_state_restore		// restore the SAL state
2931:
294
295	mov		b0=r12			// SAL_CHECK return address
296
7f613c7d
KO
297	br		b0
298
299//EndMain//////////////////////////////////////////////////////////////////////
300
301//StartMain////////////////////////////////////////////////////////////////////
302
07a6a4ae
HS
303//
304// NOP init handler for kdump. In panic situation, we may receive INIT
305// while kernel transition. Since we initialize registers on leave from
306// current kernel, no longer monarch/slave handlers of current kernel in
307// virtual mode are called safely.
308// We can unregister these init handlers from SAL, however then the INIT
309// will result in warmboot by SAL and we cannot retrieve the crashdump.
310// Therefore register this NOP function to SAL, to prevent entering virtual
311// mode and resulting warmboot by SAL.
312//
// Inputs from SAL (see ia64_state_save header): r10 = sal gp,
// r12 = SAL return address, r17 = pal min_state.  Returns to SAL
// immediately, requesting resume of the interrupted context.
313ia64_os_init_on_kdump:
314	mov		r8=r0		// IA64_INIT_RESUME
315	mov             r9=r10		// SAL_GP
316	mov		r22=r17		// *minstate
317	;;
318	mov		r10=r0		// return to same context
319	mov		b0=r12		// SAL_CHECK return address
320	br		b0
321
7f613c7d
KO
322//
323// SAL to OS entry point for INIT on all processors. This has been defined for
324// registration purposes with SAL as a part of ia64_mca_init. Monarch and
325// slave INIT have identical processing, except for the value of the
326// sos->monarch flag in r19.
327//
328
// Common INIT dispatch: save non-minstate state, switch to the per-cpu
// INIT stack, set kernel registers, enter virtual mode and call the C
// handler ia64_init_handler(), then restore and return to SAL via r12.
// The monarch/slave entry points differ only in the r19 monarch flag.
329ia64_os_init_dispatch_monarch:
330	mov r19=1				// Bow, bow, ye lower middle classes!
331	br.sptk ia64_os_init_dispatch
332
333ia64_os_init_dispatch_slave:
334	mov r19=0				// <igor>yeth, mathter</igor>
335
336ia64_os_init_dispatch:
337
338	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
339	LOAD_PHYSICAL(p0,r2,1f)			// return address
340	br.sptk ia64_state_save			// save the state that is not in minstate
3411:
342
343	// switch to per cpu INIT stack
344	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
345	LOAD_PHYSICAL(p0,r2,1f)			// return address
346	br.sptk ia64_new_stack
3471:
348
349	// everything saved, now we can set the kernel registers
350	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
351	LOAD_PHYSICAL(p0,r2,1f)			// return address
352	br.sptk ia64_set_kernel_registers
3531:
354
355	// This must be done in physical mode
1da177e4
LT
356	GET_IA64_MCA_DATA(r2)
357	;;
// r7 carries the physical MCA data pointer across the mode switch.
7f613c7d
KO
358	mov r7=r2
359
360	// Enter virtual mode from physical mode
361	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
362
363	// This code returns to SAL via SOS r2, in general SAL has no unwind
364	// data.  To get a clean termination when backtracing the C MCA/INIT
365	// handler, set a dummy return address of 0 in this routine.  That
366	// requires that ia64_os_init_virtual_begin be a global function.
367ENTRY(ia64_os_init_virtual_begin)
368	.prologue
369	.save rp,r0
370	.body
371
372	mov ar.rsc=3				// set eager mode for C handler
373	mov r2=r7				// see GET_IA64_MCA_DATA above
1da177e4 374	;;
1da177e4 375
7f613c7d
KO
376	// Call virtual mode handler
// ia64_init_handler(pt_regs *, switch_stack *, ia64_sal_os_state *),
// all three pointers into this cpu's INIT stack area.
377	alloc r14=ar.pfs,0,0,3,0
378	;;
379	DATA_PA_TO_VA(r2,r7)
1da177e4 380	;;
7f613c7d
KO
381	add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
382	add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
383	add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
384	br.call.sptk.many    b0=ia64_init_handler
1da177e4 385
7f613c7d
KO
386	// Revert back to physical mode before going back to SAL
387	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
388ia64_os_init_virtual_end:
1da177e4 389
7f613c7d
KO
390END(ia64_os_init_virtual_begin)
391
392	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
393	LOAD_PHYSICAL(p0,r2,1f)			// return address
394	br.sptk ia64_state_restore		// restore the SAL state
3951:
1da177e4 396
7f613c7d
KO
397	// switch back to previous stack
398	alloc r14=ar.pfs,0,0,0,0		// remove the INIT handler frame
399	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
400	LOAD_PHYSICAL(p0,r2,1f)			// return address
401	br.sptk ia64_old_stack
4021:
403
404	mov		b0=r12			// SAL_CHECK return address
1da177e4 405	br		b0
7f613c7d 406
1da177e4
LT
407//EndMain//////////////////////////////////////////////////////////////////////
408
7f613c7d
KO
409// common defines for the stubs
// Register aliases used by the ia64_state_save/restore/new_stack/
// old_stack/set_kernel_registers stubs below; #undef'ed after the last
// stub so they cannot leak into later code.
410#define ms		r4
411#define	regs	r5
412#define temp1	r2	/* careful, it overlaps with input registers */
413#define temp2	r3	/* careful, it overlaps with input registers */
414#define temp3	r7
415#define temp4	r14
416
1da177e4
LT
417
418//++
419// Name:
7f613c7d 420// ia64_state_save()
1da177e4
LT
421//
422// Stub Description:
423//
7f613c7d
KO
424// Save the state that is not in minstate. This is sensitive to the layout of
425// struct ia64_sal_os_state in mca.h.
426//
427// r2 contains the return address, r3 contains either
428// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
429//
430// The OS to SAL section of struct ia64_sal_os_state is set to a default
431// value of cold boot (MCA) or warm boot (INIT) and return to the same
432// context. ia64_sal_os_state is also used to hold some registers that
433// need to be saved and restored across the stack switches.
434//
435// Most input registers to this stub come from PAL/SAL
436// r1 os gp, physical
437// r8 pal_proc entry point
438// r9 sal_proc entry point
439// r10 sal gp
440// r11 MCA - rendezvous state, INIT - reason code
441// r12 sal return address
442// r17 pal min_state
443// r18 processor state parameter
444// r19 monarch flag, set by the caller of this routine
445//
446// In addition to the SAL to OS state, this routine saves all the
447// registers that appear in struct pt_regs and struct switch_stack,
448// excluding those that are already in the PAL minstate area. This
449// results in a partial pt_regs and switch_stack, the C code copies the
450// remaining registers from PAL minstate to pt_regs and switch_stack. The
451// resulting structures contain all the state of the original process when
452// MCA/INIT occurred.
1da177e4
LT
453//
454//--
455
7f613c7d
KO
// See the stub header comment above for inputs.  b0 is loaded from r2
// (return address); r3 selects the MCA or INIT stack area.
456ia64_state_save:
457	add regs=MCA_SOS_OFFSET, r3
458	add ms=MCA_SOS_OFFSET+8, r3
459	mov b0=r2		// save return address
// p1 <- this is an MCA event, p2 <- this is an INIT event; used below to
// pick the default os_status (cold boot for MCA, warm boot for INIT).
460	cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
461	;;
462	GET_IA64_MCA_DATA(temp2)
463	;;
464	add temp1=temp2, regs	// struct ia64_sal_os_state on MCA or INIT stack
465	add temp2=temp2, ms	// struct ia64_sal_os_state+8 on MCA or INIT stack
466	;;
467	mov regs=temp1		// save the start of sos
468	st8 [temp1]=r1,16	// os_gp
469	st8 [temp2]=r8,16	// pal_proc
470	;;
471	st8 [temp1]=r9,16	// sal_proc
472	st8 [temp2]=r11,16	// rv_rc
473	mov r11=cr.iipa
474	;;
d270acbc
KO
475	st8 [temp1]=r18		// proc_state_param
476	st8 [temp2]=r19		// monarch
7f613c7d 477	mov r6=IA64_KR(CURRENT)
d270acbc
KO
478	add temp1=SOS(SAL_RA), regs
479	add temp2=SOS(SAL_GP), regs
7f613c7d
KO
480	;;
481	st8 [temp1]=r12,16	// sal_ra
482	st8 [temp2]=r10,16	// sal_gp
483	mov r12=cr.isr
484	;;
485	st8 [temp1]=r17,16	// pal_min_state
486	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
20bb8685
KO
487	mov r6=IA64_KR(CURRENT_STACK)
488	;;
489	st8 [temp1]=r6,16	// prev_IA64_KR_CURRENT_STACK
490	st8 [temp2]=r0,16	// prev_task, starts off as NULL
7f613c7d
KO
491	mov r6=cr.ifa
492	;;
20bb8685
KO
493	st8 [temp1]=r12,16	// cr.isr
494	st8 [temp2]=r6,16	// cr.ifa
7f613c7d
KO
495	mov r12=cr.itir
496	;;
20bb8685
KO
497	st8 [temp1]=r12,16	// cr.itir
498	st8 [temp2]=r11,16	// cr.iipa
7f613c7d
KO
499	mov r12=cr.iim
500	;;
d270acbc 501	st8 [temp1]=r12		// cr.iim
7f613c7d
KO
502(p1)	mov r12=IA64_MCA_COLD_BOOT
503(p2)	mov r12=IA64_INIT_WARM_BOOT
20bb8685 504	mov r6=cr.iha
d270acbc 505	add temp1=SOS(OS_STATUS), regs
7f613c7d 506	;;
d270acbc
KO
507	st8 [temp2]=r6		// cr.iha
508	add temp2=SOS(CONTEXT), regs
20bb8685 509	st8 [temp1]=r12		// os_status, default is cold boot
7f613c7d
KO
510	mov r6=IA64_MCA_SAME_CONTEXT
511	;;
2a792058 512	st8 [temp2]=r6		// context, default is same context
7f613c7d
KO
513
514	// Save the pt_regs data that is not in minstate.  The previous code
515	// left regs at sos.
516	add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
517	;;
518	add temp1=PT(B6), regs
519	mov temp3=b6
520	mov temp4=b7
521	add temp2=PT(B7), regs
522	;;
523	st8 [temp1]=temp3,PT(AR_CSD)-PT(B6)	// save b6
524	st8 [temp2]=temp4,PT(AR_SSD)-PT(B7)	// save b7
525	mov temp3=ar.csd
526	mov temp4=ar.ssd
// cover: allocate a new empty frame so the interrupted frame becomes
// part of the backing store that gets flushed/saved.
527	cover				// must be last in group
1da177e4 528	;;
7f613c7d
KO
529	st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD)	// save ar.csd
530	st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD)	// save ar.ssd
531	mov temp3=ar.unat
532	mov temp4=ar.pfs
533	;;
534	st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT)	// save ar.unat
535	st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS)	// save ar.pfs
536	mov temp3=ar.rnat
537	mov temp4=ar.bspstore
538	;;
539	st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT)	// save ar.rnat
540	st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE)	// save ar.bspstore
541	mov temp3=ar.bsp
542	;;
543	sub temp3=temp3, temp4	// ar.bsp - ar.bspstore
544	mov temp4=ar.fpsr
545	;;
// loadrs field lives in bits 30:16 of ar.rsc, hence the shift by 16.
546	shl temp3=temp3,16	// compute ar.rsc to be used for "loadrs"
547	;;
548	st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS)	// save loadrs
549	st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR)	// save ar.fpsr
550	mov temp3=ar.ccv
551	;;
552	st8 [temp1]=temp3,PT(F7)-PT(AR_CCV)	// save ar.ccv
553	stf.spill [temp2]=f6,PT(F8)-PT(F6)
554	;;
555	stf.spill [temp1]=f7,PT(F9)-PT(F7)
556	stf.spill [temp2]=f8,PT(F10)-PT(F8)
557	;;
558	stf.spill [temp1]=f9,PT(F11)-PT(F9)
559	stf.spill [temp2]=f10
560	;;
561	stf.spill [temp1]=f11
562
563	// Save the switch_stack data that is not in minstate nor pt_regs.  The
564	// previous code left regs at pt_regs.
565	add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
566	;;
567	add temp1=SW(F2), regs
568	add temp2=SW(F3), regs
569	;;
570	stf.spill [temp1]=f2,32
571	stf.spill [temp2]=f3,32
572	;;
573	stf.spill [temp1]=f4,32
574	stf.spill [temp2]=f5,32
575	;;
576	stf.spill [temp1]=f12,32
577	stf.spill [temp2]=f13,32
578	;;
579	stf.spill [temp1]=f14,32
580	stf.spill [temp2]=f15,32
581	;;
582	stf.spill [temp1]=f16,32
583	stf.spill [temp2]=f17,32
584	;;
585	stf.spill [temp1]=f18,32
586	stf.spill [temp2]=f19,32
587	;;
588	stf.spill [temp1]=f20,32
589	stf.spill [temp2]=f21,32
590	;;
591	stf.spill [temp1]=f22,32
592	stf.spill [temp2]=f23,32
593	;;
594	stf.spill [temp1]=f24,32
595	stf.spill [temp2]=f25,32
596	;;
597	stf.spill [temp1]=f26,32
598	stf.spill [temp2]=f27,32
599	;;
600	stf.spill [temp1]=f28,32
601	stf.spill [temp2]=f29,32
602	;;
603	stf.spill [temp1]=f30,SW(B2)-SW(F30)
604	stf.spill [temp2]=f31,SW(B3)-SW(F31)
605	mov temp3=b2
606	mov temp4=b3
607	;;
608	st8 [temp1]=temp3,16	// save b2
609	st8 [temp2]=temp4,16	// save b3
610	mov temp3=b4
611	mov temp4=b5
612	;;
613	st8 [temp1]=temp3,SW(AR_LC)-SW(B4)	// save b4
614	st8 [temp2]=temp4	// save b5
615	mov temp3=ar.lc
616	;;
617	st8 [temp1]=temp3	// save ar.lc
618
619	// FIXME: Some proms are incorrectly accessing the minstate area as
620	// cached data.  The C code uses region 6, uncached virtual.  Ensure
621	// that there is no cache data lying around for the first 1K of the
622	// minstate area.
623	// Remove this code in September 2006, that gives platforms a year to
624	// fix their proms and get their customers updated.
625
// Four passes, each flushing 8 addresses 32 bytes apart (256 bytes per
// pass), covering the first 4 * 256 = 1K of the minstate area at r17.
626	add r1=32*1,r17
627	add r2=32*2,r17
628	add r3=32*3,r17
629	add r4=32*4,r17
630	add r5=32*5,r17
631	add r6=32*6,r17
632	add r7=32*7,r17
633	;;
634	fc r17
635	fc r1
636	fc r2
637	fc r3
638	fc r4
639	fc r5
640	fc r6
641	fc r7
642	add r17=32*8,r17
643	add r1=32*8,r1
644	add r2=32*8,r2
645	add r3=32*8,r3
646	add r4=32*8,r4
647	add r5=32*8,r5
648	add r6=32*8,r6
649	add r7=32*8,r7
650	;;
651	fc r17
652	fc r1
653	fc r2
654	fc r3
655	fc r4
656	fc r5
657	fc r6
658	fc r7
659	add r17=32*8,r17
660	add r1=32*8,r1
661	add r2=32*8,r2
662	add r3=32*8,r3
663	add r4=32*8,r4
664	add r5=32*8,r5
665	add r6=32*8,r6
666	add r7=32*8,r7
667	;;
668	fc r17
669	fc r1
670	fc r2
671	fc r3
672	fc r4
673	fc r5
674	fc r6
675	fc r7
676	add r17=32*8,r17
677	add r1=32*8,r1
678	add r2=32*8,r2
679	add r3=32*8,r3
680	add r4=32*8,r4
681	add r5=32*8,r5
682	add r6=32*8,r6
683	add r7=32*8,r7
684	;;
685	fc r17
686	fc r1
687	fc r2
688	fc r3
689	fc r4
690	fc r5
691	fc r6
692	fc r7
693
694	br.sptk b0
1da177e4
LT
695
696//EndStub//////////////////////////////////////////////////////////////////////
697
698
699//++
700// Name:
7f613c7d 701// ia64_state_restore()
1da177e4
LT
702//
703// Stub Description:
704//
7f613c7d
KO
705// Restore the SAL/OS state. This is sensitive to the layout of struct
706// ia64_sal_os_state in mca.h.
707//
708// r2 contains the return address, r3 contains either
709// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
710//
711// In addition to the SAL to OS state, this routine restores all the
712// registers that appear in struct pt_regs and struct switch_stack,
713// excluding those in the PAL minstate area.
1da177e4
LT
714//
715//--
716
7f613c7d
KO
// See the stub header comment above for inputs.  b0 is loaded from r2
// (return address); r3 selects the MCA or INIT stack area.  Restore
// order is the mirror of ia64_state_save: switch_stack, then pt_regs,
// then the SAL-to-OS state, then re-wire IA64_TR_CURRENT_STACK.
717ia64_state_restore:
718	// Restore the switch_stack data that is not in minstate nor pt_regs.
719	add regs=MCA_SWITCH_STACK_OFFSET, r3
720	mov b0=r2		// save return address
721	;;
722	GET_IA64_MCA_DATA(temp2)
723	;;
724	add regs=temp2, regs
725	;;
726	add temp1=SW(F2), regs
727	add temp2=SW(F3), regs
728	;;
729	ldf.fill f2=[temp1],32
730	ldf.fill f3=[temp2],32
731	;;
732	ldf.fill f4=[temp1],32
733	ldf.fill f5=[temp2],32
734	;;
735	ldf.fill f12=[temp1],32
736	ldf.fill f13=[temp2],32
737	;;
738	ldf.fill f14=[temp1],32
739	ldf.fill f15=[temp2],32
740	;;
741	ldf.fill f16=[temp1],32
742	ldf.fill f17=[temp2],32
743	;;
744	ldf.fill f18=[temp1],32
745	ldf.fill f19=[temp2],32
746	;;
747	ldf.fill f20=[temp1],32
748	ldf.fill f21=[temp2],32
749	;;
750	ldf.fill f22=[temp1],32
751	ldf.fill f23=[temp2],32
752	;;
753	ldf.fill f24=[temp1],32
754	ldf.fill f25=[temp2],32
755	;;
756	ldf.fill f26=[temp1],32
757	ldf.fill f27=[temp2],32
758	;;
759	ldf.fill f28=[temp1],32
760	ldf.fill f29=[temp2],32
761	;;
762	ldf.fill f30=[temp1],SW(B2)-SW(F30)
763	ldf.fill f31=[temp2],SW(B3)-SW(F31)
764	;;
765	ld8 temp3=[temp1],16	// restore b2
766	ld8 temp4=[temp2],16	// restore b3
767	;;
768	mov b2=temp3
769	mov b3=temp4
770	ld8 temp3=[temp1],SW(AR_LC)-SW(B4)	// restore b4
771	ld8 temp4=[temp2]	// restore b5
772	;;
773	mov b4=temp3
774	mov b5=temp4
775	ld8 temp3=[temp1]	// restore ar.lc
776	;;
777	mov ar.lc=temp3
1da177e4 778
7f613c7d
KO
779	// Restore the pt_regs data that is not in minstate.  The previous code
780	// left regs at switch_stack.
781	add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
782	;;
783	add temp1=PT(B6), regs
784	add temp2=PT(B7), regs
785	;;
786	ld8 temp3=[temp1],PT(AR_CSD)-PT(B6)	// restore b6
787	ld8 temp4=[temp2],PT(AR_SSD)-PT(B7)	// restore b7
788	;;
789	mov b6=temp3
790	mov b7=temp4
791	ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD)	// restore ar.csd
792	ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD)	// restore ar.ssd
793	;;
794	mov ar.csd=temp3
795	mov ar.ssd=temp4
796	ld8 temp3=[temp1]	// restore ar.unat
797	add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
798	ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS)	// restore ar.pfs
799	;;
800	mov ar.unat=temp3
801	mov ar.pfs=temp4
802	// ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack.
803	ld8 temp3=[temp1],PT(F6)-PT(AR_CCV)	// restore ar.ccv
804	ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR)	// restore ar.fpsr
805	;;
806	mov ar.ccv=temp3
807	mov ar.fpsr=temp4
808	ldf.fill f6=[temp1],PT(F8)-PT(F6)
809	ldf.fill f7=[temp2],PT(F9)-PT(F7)
810	;;
811	ldf.fill f8=[temp1],PT(F10)-PT(F8)
812	ldf.fill f9=[temp2],PT(F11)-PT(F9)
813	;;
814	ldf.fill f10=[temp1]
815	ldf.fill f11=[temp2]
816
817	// Restore the SAL to OS state.  The previous code left regs at pt_regs.
818	add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
1da177e4 819	;;
d270acbc
KO
820	add temp1=SOS(SAL_RA), regs
821	add temp2=SOS(SAL_GP), regs
7f613c7d
KO
822	;;
// r8, r9, r10, r12, r22 are loaded with the values SAL expects on
// return (handoff registers); do not reuse them below this point.
823	ld8 r12=[temp1],16	// sal_ra
824	ld8 r9=[temp2],16	// sal_gp
825	;;
20bb8685 826	ld8 r22=[temp1],16	// pal_min_state, virtual
8cab7ccc 827	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
7f613c7d 828	;;
20bb8685
KO
829	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
830	ld8 r20=[temp2],16	// prev_task
831	;;
7f613c7d
KO
832	ld8 temp3=[temp1],16	// cr.isr
833	ld8 temp4=[temp2],16	// cr.ifa
834	;;
835	mov cr.isr=temp3
836	mov cr.ifa=temp4
837	ld8 temp3=[temp1],16	// cr.itir
838	ld8 temp4=[temp2],16	// cr.iipa
839	;;
840	mov cr.itir=temp3
841	mov cr.iipa=temp4
d270acbc
KO
842	ld8 temp3=[temp1]	// cr.iim
843	ld8 temp4=[temp2]	// cr.iha
844	add temp1=SOS(OS_STATUS), regs
845	add temp2=SOS(CONTEXT), regs
7f613c7d
KO
846	;;
847	mov cr.iim=temp3
848	mov cr.iha=temp4
// Clearing bit 62 turns the region-6 (uncached virtual) pal_min_state
// address back into a physical address with the uncached bit (63) set.
8a4b7b6f 849	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
8cab7ccc 850	mov IA64_KR(CURRENT)=r13
7f613c7d
KO
851	ld8 r8=[temp1]		// os_status
852	ld8 r10=[temp2]		// context
853
20bb8685
KO
854	/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
855	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
856	 * purge any existing CURRENT_STACK mapping and insert the new one.
857	 *
8cab7ccc 858	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
20bb8685
KO
859	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
860	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
861	 * the return to SAL.
862	 */
863
864	mov r15=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
865	;;
866	shl r15=r15,IA64_GRANULE_SHIFT
867	;;
868	dep r15=-1,r15,61,3			// virtual granule
869	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
870	;;
871	ptr.d r15,r18
872	;;
873	srlz.d
874
8cab7ccc 875	extr.u r19=r13,61,3			// r13 = prev_IA64_KR_CURRENT
20bb8685
KO
876	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
877	movl r21=PAGE_KERNEL			// page properties
878	;;
879	mov IA64_KR(CURRENT_STACK)=r16
880	cmp.ne p6,p0=RGN_KERNEL,r19		// new stack is in the kernel region?
881	or r21=r20,r21				// construct PA | page properties
882(p6)	br.spnt 1f				// the dreaded cpu 0 idle task in region 5:(
883	;;
884	mov cr.itir=r18
8cab7ccc 885	mov cr.ifa=r13
20bb8685
KO
886	mov r20=IA64_TR_CURRENT_STACK
887	;;
888	itr.d dtr[r20]=r21
889	;;
890	srlz.d
8911:
892
7f613c7d 893	br.sptk b0
1da177e4
LT
894
895//EndStub//////////////////////////////////////////////////////////////////////
896
897
7f613c7d
KO
898//++
899// Name:
900// ia64_new_stack()
1da177e4 901//
7f613c7d 902// Stub Description:
1da177e4 903//
7f613c7d 904// Switch to the MCA/INIT stack.
1da177e4 905//
7f613c7d
KO
906// r2 contains the return address, r3 contains either
907// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 908//
7f613c7d
KO
909// On entry RBS is still on the original stack, this routine switches RBS
910// to use the MCA/INIT stack.
1da177e4 911//
7f613c7d
KO
912// On entry, sos->pal_min_state is physical, on exit it is virtual.
913//
914//--
1da177e4 915
7f613c7d
KO
// See the stub header comment above.  b0 <- r2 (return address);
// r3 selects the MCA or INIT stack area.  Switches RBS onto the
// MCA/INIT stack and converts sos->pal_min_state to a region-6
// (uncached virtual) address for the C code.
916ia64_new_stack:
917	add regs=MCA_PT_REGS_OFFSET, r3
d270acbc 918	add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
7f613c7d
KO
919	mov b0=r2			// save return address
920	GET_IA64_MCA_DATA(temp1)
// invala: discard stale ALAT entries before switching backing store.
921	invala
1da177e4 922	;;
7f613c7d
KO
923	add temp2=temp2, temp1		// struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
924	add regs=regs, temp1		// struct pt_regs on MCA or INIT stack
1da177e4 925	;;
7f613c7d
KO
926	// Address of minstate area provided by PAL is physical, uncacheable.
927	// Convert to Linux virtual address in region 6 for C code.
928	ld8 ms=[temp2]			// pal_min_state, physical
1da177e4 929	;;
7f613c7d
KO
// Set bits 63:62, i.e. region bits 63:61 become 0b110 = region 6.
930	dep temp1=-1,ms,62,2		// set region 6
931	mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
932	;;
933	st8 [temp2]=temp1		// pal_min_state, virtual
1da177e4 934
7f613c7d 935	add temp4=temp3, regs		// start of bspstore on new stack
1da177e4 936	;;
7f613c7d 937	mov ar.bspstore=temp4		// switch RBS to MCA/INIT stack
1da177e4 938	;;
7f613c7d
KO
939	flushrs				// must be first in group
940	br.sptk b0
941
942//EndStub//////////////////////////////////////////////////////////////////////
943
944
945//++
946// Name:
947// ia64_old_stack()
948//
949// Stub Description:
950//
951// Switch to the old stack.
952//
953// r2 contains the return address, r3 contains either
954// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
955//
956// On entry, pal_min_state is virtual, on exit it is physical.
957//
958// On entry RBS is on the MCA/INIT stack, this routine switches RBS
959// back to the previous stack.
960//
961// The psr is set to all zeroes. SAL return requires either all zeroes or
962// just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
963// code does not perform correctly.
964//
965// The dirty registers at the time of the event were flushed to the
966// MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
967// before reverting to the previous bspstore.
968//--
969
// See the stub header comment above.  b0 <- r2 (return address);
// r3 selects the MCA or INIT stack area.  Drops to physical mode via
// rfi (cr.ipsr = 0, per the psr note in the header), then restores
// loadrs/ar.bspstore/ar.rnat to revert RBS to the interrupted stack.
970ia64_old_stack:
971	add regs=MCA_PT_REGS_OFFSET, r3
972	mov b0=r2			// save return address
973	GET_IA64_MCA_DATA(temp2)
974	LOAD_PHYSICAL(p0,temp1,1f)
1da177e4 975	;;
7f613c7d
KO
976	mov cr.ipsr=r0
977	mov cr.ifs=r0
978	mov cr.iip=temp1
1da177e4 979	;;
// rfi with ipsr=0 resumes at the physical address of 1f below.
7f613c7d 980	invala
1da177e4 981	rfi
7f613c7d
KO
9821:
983
984	add regs=regs, temp2		// struct pt_regs on MCA or INIT stack
1da177e4 985	;;
7f613c7d 986	add temp1=PT(LOADRS), regs
1da177e4 987	;;
7f613c7d 988	ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS)	// restore loadrs
1da177e4 989	;;
7f613c7d
KO
990	ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE)	// restore ar.bspstore
// ar.rsc <- saved loadrs value (loadrs field sits in ar.rsc bits 30:16,
// mode bits zero), then "loadrs" reloads the dirty registers from RBS.
991	mov ar.rsc=temp2
992	;;
993	loadrs
994	ld8 temp4=[temp1]		// restore ar.rnat
995	;;
996	mov ar.bspstore=temp3		// back to old stack
997	;;
998	mov ar.rnat=temp4
999	;;
1000
1001	br.sptk b0
1da177e4 1002
7f613c7d 1003//EndStub//////////////////////////////////////////////////////////////////////
1da177e4 1004
1da177e4 1005
7f613c7d
KO
1006//++
1007// Name:
1008// ia64_set_kernel_registers()
1da177e4 1009//
7f613c7d
KO
1010// Stub Description:
1011//
1012// Set the registers that are required by the C code in order to run on an
1013// MCA/INIT stack.
1014//
1015// r2 contains the return address, r3 contains either
1016// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
1da177e4 1017//
7f613c7d
KO
1018//--
1019
// See the stub header comment above.  b0 <- r2 (return address);
// r3 selects the MCA or INIT stack area.  Sets sp (r12) and current
// (r13 + kr6) to virtual addresses on the MCA/INIT stack, resets
// ar.fpsr to the kernel default, and wires IA64_TR_CURRENT_STACK to
// the handler stack.
1020ia64_set_kernel_registers:
1021	add temp3=MCA_SP_OFFSET, r3
7f613c7d
KO
1022	mov b0=r2		// save return address
1023	GET_IA64_MCA_DATA(temp1)
1024	;;
7f613c7d
KO
1025	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
1026	add r13=temp1, r3	// set current to start of MCA/INIT stack
20bb8685 1027	add r20=temp1, r3	// physical start of MCA/INIT stack
7f613c7d 1028	;;
7f613c7d
KO
1029	DATA_PA_TO_VA(r12,temp2)
1030	DATA_PA_TO_VA(r13,temp3)
1031	;;
1032	mov IA64_KR(CURRENT)=r13
1033
20bb8685
KO
1034	/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
1035	 * any dependencies on the algorithm in ia64_switch_to(), just purge
1036	 * any existing CURRENT_STACK mapping and insert the new one.
1037	 */
1038
1039	mov r16=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
1040	;;
1041	shl r16=r16,IA64_GRANULE_SHIFT
1042	;;
1043	dep r16=-1,r16,61,3			// virtual granule
1044	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
1045	;;
1046	ptr.d r16,r18
1047	;;
1048	srlz.d
1049
1050	shr.u r16=r20,IA64_GRANULE_SHIFT	// r20 = physical start of MCA/INIT stack
1051	movl r21=PAGE_KERNEL			// page properties
1052	;;
1053	mov IA64_KR(CURRENT_STACK)=r16
1054	or r21=r20,r21				// construct PA | page properties
1055	;;
1056	mov cr.itir=r18
1057	mov cr.ifa=r13
1058	mov r20=IA64_TR_CURRENT_STACK
8f9e1467
RA
1059
1060	movl r17=FPSR_DEFAULT
1061	;;
1062	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
20bb8685
KO
1063	;;
1064	itr.d dtr[r20]=r21
1065	;;
1066	srlz.d
7f613c7d
KO
1067
1068	br.sptk b0
1069
1070//EndStub//////////////////////////////////////////////////////////////////////
1071
1072#undef ms
1073#undef regs
1074#undef temp1
1075#undef temp2
1076#undef temp3
1077#undef temp4
1078
1da177e4 1079
7f613c7d
KO
1080// Support function for mca.c, it is here to avoid using inline asm. Given the
1081// address of an rnat slot, if that address is below the current ar.bspstore
1082// then return the contents of that slot, otherwise return the contents of
1083// ar.rnat.
// u64 ia64_get_rnat(u64 *slot)  —  in0 = address of an rnat slot.
GLOBAL_ENTRY(ia64_get_rnat)
1085	alloc r14=ar.pfs,1,0,0,0
// ar.rsc=0 puts the RSE in enforced-lazy mode so ar.bspstore and
// ar.rnat are stable while we compare/read them.
1086	mov ar.rsc=0
1087	;;
1088	mov r14=ar.bspstore
1089	;;
// Slot below current bspstore => its rnat value has already been
// spilled to memory; otherwise it is still live in ar.rnat.
1090	cmp.lt p6,p7=in0,r14
1091	;;
1092(p6)	ld8 r8=[in0]
1093(p7)	mov r8=ar.rnat
1094	mov ar.rsc=3			// restore eager mode before returning
1095	br.ret.sptk.many rp
1096END(ia64_get_rnat)
4295ab34
HS
1097
1098
1099// void ia64_set_psr_mc(void)
1100//
1101// Set psr.mc bit to mask MCA/INIT.
// Builds the desired psr in r14 and applies it via rfi to the local
// label 1f — presumably because psr.mc is not writable through the
// ordinary mov-to-psr path (TODO confirm against the architecture
// manual).  Clobbers r14, r15.
GLOBAL_ENTRY(ia64_set_psr_mc)
1103	rsm psr.i | psr.ic		// disable interrupts
1104	;;
1105	srlz.d
1106	;;
1107	mov r14 = psr			// get psr{36:35,31:0}
1108	movl r15 = 1f
1109	;;
1110	dep r14 = -1, r14, PSR_MC, 1	// set psr.mc
1111	;;
1112	dep r14 = -1, r14, PSR_IC, 1	// set psr.ic
1113	;;
1114	dep r14 = -1, r14, PSR_BN, 1	// keep bank1 in use
1115	;;
1116	mov cr.ipsr = r14
1117	mov cr.ifs = r0
1118	mov cr.iip = r15
1119	;;
1120	rfi
11211:
1122	br.ret.sptk.many rp
END(ia64_set_psr_mc)