1 /* Copyright (C) 1999-2019 Free Software Foundation, Inc.
2 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>.
3 This file is part of the GNU C Library.
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library; if not, see
16 <https://www.gnu.org/licenses/>.
18 Note that __sigsetjmp() did NOT flush the register stack. Instead,
19 we do it here since __longjmp() is usually much less frequently
20 invoked than __sigsetjmp(). The only difficulty is that __sigsetjmp()
21 didn't (and wouldn't be able to) save ar.rnat either. This is a problem
22 because if we're not careful, we could end up loading random NaT bits.
23 There are two cases to consider:
25 (i) ar.bsp < ia64_rse_rnat_addr(jmpbuf.ar_bsp)
26 ar.rnat contains the desired bits---preserve ar.rnat
27 across loadrs and write to ar.bspstore
29 (ii) ar.bsp >= ia64_rse_rnat_addr(jmpbuf.ar_bsp)
30 The desired ar.rnat is stored in
31 ia64_rse_rnat_addr(jmpbuf.ar_bsp). Load those
32 bits into ar.rnat after setting ar.bspstore. */
37 # define pPos p6 /* is rotate count positive? */
38 # define pNeg p7 /* is rotate count negative? */

41 /* __longjmp(__jmp_buf buf, int val) */
/* Restores the machine state saved in BUF by __sigsetjmp() and resumes
   execution at the matching setjmp site with VAL as its return value.
   NOTE(review): this is an extracted fragment.  The ENTRY()/END()
   wrappers, instruction-group stops (";;"), several set-up moves (the
   reads of ar.rsc into r27, ar.bsp into r10 and ar.rnat into r26), and
   the closing "mov pr"/"br.ret" sequence are not visible here, and each
   line carries a stray leading number from the extraction -- confirm
   everything against the complete file.
   Register roles visible in this extract:
     in0 = &jmpbuf; r2/r3 = jmp_buf walk pointers (post-incremented);
     r23 = saved ar.bsp; r25 = saved ar.unat; r8 = UNAT rotate count.  */

// Two alternative register-frame allocations; presumably selected by
// conditional assembly (e.g. on PTR_DEMANGLE) dropped from this extract.
45 alloc r8=ar.pfs,2,1,3,0
48 alloc r8=ar.pfs,2,0,0,0

// Compute how far the jmp_buf has moved since it was created: the NaT
// bits saved in jmpbuf.ar_unat are keyed to the buffer's ORIGINAL
// address, so their position depends on this delta (the rotate itself
// is not visible in this extract; pPos/pNeg select its direction).
51 add r2=0x98,in0 // r2 <- &jmpbuf.orig_jmp_buf_addr
53 ld8 r8=[r2],-16 // r8 <- orig_jmp_buf_addr
55 and r11=~0x3,r27 // clear ar.rsc.mode (r27 presumably = saved ar.rsc; its mov is outside this extract)
57 flushrs // flush dirty regs to backing store (must be first in insn grp)
58 ld8 r23=[r2],8 // r23 <- jmpbuf.ar_bsp
59 sub r8=r8,in0 // r8 <- &orig_jmpbuf - &jmpbuf
61 ld8 r25=[r2] // r25 <- jmpbuf.ar_unat
62 extr.u r8=r8,3,6 // r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f
64 cmp.lt pNeg,pPos=r8,r0 // sign of the rotate count picks the direction
72 mov ar.rsc=r11 // put RSE in enforced lazy mode
74 add r3=8,in0 // r3 <- &jmpbuf.r1

// Restore the preserved static registers.  ld8.fill also reloads each
// register's NaT bit from ar.unat, which is why ar.unat is set first.
77 ld8.fill.nta r28=[r2],16 // r28 <- jmpbuf.sp
81 mov ar.unat=r25 // setup ar.unat (NaT bits for r1, r4-r7, and r12)
83 ld8.fill.nta gp=[r3],32 // r1 (gp)
84 dep r11=-1,r23,3,6 // r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp)
85 mov sp=r28 // r12 (sp)
87 ld8.nta r16=[r2],16 // caller's unat
88 // ld8.nta r17=[r3],16 // fpsr
90 ld8.fill.nta r4=[r2],16 // r4
91 ld8.fill.nta r5=[r3],16 // r5
92 cmp.geu p8,p0=r10,r11 // p8 <- case (ii): ar.bsp >= ia64_rse_rnat_addr(jmpbuf.ar_bsp) (r10 presumably = current ar.bsp; its mov is outside this extract)
94 ld8.fill.nta r6=[r2],16 // r6
95 ld8.fill.nta r7=[r3],16 // r7
97 mov ar.unat=r16 // restore caller's unat
98 // mov ar.fpsr=r17 // restore fpsr

// Restore the preserved branch registers and application registers.
100 ld8.nta r16=[r2],16 // b0
101 ld8.nta r17=[r3],16 // b1
103 (p8) ld8 r26=[r11] // case (ii): r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp); for case (i) r26 presumably already holds the live ar.rnat (mov outside this extract)
104 mov ar.bspstore=r23 // restore ar.bspstore
106 ld8.nta r18=[r2],16 // b2
107 ld8.nta r19=[r3],16 // b3
110 PTR_DEMANGLE (r16, r24) // demangle saved return address (b0) in r16; r24 scratch here (reloaded with pr below)
112 ld8.nta r20=[r2],16 // b4
113 ld8.nta r21=[r3],16 // b5
115 ld8.nta r11=[r2],16 // ar.pfs
116 ld8.nta r22=[r3],56 // ar.lc
118 ld8.nta r24=[r2],32 // pr

// Restore the preserved floating-point registers (f2-f5, f16-f31).
// ldf.fill reloads the full spilled representation, incl. NaTVal.
121 ldf.fill.nta f2=[r2],32
122 ldf.fill.nta f3=[r3],32
125 ldf.fill.nta f4=[r2],32
126 ldf.fill.nta f5=[r3],32
129 ldf.fill.nta f16=[r2],32
130 ldf.fill.nta f17=[r3],32
133 ldf.fill.nta f18=[r2],32
134 ldf.fill.nta f19=[r3],32
137 ldf.fill.nta f20=[r2],32
138 ldf.fill.nta f21=[r3],32
141 ldf.fill.nta f22=[r2],32
142 ldf.fill.nta f23=[r3],32
145 ldf.fill.nta f24=[r2],32
146 ldf.fill.nta f25=[r3],32
149 ldf.fill.nta f26=[r2],32
150 ldf.fill.nta f27=[r3],32
153 ldf.fill.nta f28=[r2],32
154 ldf.fill.nta f29=[r3],32
156 ldf.fill.nta f30=[r2]
157 ldf.fill.nta f31=[r3]

160 mov ar.rnat=r26 // restore ar.rnat (live value in case (i), backing-store value in case (ii) -- see header comment)
162 mov ar.rsc=r27 // restore ar.rsc
165 invala // virt. -> phys. regnum mapping may change