/* Copyright (C) 1999-2023 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.

   Note that __sigsetjmp() did NOT flush the register stack.  Instead,
   we do it here since __longjmp() is usually much less frequently
   invoked than __sigsetjmp().  The only difficulty is that __sigsetjmp()
   didn't (and wouldn't be able to) save ar.rnat either.  This is a problem
   because if we're not careful, we could end up loading random NaT bits.
   There are two cases:

	(i) ar.bsp < ia64_rse_rnat_addr(jmpbuf.ar_bsp)
		ar.rnat contains the desired bits---preserve ar.rnat
		across loadrs and write to ar.bspstore

	(ii) ar.bsp >= ia64_rse_rnat_addr(jmpbuf.ar_bsp)
		The desired ar.rnat is stored in
		ia64_rse_rnat_addr(jmpbuf.ar_bsp).  Load those
		bits into ar.rnat after setting ar.bspstore.  */

#include <sysdep.h>
#include <pointer_guard.h>
#include <features.h>

#	define	pPos	p6	/* is rotate count positive? */
#	define	pNeg	p7	/* is rotate count negative? */


/* __longjmp(__jmp_buf buf, int val) */

LEAF(__longjmp)
#ifdef CHECK_RSP
	alloc r8=ar.pfs,2,1,3,0
	CHECK_RSP
#else
	alloc r8=ar.pfs,2,0,0,0
#endif
	mov r27=ar.rsc
	add r2=0x98,in0		// r2 <- &jmpbuf.orig_jmp_buf_addr
	;;
	ld8 r8=[r2],-16		// r8 <- orig_jmp_buf_addr
	mov r10=ar.bsp
	and r11=~0x3,r27	// clear ar.rsc.mode
	;;
	flushrs			// flush dirty regs to backing store (must be first in insn grp)
	ld8 r23=[r2],8		// r23 <- jmpbuf.ar_bsp
	sub r8=r8,in0		// r8 <- &orig_jmpbuf - &jmpbuf
	;;
	ld8 r25=[r2]		// r25 <- jmpbuf.ar_unat
	extr.u r8=r8,3,6	// r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f
	;;
	cmp.lt pNeg,pPos=r8,r0	// which direction must the unat bits rotate?
	mov r2=in0
	;;
(pPos)	mov r16=r8
(pNeg)	add r16=64,r8
(pPos)	sub r17=64,r8
(pNeg)	sub r17=r0,r8
	;;
	mov ar.rsc=r11		// put RSE in enforced lazy mode
	shr.u r8=r25,r16
	add r3=8,in0		// r3 <- &jmpbuf.r1
	shl r9=r25,r17
	;;
	ld8.fill.nta r28=[r2],16	// r28 <- jmpbuf.sp
	or r25=r8,r9		// r25 <- saved unat bits rotated into place
	;;
	mov r26=ar.rnat
	mov ar.unat=r25		// setup ar.unat (NaT bits for r1, r4-r7, and r12)
	;;
	ld8.fill.nta gp=[r3],32	// r1 (gp)
	dep r11=-1,r23,3,6	// r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp)
	mov sp=r28		// r12 (sp)
	;;
	ld8.nta r16=[r2],16	// caller's unat
//	ld8.nta r17=[r3],16	// fpsr
	;;
	ld8.fill.nta r4=[r2],16	// r4
	ld8.fill.nta r5=[r3],16	// r5 (gp)
	cmp.geu p8,p0=r10,r11	// p8 <- (ar.bsp >= jmpbuf.ar_bsp)
	;;
	ld8.fill.nta r6=[r2],16	// r6
	ld8.fill.nta r7=[r3],16	// r7
	;;
	mov ar.unat=r16		// restore caller's unat
//	mov ar.fpsr=r17		// restore fpsr
	;;
	ld8.nta r16=[r2],16	// b0
	ld8.nta r17=[r3],16	// b1
	;;
(p8)	ld8 r26=[r11]		// r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp)
	mov ar.bspstore=r23	// restore ar.bspstore
	;;
	ld8.nta r18=[r2],16	// b2
	ld8.nta r19=[r3],16	// b3
	;;
#ifdef PTR_DEMANGLE
	PTR_DEMANGLE (r16, r24)
#endif
	ld8.nta r20=[r2],16	// b4
	ld8.nta r21=[r3],16	// b5
	;;
	ld8.nta r11=[r2],16	// ar.pfs
	ld8.nta r22=[r3],56	// ar.lc
	;;
	ld8.nta r24=[r2],32	// pr
	mov b0=r16
	;;
	ldf.fill.nta f2=[r2],32
	ldf.fill.nta f3=[r3],32
	mov b1=r17
	;;
	ldf.fill.nta f4=[r2],32
	ldf.fill.nta f5=[r3],32
	mov b2=r18
	;;
	ldf.fill.nta f16=[r2],32
	ldf.fill.nta f17=[r3],32
	mov b3=r19
	;;
	ldf.fill.nta f18=[r2],32
	ldf.fill.nta f19=[r3],32
	mov b4=r20
	;;
	ldf.fill.nta f20=[r2],32
	ldf.fill.nta f21=[r3],32
	mov b5=r21
	;;
	ldf.fill.nta f22=[r2],32
	ldf.fill.nta f23=[r3],32
	mov ar.lc=r22
	;;
	ldf.fill.nta f24=[r2],32
	ldf.fill.nta f25=[r3],32
	cmp.eq p8,p9=0,in1	// p8 <- (val == 0); setjmp must then see 1, not 0
	;;
	ldf.fill.nta f26=[r2],32
	ldf.fill.nta f27=[r3],32
	mov ar.pfs=r11
	;;
	ldf.fill.nta f28=[r2],32
	ldf.fill.nta f29=[r3],32
	;;
	ldf.fill.nta f30=[r2]
	ldf.fill.nta f31=[r3]
(p8)	mov r8=1		// return value 1 when val was 0

	mov ar.rnat=r26		// restore ar.rnat
	;;
	mov ar.rsc=r27		// restore ar.rsc
(p9)	mov r8=in1		// otherwise return val unchanged

	invala			// virt. -> phys. regnum mapping may change
	mov pr=r24,-1
	ret
END(__longjmp)