/* Support functions for the unwinder.
   Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Paul Brook

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif

#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file.  */
	/* Tag_ABI_align_needed: This code does not require 8-byte
	   alignment from the caller.  */
	/* .eabi_attribute 24, 0  -- default setting.  */
	/* Tag_ABI_align_preserved: This code preserves 8-byte
	   alignment in any callee.  */
	.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */

#ifndef __symbian__

#include "lib1funcs.S"

.macro UNPREFIX name
	.global SYM (\name)
	EQUIV SYM (\name), SYM (__\name)
.endm
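
/* FUNC_START and ARM_FUNC_START (defined in lib1funcs.S) emit each routine
   under its __-prefixed name, SYM (__\name); UNPREFIX then exports the
   plain name as a global alias of that symbol, e.g. restore_core_regs for
   __restore_core_regs.  */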

#if (__ARM_ARCH__ == 4)
/* Some coprocessors require armv5.  We know this code will never be run on
   other cpus.  Tell gas to allow armv5, but only mark the objects as
   armv4.  */
.arch armv5t
#ifdef __ARM_ARCH_4T__
.object_arch armv4t
#else
.object_arch armv4
#endif
#endif
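
/* The armv5-only encodings in question are assumed to be the generic
   coprocessor forms used further down, in particular ldc2/stc2 for the
   iWMMXt control registers, which do not exist before ARMv5.  */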

#ifdef __ARM_ARCH_6M__

/* r0 points to a 16-word block.  Upload these values to the actual core
   state.  */
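/* The block holds r0-r15 in order, so sp, lr and pc live at byte offsets
   52, 56 and 60; those are the offsets indexed below.  Thumb-1 ldm can
   only load low registers, so the target pc is instead written one word
   below the target sp and recovered with the final "pop {pc}", which also
   leaves sp at its intended value.  */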
FUNC_START restore_core_regs
	mov r1, r0
	add r1, r1, #52
	ldmia r1!, {r3, r4, r5}
	sub r3, r3, #4
	mov ip, r3
	str r5, [r3]
	mov lr, r4
	/* Restore r8-r11.  */
	mov r1, r0
	add r1, r1, #32
	ldmia r1!, {r2, r3, r4, r5}
	mov r8, r2
	mov r9, r3
	mov sl, r4
	mov fp, r5
	mov r1, r0
	add r1, r1, #8
	ldmia r1!, {r2, r3, r4, r5, r6, r7}
	ldr r1, [r0, #4]
	ldr r0, [r0]
	mov sp, ip
	pop {pc}
	FUNC_END restore_core_regs
	UNPREFIX restore_core_regs

/* ARMv6-M does not have coprocessors, so these should never be used.  */
FUNC_START gnu_Unwind_Restore_VFP
	RET

/* Store VFP registers d0-d15 to the address in r0.  */
FUNC_START gnu_Unwind_Save_VFP
	RET

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMD format.  */
FUNC_START gnu_Unwind_Restore_VFP_D
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FLDMD format.  */
FUNC_START gnu_Unwind_Save_VFP_D
	RET

/* Load VFP registers d16-d31 from the address in r0.
   Use this to load from FSTMD (=VSTM) format.  Needs VFPv3.  */
FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
	RET

/* Store VFP registers d16-d31 to the address in r0.
   Use this to store in FLDMD (=VLDM) format.  Needs VFPv3.  */
FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
	RET

FUNC_START gnu_Unwind_Restore_WMMXD
	RET

FUNC_START gnu_Unwind_Save_WMMXD
	RET

FUNC_START gnu_Unwind_Restore_WMMXC
	RET

FUNC_START gnu_Unwind_Save_WMMXC
	RET

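/* The wrapper below builds the register block by hand, since Thumb-1
   cannot push high registers or sp directly.  The resulting 18-word
   (72-byte) frame, with offsets read off the pushes below, is:

	sp + 0	scratch word, kept for 8-byte alignment
	sp + 4	demand-save flags (zero)
	sp + 8	r0-r12, in order
	sp + 60	original sp
	sp + 64	original lr (the return address)
	sp + 68	pc slot (don't care; temporarily holds the original r0)

   r\nargs is pointed at sp + 4, i.e. at the flags word that starts the
   phase2_vrs block handed to the C routine.  */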
.macro UNWIND_WRAPPER name nargs
	FUNC_START \name
	/* Create a phase2_vrs structure.  */
	/* Save r0 in the PC slot so we can use it as a scratch register.  */
	push {r0}
	add r0, sp, #4
	push {r0, lr}		/* Push original SP and LR.  */
	/* Make space for r8-r12.  */
	sub sp, sp, #20
	/* Save low registers.  */
	push {r0, r1, r2, r3, r4, r5, r6, r7}
	/* Save high registers.  */
	add r0, sp, #32
	mov r1, r8
	mov r2, r9
	mov r3, sl
	mov r4, fp
	mov r5, ip
	stmia r0!, {r1, r2, r3, r4, r5}
	/* Restore original low register values.  */
	add r0, sp, #4
	ldmia r0!, {r1, r2, r3, r4, r5}
	/* Restore original r0.  */
	ldr r0, [sp, #60]
	str r0, [sp]
	/* Demand-save flags, plus an extra word for alignment.  */
	mov r3, #0
	push {r2, r3}
	/* Point r\nargs at the block.  Pass r[0..nargs) unchanged.  */
	add r\nargs, sp, #4

	bl SYM (__gnu\name)

	ldr r3, [sp, #64]
	add sp, sp, #72
	bx r3

	FUNC_END \name
	UNPREFIX \name
.endm

#else /* !__ARM_ARCH_6M__ */

/* r0 points to a 16-word block.  Upload these values to the actual core
   state.  */
ARM_FUNC_START restore_core_regs
	/* We must use sp as the base register when restoring sp.  Push the
	   last 3 registers onto the top of the current stack to achieve
	   this.  */
	add r1, r0, #52
	ldmia r1, {r3, r4, r5}	/* {sp, lr, pc}.  */
#if defined(__thumb2__)
	/* Thumb-2 doesn't allow sp in a load-multiple instruction, so push
	   the target address onto the target stack.  This is safe as
	   we're always returning to somewhere further up the call stack.  */
	mov ip, r3
	mov lr, r4
	str r5, [ip, #-4]!
#elif defined(__INTERWORKING__)
	/* Restore pc into ip.  */
	mov r2, r5
	stmfd sp!, {r2, r3, r4}
#else
	stmfd sp!, {r3, r4, r5}
#endif
	/* Don't bother restoring ip.  */
	ldmia r0, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp}
#if defined(__thumb2__)
	/* Pop the return address off the target stack.  */
	mov sp, ip
	pop {pc}
#elif defined(__INTERWORKING__)
	/* Pop the three registers we pushed earlier.  */
	ldmfd sp, {ip, sp, lr}
	bx ip
#else
	ldmfd sp, {sp, lr, pc}
#endif
	FUNC_END restore_core_regs
	UNPREFIX restore_core_regs

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMX format.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP
	/* Use the generic coprocessor form so that gas doesn't complain
	   on soft-float targets.  */
	ldc   p11,cr0,[r0],{0x21}	/* fldmiax r0, {d0-d15} */
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FSTMX format.  */
ARM_FUNC_START gnu_Unwind_Save_VFP
	/* Use the generic coprocessor form so that gas doesn't complain
	   on soft-float targets.  */
	stc   p11,cr0,[r0],{0x21}	/* fstmiax r0, {d0-d15} */
	RET
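
/* In the generic ldc/stc syntax the braced immediate is the coprocessor
   "option" field, which for the VFP load/store-multiple encodings gives
   the number of words transferred: 0x21 (33 words) for the FSTMX/FLDMX
   format used above, which appends a format word to the 16 doubles, and
   0x20 (32 words) for the plain FSTMD/FLDMD form used below.  (This is an
   interpretation of the encodings; the equivalent VFP mnemonics are shown
   in the trailing comments.)  */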

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMD format.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D
	ldc   p11,cr0,[r0],{0x20}	/* fldmiad r0, {d0-d15} */
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FLDMD format.  */
ARM_FUNC_START gnu_Unwind_Save_VFP_D
	stc   p11,cr0,[r0],{0x20}	/* fstmiad r0, {d0-d15} */
	RET

/* Load VFP registers d16-d31 from the address in r0.
   Use this to load from FSTMD (=VSTM) format.  Needs VFPv3.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
	ldcl  p11,cr0,[r0],{0x20}	/* vldm r0, {d16-d31} */
	RET

/* Store VFP registers d16-d31 to the address in r0.
   Use this to store in FLDMD (=VLDM) format.  Needs VFPv3.  */
ARM_FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
	stcl  p11,cr0,[r0],{0x20}	/* vstm r0, {d16-d31} */
	RET

ARM_FUNC_START gnu_Unwind_Restore_WMMXD
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	ldcl  p1, cr0, [r0], #8		/* wldrd wr0, [r0], #8 */
	ldcl  p1, cr1, [r0], #8		/* wldrd wr1, [r0], #8 */
	ldcl  p1, cr2, [r0], #8		/* wldrd wr2, [r0], #8 */
	ldcl  p1, cr3, [r0], #8		/* wldrd wr3, [r0], #8 */
	ldcl  p1, cr4, [r0], #8		/* wldrd wr4, [r0], #8 */
	ldcl  p1, cr5, [r0], #8		/* wldrd wr5, [r0], #8 */
	ldcl  p1, cr6, [r0], #8		/* wldrd wr6, [r0], #8 */
	ldcl  p1, cr7, [r0], #8		/* wldrd wr7, [r0], #8 */
	ldcl  p1, cr8, [r0], #8		/* wldrd wr8, [r0], #8 */
	ldcl  p1, cr9, [r0], #8		/* wldrd wr9, [r0], #8 */
	ldcl  p1, cr10, [r0], #8	/* wldrd wr10, [r0], #8 */
	ldcl  p1, cr11, [r0], #8	/* wldrd wr11, [r0], #8 */
	ldcl  p1, cr12, [r0], #8	/* wldrd wr12, [r0], #8 */
	ldcl  p1, cr13, [r0], #8	/* wldrd wr13, [r0], #8 */
	ldcl  p1, cr14, [r0], #8	/* wldrd wr14, [r0], #8 */
	ldcl  p1, cr15, [r0], #8	/* wldrd wr15, [r0], #8 */
	RET

ARM_FUNC_START gnu_Unwind_Save_WMMXD
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	stcl  p1, cr0, [r0], #8		/* wstrd wr0, [r0], #8 */
	stcl  p1, cr1, [r0], #8		/* wstrd wr1, [r0], #8 */
	stcl  p1, cr2, [r0], #8		/* wstrd wr2, [r0], #8 */
	stcl  p1, cr3, [r0], #8		/* wstrd wr3, [r0], #8 */
	stcl  p1, cr4, [r0], #8		/* wstrd wr4, [r0], #8 */
	stcl  p1, cr5, [r0], #8		/* wstrd wr5, [r0], #8 */
	stcl  p1, cr6, [r0], #8		/* wstrd wr6, [r0], #8 */
	stcl  p1, cr7, [r0], #8		/* wstrd wr7, [r0], #8 */
	stcl  p1, cr8, [r0], #8		/* wstrd wr8, [r0], #8 */
	stcl  p1, cr9, [r0], #8		/* wstrd wr9, [r0], #8 */
	stcl  p1, cr10, [r0], #8	/* wstrd wr10, [r0], #8 */
	stcl  p1, cr11, [r0], #8	/* wstrd wr11, [r0], #8 */
	stcl  p1, cr12, [r0], #8	/* wstrd wr12, [r0], #8 */
	stcl  p1, cr13, [r0], #8	/* wstrd wr13, [r0], #8 */
	stcl  p1, cr14, [r0], #8	/* wstrd wr14, [r0], #8 */
	stcl  p1, cr15, [r0], #8	/* wstrd wr15, [r0], #8 */
	RET

ARM_FUNC_START gnu_Unwind_Restore_WMMXC
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	ldc2  p1, cr8, [r0], #4		/* wldrw wcgr0, [r0], #4 */
	ldc2  p1, cr9, [r0], #4		/* wldrw wcgr1, [r0], #4 */
	ldc2  p1, cr10, [r0], #4	/* wldrw wcgr2, [r0], #4 */
	ldc2  p1, cr11, [r0], #4	/* wldrw wcgr3, [r0], #4 */
	RET

ARM_FUNC_START gnu_Unwind_Save_WMMXC
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	stc2  p1, cr8, [r0], #4		/* wstrw wcgr0, [r0], #4 */
	stc2  p1, cr9, [r0], #4		/* wstrw wcgr1, [r0], #4 */
	stc2  p1, cr10, [r0], #4	/* wstrw wcgr2, [r0], #4 */
	stc2  p1, cr11, [r0], #4	/* wstrw wcgr3, [r0], #4 */
	RET

/* Wrappers to save core registers, then call the real routine.  */

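/* The frame built here matches the one described before the ARMv6-M
   wrapper above: 18 words (72 bytes), with the demand-save flags at
   sp + 4, the core registers from sp + 8, and the original lr (the
   return address) at sp + 64, hence the final 72-byte stack adjustment.  */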
.macro UNWIND_WRAPPER name nargs
	ARM_FUNC_START \name
	/* Create a phase2_vrs structure.  */
	/* Split reg push in two to ensure the correct value for sp.  */
#if defined(__thumb2__)
	mov ip, sp
	push {lr}		/* PC is ignored.  */
	push {ip, lr}		/* Push original SP and LR.  */
#else
	stmfd sp!, {sp, lr, pc}
#endif
	stmfd sp!, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp, ip}

	/* Demand-save flags, plus an extra word for alignment.  */
	mov r3, #0
	stmfd sp!, {r2, r3}

	/* Point r\nargs at the block.  Pass r[0..nargs) unchanged.  */
	add r\nargs, sp, #4
#if defined(__thumb__) && !defined(__thumb2__)
	/* Switch back to thumb mode to avoid interworking hassle.  */
	adr ip, .L1_\name
	orr ip, ip, #1
	bx ip
	.thumb
.L1_\name:
	bl SYM (__gnu\name) __PLT__
	ldr r3, [sp, #64]
	add sp, #72
	bx r3
#else
	bl SYM (__gnu\name) __PLT__
	ldr lr, [sp, #64]
	add sp, sp, #72
	RET
#endif
	FUNC_END \name
	UNPREFIX \name
.endm

#endif /* !__ARM_ARCH_6M__ */

UNWIND_WRAPPER _Unwind_RaiseException 1
UNWIND_WRAPPER _Unwind_Resume 1
UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1
UNWIND_WRAPPER _Unwind_ForcedUnwind 3
UNWIND_WRAPPER _Unwind_Backtrace 2
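
/* The second operand is the number of register arguments the wrapped
   routine takes, so r0..r(nargs-1) are passed through untouched and the
   saved-register block goes to the __gnu* routine in r\nargs: r1 for the
   single-argument entry points, r2 for _Unwind_Backtrace and r3 for
   _Unwind_ForcedUnwind.  */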

#endif /* ndef __symbian__ */