]>
Commit | Line | Data |
---|---|---|
1e874273 PB |
1 | /* Common unwinding code for ARM EABI and C6X. |
2 | Copyright (C) 2004, 2005, 2009, 2011 Free Software Foundation, Inc. | |
3 | Contributed by Paul Brook | |
4 | ||
5 | This file is free software; you can redistribute it and/or modify it | |
6 | under the terms of the GNU General Public License as published by the | |
7 | Free Software Foundation; either version 3, or (at your option) any | |
8 | later version. | |
9 | ||
10 | This file is distributed in the hope that it will be useful, but | |
11 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | General Public License for more details. | |
14 | ||
15 | Under Section 7 of GPL version 3, you are granted additional | |
16 | permissions described in the GCC Runtime Library Exception, version | |
17 | 3.1, as published by the Free Software Foundation. | |
18 | ||
19 | You should have received a copy of the GNU General Public License and | |
20 | a copy of the GCC Runtime Library Exception along with this program; | |
21 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
22 | <http://www.gnu.org/licenses/>. */ | |
23 | ||
24 | #include "unwind.h" | |
25 | ||
/* We add a prototype for abort here to avoid creating a dependency on
   target headers.  */
extern void abort (void);

/* Definitions for C++ runtime support routines.  We make these weak
   declarations to avoid pulling in libsupc++ unnecessarily.  */
/* NOTE(review): local 'bool' typedef predates <stdbool.h> usage here; it
   must stay layout-compatible with the type the C++ runtime uses.  */
typedef unsigned char bool;

typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */

/* Result of __cxa_type_match: no match, a direct match, or a match that
   indirected through a pointer-to-base (the caller must then rebuild the
   extra pointer layer; see the barrier-matching code below).  */
enum __cxa_type_match_result
{
  ctm_failed = 0,
  ctm_succeeded = 1,
  ctm_succeeded_with_ptr_to_base = 2
};

/* Weak references into the C++ runtime; non-null only when libsupc++ is
   linked in.  */
void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
enum __cxa_type_match_result __attribute__((weak)) __cxa_type_match
  (_Unwind_Control_Block *ucbp, const type_info *rttip,
   bool is_reference, void **matched_object);

/* Optional hook that locates the exception index table covering a given
   address (used e.g. with dynamic loading).  Weak: when absent, the
   static __exidx_start/__exidx_end bounds are used instead.  */
_Unwind_Ptr __attribute__((weak))
__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);
50 | ||
/* Index-table entry content marking a frame that cannot be unwound.  */
#define EXIDX_CANTUNWIND 1
/* Mask for bit 31 of a 32-bit unwind word.  */
#define uint32_highbit (((_uw) 1) << 31)

/* Accessors for the unwinder's private scratch words inside the UCB
   (unwinder_cache): cached stop function/argument for forced unwinds,
   the personality routine address, and the saved call-site pc.  */
#define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
#define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)
58 | ||
/* Unwind descriptors.  */

/* 16-bit scope descriptor header: length/offset halfwords (used when the
   personality routine index is not 2 — see __gnu_unwind_pr_common).  */
typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

/* 32-bit scope descriptor header: length/offset words (used for
   personality routine index 2).  */
typedef struct
{
  _uw length;
  _uw offset;
} EHT32;

/* An exception index table entry.  */

typedef struct __EIT_entry
{
  _uw fnoffset;	/* 31-bit self-relative offset to the function start.  */
  _uw content;	/* EXIDX_CANTUNWIND, inline data, or table offset.  */
} __EIT_entry;
80 | ||
/* Assembly helper functions.  */

/* Restore core register state.  Never returns.  */
void __attribute__((noreturn)) restore_core_regs (struct core_regs *);


/* Restore coprocessor state after phase1 unwinding.  */
static void restore_non_core_regs (phase1_vrs * vrs);

/* A better way to do this would probably be to compare the absolute address
   with a segment relative relocation of the same symbol.  */

extern int __text_start;
extern int __data_start;

/* The exception index table location (linker-provided bounds, used when
   __gnu_Unwind_Find_exidx is not available).  */
extern __EIT_entry __exidx_start;
extern __EIT_entry __exidx_end;

/* Core unwinding functions.  */

/* Calculate the address encoded by a 31-bit self-relative offset at address
   P.  */
static inline _uw selfrel_offset31 (const _uw *p);

/* Return the address of the predefined personality routine with index
   IDX, or 0 if it is not linked in.  */
static _uw __gnu_unwind_get_pr_addr (int idx);
107 | ||
108 | /* Perform a binary search for RETURN_ADDRESS in TABLE. The table contains | |
109 | NREC entries. */ | |
110 | ||
111 | static const __EIT_entry * | |
112 | search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address) | |
113 | { | |
114 | _uw next_fn; | |
115 | _uw this_fn; | |
116 | int n, left, right; | |
117 | ||
118 | if (nrec == 0) | |
119 | return (__EIT_entry *) 0; | |
120 | ||
121 | left = 0; | |
122 | right = nrec - 1; | |
123 | ||
124 | while (1) | |
125 | { | |
126 | n = (left + right) / 2; | |
127 | this_fn = selfrel_offset31 (&table[n].fnoffset); | |
128 | if (n != nrec - 1) | |
129 | next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1; | |
130 | else | |
131 | next_fn = (_uw)0 - 1; | |
132 | ||
133 | if (return_address < this_fn) | |
134 | { | |
135 | if (n == left) | |
136 | return (__EIT_entry *) 0; | |
137 | right = n - 1; | |
138 | } | |
139 | else if (return_address <= next_fn) | |
140 | return &table[n]; | |
141 | else | |
142 | left = n + 1; | |
143 | } | |
144 | } | |
145 | ||
/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_FAILURE if an error occurred, _URC_OK on success,
   _URC_END_OF_STACK if the covering entry is EXIDX_CANTUNWIND.  */

static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  /* Prefer the weak per-object lookup hook when it is linked in.  */
  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
							    &nrec);
      if (!eitp)
	{
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* Fall back to the linker-provided table bounds.  */
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_END_OF_STACK;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */

  if (eitp->content & uint32_highbit)
    {
      /* Bit 31 set: the unwind data is stored inline in the index entry
	 itself.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
	 offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
	(_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines; bits 24-27 select it.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      UCB_PR_ADDR (ucbp) = __gnu_unwind_get_pr_addr (idx);
      if (UCB_PR_ADDR (ucbp) == 0)
	{
	  /* Requested standard routine is not linked in.  */
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* The header word is a self-relative offset to the PR.  */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}
231 | ||
232 | ||
/* Perform phase2 unwinding.  VRS is the initial virtual register state.
   Invokes each frame's personality routine with _US_UNWIND_FRAME_STARTING
   until one requests a context install, then transfers control to the
   landing pad.  Never returns; aborts if the tables disagree with what
   phase 1 saw.  */

static void __attribute__((noreturn))
unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
{
  _Unwind_Reason_Code pr_result;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC(vrs)) != _URC_OK)
	abort ();

      /* Remember the call-site pc so _Unwind_Resume can restart from
	 this frame after a cleanup runs.  */
      UCB_SAVED_CALLSITE_ADDR (ucbp) = VRS_PC(vrs);

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    abort();

  /* Enter the landing pad; does not return.  */
  restore_core_regs (&vrs->core);
}
259 | ||
/* Perform phase2 forced unwinding.  UCBP is the exception, ENTRY_VRS the
   register state, and RESUMING is non-zero when re-entering after a
   cleanup (via _Unwind_Resume).  The caller-supplied stop function is
   consulted at every frame.  Returns only on failure or when the stop
   function ends the walk; on _URC_INSTALL_CONTEXT control transfers to
   the landing pad instead.  */

static _Unwind_Reason_Code
unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
		      int resuming)
{
  _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
  void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
  _Unwind_Reason_Code pr_result = 0;
  /* We use phase1_vrs here even though we do not demand save, for the
     prev_sp field.  */
  phase1_vrs saved_vrs, next_vrs;

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* We don't need to demand-save the non-core registers, because we
     unwind in a single pass.  */
  saved_vrs.demand_save_flags = 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      _Unwind_State action;
      _Unwind_Reason_Code entry_code;
      _Unwind_Reason_Code stop_code;

      /* Find the entry for this routine.  */
      entry_code = get_eit_entry (ucbp, VRS_PC (&saved_vrs));

      if (resuming)
	{
	  action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
	  resuming = 0;
	}
      else
	action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;

      if (entry_code == _URC_OK)
	{
	  /* Remember the call site for a later _Unwind_Resume.  */
	  UCB_SAVED_CALLSITE_ADDR (ucbp) = VRS_PC (&saved_vrs);

	  /* Unwind into a copy so the stop function below still sees
	     this frame's pre-unwind register state.  */
	  next_vrs = saved_vrs;

	  /* Call the pr to decide what to do.  */
	  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	    (action, ucbp, (void *) &next_vrs);

	  saved_vrs.prev_sp = VRS_SP (&next_vrs);
	}
      else
	{
	  /* Treat any failure as the end of unwinding, to cope more
	     gracefully with missing EH information.  Mixed EH and
	     non-EH within one object will usually result in failure,
	     because the .ARM.exidx tables do not indicate the end
	     of the code to which they apply; but mixed EH and non-EH
	     shared objects should return an unwind failure at the
	     entry of a non-EH shared object.  */
	  action |= _US_END_OF_STACK;

	  saved_vrs.prev_sp = VRS_SP (&saved_vrs);
	}

      /* Give the stop function a chance to end the walk.  */
      stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
			   (void *)&saved_vrs, stop_arg);
      if (stop_code != _URC_NO_REASON)
	return _URC_FAILURE;

      if (entry_code != _URC_OK)
	return entry_code;

      saved_vrs = next_vrs;
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  /* Enter the landing pad; does not return.  */
  restore_core_regs (&saved_vrs.core);
}
344 | ||
345 | /* This is a very limited implementation of _Unwind_GetCFA. It returns | |
346 | the stack pointer as it is about to be unwound, and is only valid | |
347 | while calling the stop function during forced unwinding. If the | |
348 | current personality routine result is going to run a cleanup, this | |
349 | will not be the CFA; but when the frame is really unwound, it will | |
350 | be. */ | |
351 | ||
352 | _Unwind_Word | |
353 | _Unwind_GetCFA (_Unwind_Context *context) | |
354 | { | |
355 | return ((phase1_vrs *) context)->prev_sp; | |
356 | } | |
357 | ||
/* Perform phase1 unwinding.  UCBP is the exception being thrown, and
   entry_VRS is the register state on entry to _Unwind_RaiseException.
   Returns _URC_FAILURE if no handler is found or the tables are bad; on
   success, control transfers to the handler via unwind_phase2 and this
   call does not return.  */

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
			     phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code pr_result;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN(entry_vrs);

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags (phase 1 must not disturb real state).  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC (&saved_vrs)) != _URC_OK)
	return _URC_FAILURE;

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  /* We've unwound as far as we want to go, so restore the original
     register state.  */
  restore_non_core_regs (&saved_vrs);
  if (pr_result != _URC_HANDLER_FOUND)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  /* Phase 1 found a handler: run the real unwind (noreturn).  */
  unwind_phase2 (ucbp, entry_vrs);
}
404 | ||
/* Perform a forced unwind, as for _Unwind_ForcedUnwind.  UCBP is the
   exception, STOP_FN and STOP_ARG the stop function and its opaque
   argument (consulted at every frame), and ENTRY_VRS is the register
   state on entry.  Returns only on failure.  */
_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
			   _Unwind_Stop_Fn, void *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
			   _Unwind_Stop_Fn stop_fn, void *stop_arg,
			   phase2_vrs *entry_vrs)
{
  /* Stash the stop function in the UCB so resumption can recognise a
     forced unwind and re-enter the forced-unwind loop.  */
  UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
  UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN(entry_vrs);

  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}
425 | ||
/* Resume unwinding after a cleanup has been run.  UCBP is the exception
   being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_Resume.  Never returns normally: control transfers to the
   next landing pad/handler, or abort() is called.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the call-site address saved during phase 2.  */
  VRS_PC (entry_vrs) = UCB_SAVED_CALLSITE_ADDR (ucbp);

  if (UCB_FORCED_STOP_FN (ucbp))
    {
      /* This exception is being force-unwound: re-enter the forced
	 unwind loop in resume mode.  */
      unwind_phase2_forced (ucbp, entry_vrs, 1);

      /* We can't return failure at this point.  */
      abort ();
    }

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
    (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.  */
      restore_core_regs (&entry_vrs->core);
      /* Fallthrough unreachable: restore_core_regs never returns.  */

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  */
      unwind_phase2 (ucbp, entry_vrs);
      /* Fallthrough unreachable: unwind_phase2 never returns.  */

    default:
      abort ();
    }
}
463 | ||
464 | _Unwind_Reason_Code | |
465 | __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *); | |
466 | ||
467 | _Unwind_Reason_Code | |
468 | __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp, | |
469 | phase2_vrs * entry_vrs) | |
470 | { | |
471 | if (!UCB_FORCED_STOP_FN (ucbp)) | |
472 | return __gnu_Unwind_RaiseException (ucbp, entry_vrs); | |
473 | ||
474 | /* Set the pc to the call site. */ | |
475 | VRS_PC (entry_vrs) = VRS_RETURN (entry_vrs); | |
476 | /* Continue unwinding the next frame. */ | |
477 | return unwind_phase2_forced (ucbp, entry_vrs, 0); | |
478 | } | |
479 | ||
/* Clean up an exception object when unwinding is complete.
   Nothing to do in this implementation.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}
485 | ||
486 | ||
/* Free an exception: invoke the exception's cleanup callback, if any,
   passing _URC_FOREIGN_EXCEPTION_CAUGHT as the reason code.  */

void
_Unwind_DeleteException (_Unwind_Exception * exc)
{
  if (exc->exception_cleanup)
    (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
}
495 | ||
496 | ||
/* Perform stack backtrace through unwind data.  TRACE is called once per
   frame with TRACE_ARGUMENT; the walk stops when TRACE returns anything
   other than _URC_NO_REASON, on an unwind failure, or when the
   personality routine reports end of stack.  ENTRY_VRS is the register
   state on entry.  */
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs);
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code code;

  /* Scratch UCB: backtracing needs the per-frame pr_cache fields even
     though no real exception is in flight.  */
  _Unwind_Control_Block ucb;
  _Unwind_Control_Block *ucbp = &ucb;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN (entry_vrs);

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags (virtual unwind only; do not disturb state).  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC (&saved_vrs)) != _URC_OK)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* The dwarf unwinder assumes the context structure holds things
	 like the function and LSDA pointers.  The ARM implementation
	 caches these in the exception header (UCB).  To avoid
	 rewriting everything we make the virtual IP register point at
	 the UCB.  */
      _Unwind_SetGR((_Unwind_Context *)&saved_vrs, UNWIND_POINTER_REG, (_Unwind_Ptr) ucbp);

      /* Call trace function.  */
      if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument)
	  != _URC_NO_REASON)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* Call the pr to virtually unwind this frame.  */
      code = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND,
	 ucbp, (void *) &saved_vrs);
    }
  while (code != _URC_END_OF_STACK
	 && code != _URC_FAILURE);

  restore_non_core_regs (&saved_vrs);
  return code;
}
554 | ||
555 | ||
/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.  Decodes the EH table entry for the
   current frame: processes scope descriptors (cleanups, catch handlers,
   exception specifications), then executes the frame's unwind
   instructions.  */

static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context,
			int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;
  int forced_unwind = state & _US_FORCE_UNWIND;

  state &= _US_ACTION_MASK;

  /* Prime the unwind-instruction stream from the cached EHT pointer.
     The first word's layout depends on the pr index: pr0 packs three
     instruction bytes after the header byte; pr1/pr2 give a word count
     followed by two instruction bytes.  */
  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else if (id < 3)
    {
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }

  /* Restore the descriptor pointer saved when a cleanup was entered.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  /* additional bit 0 set means the EHT entry was inline immediate data
     with no descriptor list.  */
  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors until the zero terminator.  */
      while (*data)
	{
	  _uw addr;
	  _uw fnstart;

	  /* pr2 uses 32-bit length/offset headers; pr0/pr1 use 16-bit.  */
	  if (id == 2)
	    {
	      len = ((EHT32 *) data)->length;
	      offset = ((EHT32 *) data)->offset;
	      data += 2;
	    }
	  else
	    {
	      len = ((EHT16 *) data)->length;
	      offset = ((EHT16 *) data)->offset;
	      data++;
	    }

	  /* Does the current pc lie inside this descriptor's scope?
	     The low bits of offset/len are flag bits, masked off here.  */
	  fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
	  addr = _Unwind_GetGR (context, R_PC);
	  in_range = (fnstart <= addr && addr < fnstart + (len & ~1));

	  /* Descriptor kind is encoded in the two low flag bits:
	     0 = cleanup, 1 = catch handler, 2 = exception spec.  */
	  switch (((offset & 1) << 1) | (len & 1))
	    {
	    case 0:
	      /* Cleanup.  */
	      if (state != _US_VIRTUAL_UNWIND_FRAME
		  && in_range)
		{
		  /* Cleanup in range, and we are running cleanups.  */
		  _uw lp;

		  /* Landing pad address is 31-bit pc-relative offset.  */
		  lp = selfrel_offset31 (data);
		  data++;
		  /* Save the exception data pointer so the resume path
		     can continue with the next descriptor.  */
		  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
		  if (!__cxa_begin_cleanup (ucbp))
		    return _URC_FAILURE;
		  /* Setup the VRS to enter the landing pad.  */
		  _Unwind_SetGR (context, R_PC, lp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Cleanup not in range, or we are in stage 1.  */
	      data++;
	      break;

	    case 1:
	      /* Catch handler.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  if (in_range)
		    {
		      /* Check for a barrier.  */
		      _uw rtti;
		      bool is_reference = (data[0] & uint32_highbit) != 0;
		      void *matched;
		      enum __cxa_type_match_result match_type;

		      /* Check for no-throw areas.  */
		      if (data[1] == (_uw) -2)
			return _URC_FAILURE;

		      /* The thrown object immediately follows the ECB.  */
		      matched = (void *)(ucbp + 1);
		      if (data[1] != (_uw) -1)
			{
			  /* Match a catch specification.  */
			  rtti = _Unwind_decode_typeinfo_ptr (0,
							      (_uw) &data[1]);
			  match_type = __cxa_type_match (ucbp,
							 (type_info *) rtti,
							 is_reference,
							 &matched);
			}
		      else
			/* catch (...) — matches anything.  */
			match_type = ctm_succeeded;

		      if (match_type)
			{
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  /* ctm_succeeded_with_ptr_to_base really means
			     __cxa_type_match indirected the pointer
			     object.  We have to reconstruct the
			     additional pointer layer by using a
			     temporary.  */
			  if (match_type == ctm_succeeded_with_ptr_to_base)
			    {
			      ucbp->barrier_cache.bitpattern[2]
				= (_uw) matched;
			      ucbp->barrier_cache.bitpattern[0]
				= (_uw) &ucbp->barrier_cache.bitpattern[2];
			    }
			  else
			    ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or not matched.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;

		  /* Setup for entry to the handler.  */
		  lp = selfrel_offset31 (data);
		  _Unwind_SetGR (context, R_PC, lp);
		  _Unwind_SetGR (context, 0, (_uw) ucbp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Catch handler not matched.  Advance to the next descriptor.  */
	      data += 2;
	      break;

	    case 2:
	      rtti_count = data[0] & 0x7fffffff;
	      /* Exception specification.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  if (in_range && (!forced_unwind || !rtti_count))
		    {
		      /* Match against the exception specification.  */
		      _uw i;
		      _uw rtti;
		      void *matched;

		      for (i = 0; i < rtti_count; i++)
			{
			  matched = (void *)(ucbp + 1);
			  rtti = _Unwind_decode_typeinfo_ptr (0,
							      (_uw) &data[i + 1]);
			  if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
						&matched))
			    break;
			}

		      if (i == rtti_count)
			{
			  /* Exception does not match the spec.  */
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or exception is permitted.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;
		  /* Record the RTTI list for __cxa_call_unexpected.  */
		  ucbp->barrier_cache.bitpattern[1] = rtti_count;
		  ucbp->barrier_cache.bitpattern[2] = 0;
		  ucbp->barrier_cache.bitpattern[3] = 4;
		  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

		  if (data[0] & uint32_highbit)
		    {
		      /* High bit set: a landing pad follows the RTTI
			 list; enter it directly.  */
		      data += rtti_count + 1;
		      /* Setup for entry to the handler.  */
		      lp = selfrel_offset31 (data);
		      data++;
		      _Unwind_SetGR (context, R_PC, lp);
		      _Unwind_SetGR (context, 0, (_uw) ucbp);
		      return _URC_INSTALL_CONTEXT;
		    }
		  else
		    /* No landing pad: call __cxa_call_unexpected after
		       this frame has been unwound (see below).  */
		    phase2_call_unexpected_after_unwind = 1;
		}
	      /* Skip the descriptor (and optional landing pad word).  */
	      if (data[0] & uint32_highbit)
		data++;
	      data += rtti_count + 1;
	      break;

	    default:
	      /* Should never happen.  */
	      return _URC_FAILURE;
	    }
	  /* Finished processing this descriptor.  */
	}
    }

  /* No propagation barrier here: execute the unwind instructions to
     virtually (or really) unwind this frame.  */
  if (id >= 3)
    {
      /* 24-bit encoding.  */
      if (__gnu_unwind_24bit (context, uws.data, id == 4) != _URC_OK)
	return _URC_FAILURE;
    }
  else
    {
      if (__gnu_unwind_execute (context, &uws) != _URC_OK)
	return _URC_FAILURE;
    }

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_call_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}