]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/dwarf2cfi.c
dwarf2: Reduce some redundant definitions.
[thirdparty/gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
60 \f
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* Counter used to generate unique "LCFI" labels;
   see dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted; advanced by
   add_cfi as notes are chained after it.  */
static rtx cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* True if remember_state should be emitted before following CFI directive;
   consumed (and cleared) by add_cfi.  */
static bool emit_cfa_remember;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;
77 \f
78
79 static void dwarf2out_cfi_begin_epilogue (rtx insn);
80 static void dwarf2out_frame_debug_restore_state (void);
81
82 \f
83 /* Hook used by __throw. */
84
85 rtx
86 expand_builtin_dwarf_sp_column (void)
87 {
88 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
89 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
90 }
91
92 /* MEM is a memory reference for the register size table, each element of
93 which has mode MODE. Initialize column C as a return address column. */
94
95 static void
96 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
97 {
98 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
99 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
100 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
101 }
102
103 /* Generate code to initialize the register size table. */
104
void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  /* The table elements have char type; each holds one register's size.  */
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Map the hard register to its external DWARF column number.  */
      int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If only part of the register is call-clobbered, record the
	     size of the part that actually gets saved.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
	    {
	      /* VOIDmode means there is nothing to record here; the
		 fallback below will fill in the return column.  */
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Make sure the return column always has a usable size entry.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target add any extra, target-specific entries.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
150
151 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
152
153 static inline HOST_WIDE_INT
154 div_data_align (HOST_WIDE_INT off)
155 {
156 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
157 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
158 return r;
159 }
160
161 /* Return true if we need a signed version of a given opcode
162 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
163
164 static inline bool
165 need_data_align_sf_opcode (HOST_WIDE_INT off)
166 {
167 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
168 }
169
170 /* Return a pointer to a newly allocated Call Frame Instruction. */
171
172 static inline dw_cfi_ref
173 new_cfi (void)
174 {
175 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
176
177 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
178 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
179
180 return cfi;
181 }
182
183 /* Generate a new label for the CFI info to refer to. */
184
185 static char *
186 dwarf2out_cfi_label (void)
187 {
188 int num = dwarf2out_cfi_label_num++;
189 char label[20];
190
191 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
192
193 return xstrdup (label);
194 }
195
196 /* Add CFI either to the current insn stream or to a vector, or both. */
197
198 static void
199 add_cfi (dw_cfi_ref cfi)
200 {
201 if (emit_cfa_remember)
202 {
203 dw_cfi_ref cfi_remember;
204
205 /* Emit the state save. */
206 emit_cfa_remember = false;
207 cfi_remember = new_cfi ();
208 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
209 add_cfi (cfi_remember);
210 }
211
212 any_cfis_emitted = true;
213 if (cfi_insn != NULL)
214 {
215 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
216 NOTE_CFI (cfi_insn) = cfi;
217 }
218 if (add_cfi_vec != NULL)
219 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
220 }
221
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
224
225 static void
226 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
227 {
228 struct dw_loc_descr_struct *ptr;
229 cfa->offset = 0;
230 cfa->base_offset = 0;
231 cfa->indirect = 0;
232 cfa->reg = -1;
233
234 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
235 {
236 enum dwarf_location_atom op = ptr->dw_loc_opc;
237
238 switch (op)
239 {
240 case DW_OP_reg0:
241 case DW_OP_reg1:
242 case DW_OP_reg2:
243 case DW_OP_reg3:
244 case DW_OP_reg4:
245 case DW_OP_reg5:
246 case DW_OP_reg6:
247 case DW_OP_reg7:
248 case DW_OP_reg8:
249 case DW_OP_reg9:
250 case DW_OP_reg10:
251 case DW_OP_reg11:
252 case DW_OP_reg12:
253 case DW_OP_reg13:
254 case DW_OP_reg14:
255 case DW_OP_reg15:
256 case DW_OP_reg16:
257 case DW_OP_reg17:
258 case DW_OP_reg18:
259 case DW_OP_reg19:
260 case DW_OP_reg20:
261 case DW_OP_reg21:
262 case DW_OP_reg22:
263 case DW_OP_reg23:
264 case DW_OP_reg24:
265 case DW_OP_reg25:
266 case DW_OP_reg26:
267 case DW_OP_reg27:
268 case DW_OP_reg28:
269 case DW_OP_reg29:
270 case DW_OP_reg30:
271 case DW_OP_reg31:
272 cfa->reg = op - DW_OP_reg0;
273 break;
274 case DW_OP_regx:
275 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
276 break;
277 case DW_OP_breg0:
278 case DW_OP_breg1:
279 case DW_OP_breg2:
280 case DW_OP_breg3:
281 case DW_OP_breg4:
282 case DW_OP_breg5:
283 case DW_OP_breg6:
284 case DW_OP_breg7:
285 case DW_OP_breg8:
286 case DW_OP_breg9:
287 case DW_OP_breg10:
288 case DW_OP_breg11:
289 case DW_OP_breg12:
290 case DW_OP_breg13:
291 case DW_OP_breg14:
292 case DW_OP_breg15:
293 case DW_OP_breg16:
294 case DW_OP_breg17:
295 case DW_OP_breg18:
296 case DW_OP_breg19:
297 case DW_OP_breg20:
298 case DW_OP_breg21:
299 case DW_OP_breg22:
300 case DW_OP_breg23:
301 case DW_OP_breg24:
302 case DW_OP_breg25:
303 case DW_OP_breg26:
304 case DW_OP_breg27:
305 case DW_OP_breg28:
306 case DW_OP_breg29:
307 case DW_OP_breg30:
308 case DW_OP_breg31:
309 cfa->reg = op - DW_OP_breg0;
310 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
311 break;
312 case DW_OP_bregx:
313 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
314 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
315 break;
316 case DW_OP_deref:
317 cfa->indirect = 1;
318 break;
319 case DW_OP_plus_uconst:
320 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
321 break;
322 default:
323 gcc_unreachable ();
324 }
325 }
326 }
327
328 /* Find the previous value for the CFA, iteratively. CFI is the opcode
329 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
330 one level of remember/restore state processing. */
331
void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the CFA register stays the same.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset stays the same.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset are redefined.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Recover reg/offset/indirect from the location expression.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of saved state is supported here; nested
	 remembers would trip this assert.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other CFI opcodes do not affect the CFA.  */
      break;
    }
}
368
/* The current rule for calculating the DWARF2 canonical frame address.  */
static dw_cfa_location cfa;

/* A copy of the CFA, for comparison purposes; def_cfa_1 uses it to
   suppress CFIs when nothing has changed.  */
static dw_cfa_location old_cfa;

/* The register used for saving registers to the stack, and its offset
   from the CFA.  */
static dw_cfa_location cfa_store;

/* The current save location around an epilogue.  */
static dw_cfa_location cfa_remember;

/* Like cfa_remember, but a copy of old_cfa.  */
static dw_cfa_location old_cfa_remember;

/* The running total of the size of arguments pushed onto the stack.  */
static HOST_WIDE_INT args_size;

/* The last args_size we actually output, to avoid redundant
   DW_CFA_GNU_args_size directives.  */
static HOST_WIDE_INT old_args_size;
390
391 /* Determine if two dw_cfa_location structures define the same data. */
392
393 bool
394 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
395 {
396 return (loc1->reg == loc2->reg
397 && loc1->offset == loc2->offset
398 && loc1->indirect == loc2->indirect
399 && (loc1->indirect == 0
400 || loc1->base_offset == loc2->base_offset));
401 }
402
403 /* This routine does the actual work. The CFA is now calculated from
404 the dw_cfa_location structure. */
405
static void
def_cfa_1 (dw_cfa_location *loc_p)
{
  dw_cfi_ref cfi;
  dw_cfa_location loc;

  /* Record the new rule globally, and work on a local copy.  */
  cfa = *loc_p;
  loc = *loc_p;

  /* Keep cfa_store's offset in sync when it tracks the same register.  */
  if (cfa_store.reg == loc.reg && loc.indirect == 0)
    cfa_store.offset = loc.offset;

  /* Translate to the external DWARF column numbering.  */
  loc.reg = DWARF_FRAME_REGNUM (loc.reg);

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (&loc, &old_cfa))
    return;

  cfi = new_cfi ();

  if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (loc.offset == old_cfa.offset
	   && old_cfa.reg != INVALID_REGNUM
	   && !loc.indirect
	   && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
    }
#endif

  else if (loc.indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (&loc, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  add_cfi (cfi);
  /* Remember what we emitted, for the no-change check above.  */
  old_cfa = loc;
}
482
483 /* Add the CFI for saving a register. REG is the CFA column number.
484 If SREG is -1, the register is saved at OFFSET from the CFA;
485 otherwise it is saved in SREG. */
486
static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at OFFSET from the CFA.  Pick the most compact opcode:
	 the signed form when the factored offset is negative, the
	 extended form when REG does not fit in the 6-bit operand of
	 plain DW_CFA_offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    /* "Saved in itself" means the register still holds its value.  */
    cfi->dw_cfi_opc = DW_CFA_same_value;
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
}
525
526 /* Given a SET, calculate the amount of stack adjustment it
527 contains. */
528
static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  /* Subtract adjustments already accounted for in this insn.  */
	  return offset - cur_offset;
	}

      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  /* A pop (load from a sp-autoinc address) adjusts the stack too.  */
  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* A decrement pushes GET_MODE_SIZE bytes.  */
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
615
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  A value of -1 means "not yet computed";
   allocated lazily by compute_barrier_args_size.  */

static HOST_WIDE_INT *barrier_args_size;
620
621 /* Helper function for compute_barrier_args_size. Handle one insn. */
622
static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* For frame-related insns, trust the attached
	 REG_FRAME_RELATED_EXPR note rather than the pattern itself.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  /* stack_adjust_offset is phrased for a downward-growing stack;
     flip the sign otherwise.  */
  offset = -offset;
#endif

  /* args_size can never go negative.  */
  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      /* Propagate args_size to the jump target and queue it for
	 scanning if it has not been visited yet (marker is -1).  */
      if (dest)
	{
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
690
691 /* Walk the whole function and compute args_size on BARRIERs. */
692
static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  /* -1 marks "args_size not yet known" for every insn.  */
  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  /* Seed the scan: args_size is 0 at function entry.  */
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  /* A BARRIER just before this label gets the same args_size.  */
	  prev = prev_nonnote_insn (insn);
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  /* Scan forward from the label until the next BARRIER,
	     accumulating stack adjustments insn by insn.  */
	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay slots: insns annulled from the branch target
		     affect only the target's args_size, tracked
		     separately in dest_args_size.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
788
789 /* Add a CFI to update the running total of the size of arguments
790 pushed onto the stack. */
791
792 static void
793 dwarf2out_args_size (HOST_WIDE_INT size)
794 {
795 dw_cfi_ref cfi;
796
797 if (size == old_args_size)
798 return;
799
800 old_args_size = size;
801
802 cfi = new_cfi ();
803 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
804 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
805 add_cfi (cfi);
806 }
807
808 /* Record a stack adjustment of OFFSET bytes. */
809
static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset)
{
  /* Keep the CFA rule and the store base in step with the SP.  */
  if (cfa.reg == STACK_POINTER_REGNUM)
    cfa.offset += offset;

  if (cfa_store.reg == STACK_POINTER_REGNUM)
    cfa_store.offset += offset;

  /* With accumulated outgoing args there are no pushes to track.  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* args_size can never go negative.  */
  args_size += offset;
  if (args_size < 0)
    args_size = 0;

  def_cfa_1 (&cfa);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (args_size);
}
834
835 /* Check INSN to see if it looks like a push or a stack adjustment, and
836 make a note of it if it does. EH uses this information to find out
837 how much extra space it needs to pop off the stack. */
838
static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      /* Flush the accumulated args_size just before the call.  */
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size (args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  /* -1 in the table means "not computed"; treat as zero.  */
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      /* Convert the absolute args_size at the barrier into a delta
	 relative to the running total.  */
      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  dwarf2out_stack_adjust (offset);
}
924
925 /* We delay emitting a register save until either (a) we reach the end
926 of the prologue or (b) the register is clobbered. This clusters
927 register saves so that there are fewer pc advances. */
928
struct GTY(()) queued_reg_save {
  struct queued_reg_save *next;		/* Singly-linked list link.  */
  rtx reg;				/* Register (or PC_RTX) being saved.  */
  HOST_WIDE_INT cfa_offset;		/* Save slot's offset from the CFA.  */
  rtx saved_reg;			/* If non-null, saved in this reg.  */
};

/* The unflushed queue of register saves, most recent entry first.  */
static GTY(()) struct queued_reg_save *queued_reg_saves;
937
938 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
939 typedef struct GTY(()) reg_saved_in_data {
940 rtx orig_reg;
941 rtx saved_in_reg;
942 } reg_saved_in_data;
943
944 DEF_VEC_O (reg_saved_in_data);
945 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
946
947 /* A set of registers saved in other registers. This is implemented as
948 a flat array because it normally contains zero or 1 entry, depending
949 on the target. IA-64 is the big spender here, using a maximum of
950 5 entries. */
951 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
952
953 static GTY(()) reg_saved_in_data *cie_return_save;
954
955 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
956
957 static bool
958 compare_reg_or_pc (rtx x, rtx y)
959 {
960 if (REG_P (x) && REG_P (y))
961 return REGNO (x) == REGNO (y);
962 return x == y;
963 }
964
965 /* Record SRC as being saved in DEST. DEST may be null to delete an
966 existing entry. SRC may be a register or PC_RTX. */
967
static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or delete it in place.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* A null DEST is a deletion request; nothing more to do.  */
  if (dest == NULL)
    return;

  /* Otherwise append a fresh entry.  */
  elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}
991
992 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
993 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
994
995 static void
996 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
997 {
998 struct queued_reg_save *q;
999
1000 /* Duplicates waste space, but it's also necessary to remove them
1001 for correctness, since the queue gets output in reverse order. */
1002 for (q = queued_reg_saves; q != NULL; q = q->next)
1003 if (compare_reg_or_pc (q->reg, reg))
1004 break;
1005
1006 if (q == NULL)
1007 {
1008 q = ggc_alloc_queued_reg_save ();
1009 q->next = queued_reg_saves;
1010 queued_reg_saves = q;
1011 }
1012
1013 q->reg = reg;
1014 q->cfa_offset = offset;
1015 q->saved_reg = sreg;
1016 }
1017
1018 /* Output all the entries in QUEUED_REG_SAVES. */
1019
1020 static void
1021 dwarf2out_flush_queued_reg_saves (void)
1022 {
1023 struct queued_reg_save *q;
1024
1025 for (q = queued_reg_saves; q; q = q->next)
1026 {
1027 unsigned int reg, sreg;
1028
1029 record_reg_saved_in_reg (q->saved_reg, q->reg);
1030
1031 if (q->reg == pc_rtx)
1032 reg = DWARF_FRAME_RETURN_COLUMN;
1033 else
1034 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1035 if (q->saved_reg)
1036 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1037 else
1038 sreg = INVALID_REGNUM;
1039 reg_save (reg, sreg, q->cfa_offset);
1040 }
1041
1042 queued_reg_saves = NULL;
1043 }
1044
1045 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1046 location for? Or, does it clobber a register which we've previously
1047 said that some other register is saved in, and for which we now
1048 have a new location for? */
1049
static bool
clobbers_queued_reg_save (const_rtx insn)
{
  struct queued_reg_save *q;

  for (q = queued_reg_saves; q; q = q->next)
    {
      size_t i;
      reg_saved_in_data *rir;

      /* INSN writes a register whose save is still queued.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* INSN writes a register that currently holds the saved value of
	 a queued register.  */
      FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1071
1072 /* What register, if any, is currently saved in REG? */
1073
static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  struct queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check queued (not yet emitted) saves first...  */
  for (q = queued_reg_saves; q; q = q->next)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  /* ... then the already-recorded register-to-register saves.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  /* REG holds no saved register.  */
  return NULL_RTX;
}
1092
1093
1094 /* A temporary register holding an integral value used in adjusting SP
1095 or setting up the store_reg. The "offset" field holds the integer
1096 value, not an offset. */
1097 static dw_cfa_location cfa_temp;
1098
1099 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1100
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* PAT replaces the CFA rule outright, so start from scratch.  */
  memset (&cfa, 0, sizeof (cfa));

  switch (GET_CODE (pat))
    {
    case PLUS:
      /* (plus (reg) (const_int)): register plus constant offset.  */
      cfa.reg = REGNO (XEXP (pat, 0));
      cfa.offset = INTVAL (XEXP (pat, 1));
      break;

    case REG:
      cfa.reg = REGNO (pat);
      break;

    case MEM:
      /* The CFA value is stored in memory at (reg [+ base_offset]).  */
      cfa.indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cfa.base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
      cfa.reg = REGNO (pat);
      break;

    default:
      /* Recurse and define an expression.  */
      gcc_unreachable ();
    }

  def_cfa_1 (&cfa);
}
1135
1136 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1137
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA-reg + const: the CFA is now that much closer to
	 DEST, so shrink the recorded offset accordingly.  */
      gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
      cfa.offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the CFA register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  /* Either way, DEST is the new CFA register.  */
  cfa.reg = REGNO (dest);
  gcc_assert (cfa.indirect == 0);

  def_cfa_1 (&cfa);
}
1166
1167 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1168
1169 static void
1170 dwarf2out_frame_debug_cfa_offset (rtx set)
1171 {
1172 HOST_WIDE_INT offset;
1173 rtx src, addr, span;
1174 unsigned int sregno;
1175
1176 src = XEXP (set, 1);
1177 addr = XEXP (set, 0);
1178 gcc_assert (MEM_P (addr));
1179 addr = XEXP (addr, 0);
1180
1181 /* As documented, only consider extremely simple addresses. */
1182 switch (GET_CODE (addr))
1183 {
1184 case REG:
1185 gcc_assert (REGNO (addr) == cfa.reg);
1186 offset = -cfa.offset;
1187 break;
1188 case PLUS:
1189 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1190 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1191 break;
1192 default:
1193 gcc_unreachable ();
1194 }
1195
1196 if (src == pc_rtx)
1197 {
1198 span = NULL;
1199 sregno = DWARF_FRAME_RETURN_COLUMN;
1200 }
1201 else
1202 {
1203 span = targetm.dwarf_register_span (src);
1204 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1205 }
1206
1207 /* ??? We'd like to use queue_reg_save, but we need to come up with
1208 a different flushing heuristic for epilogues. */
1209 if (!span)
1210 reg_save (sregno, INVALID_REGNUM, offset);
1211 else
1212 {
1213 /* We have a PARALLEL describing where the contents of SRC live.
1214 Queue register saves for each piece of the PARALLEL. */
1215 int par_index;
1216 int limit;
1217 HOST_WIDE_INT span_offset = offset;
1218
1219 gcc_assert (GET_CODE (span) == PARALLEL);
1220
1221 limit = XVECLEN (span, 0);
1222 for (par_index = 0; par_index < limit; par_index++)
1223 {
1224 rtx elem = XVECEXP (span, 0, par_index);
1225
1226 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1227 reg_save (sregno, INVALID_REGNUM, span_offset);
1228 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1229 }
1230 }
1231 }
1232
1233 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1234
1235 static void
1236 dwarf2out_frame_debug_cfa_register (rtx set)
1237 {
1238 rtx src, dest;
1239 unsigned sregno, dregno;
1240
1241 src = XEXP (set, 1);
1242 dest = XEXP (set, 0);
1243
1244 record_reg_saved_in_reg (dest, src);
1245 if (src == pc_rtx)
1246 sregno = DWARF_FRAME_RETURN_COLUMN;
1247 else
1248 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1249
1250 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1251
1252 /* ??? We'd like to use queue_reg_save, but we need to come up with
1253 a different flushing heuristic for epilogues. */
1254 reg_save (sregno, dregno, 0);
1255 }
1256
1257 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1258
1259 static void
1260 dwarf2out_frame_debug_cfa_expression (rtx set)
1261 {
1262 rtx src, dest, span;
1263 dw_cfi_ref cfi = new_cfi ();
1264
1265 dest = SET_DEST (set);
1266 src = SET_SRC (set);
1267
1268 gcc_assert (REG_P (src));
1269 gcc_assert (MEM_P (dest));
1270
1271 span = targetm.dwarf_register_span (src);
1272 gcc_assert (!span);
1273
1274 cfi->dw_cfi_opc = DW_CFA_expression;
1275 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1276 cfi->dw_cfi_oprnd2.dw_cfi_loc
1277 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1278 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1279
1280 /* ??? We'd like to use queue_reg_save, were the interface different,
1281 and, as above, we could manage flushing for epilogues. */
1282 add_cfi (cfi);
1283 }
1284
1285 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1286
1287 static void
1288 dwarf2out_frame_debug_cfa_restore (rtx reg)
1289 {
1290 dw_cfi_ref cfi = new_cfi ();
1291 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1292
1293 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1294 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1295
1296 add_cfi (cfi);
1297 }
1298
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  /* DW_CFA_GNU_window_save takes no operands; it simply marks the point
     at which the register window was saved.  */
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1311
1312 /* Record call frame debugging information for an expression EXPR,
1313 which either sets SP or FP (adjusting how we calculate the frame
1314 address) or saves a register to the stack or another register.
1315 LABEL indicates the address of EXPR.
1316
1317 This function encodes a state machine mapping rtxes to actions on
1318 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1319 users need not read the source code.
1320
1321 The High-Level Picture
1322
1323 Changes in the register we use to calculate the CFA: Currently we
1324 assume that if you copy the CFA register into another register, we
1325 should take the other one as the new CFA register; this seems to
1326 work pretty well. If it's wrong for some target, it's simple
1327 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1328
1329 Changes in the register we use for saving registers to the stack:
1330 This is usually SP, but not always. Again, we deduce that if you
1331 copy SP into another register (and SP is not the CFA register),
1332 then the new register is the one we will be using for register
1333 saves. This also seems to work.
1334
1335 Register saves: There's not much guesswork about this one; if
1336 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1337 register save, and the register used to calculate the destination
1338 had better be the one we think we're using for this purpose.
1339 It's also assumed that a copy from a call-saved register to another
1340 register is saving that register if RTX_FRAME_RELATED_P is set on
1341 that instruction. If the copy is from a call-saved register to
1342 the *same* register, that means that the register is now the same
1343 value as in the caller.
1344
1345 Except: If the register being saved is the CFA register, and the
1346 offset is nonzero, we are saving the CFA, so we assume we have to
1347 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1348 the intent is to save the value of SP from the previous frame.
1349
   In addition, if a register has previously been saved to a different
   register, a store of the saving register is treated as a save of the
   original register (see reg_saved_in and record_reg_saved_in_reg).
1352
1353 Invariants / Summaries of Rules
1354
1355 cfa current rule for calculating the CFA. It usually
1356 consists of a register and an offset.
1357 cfa_store register used by prologue code to save things to the stack
1358 cfa_store.offset is the offset from the value of
1359 cfa_store.reg to the actual CFA
1360 cfa_temp register holding an integral value. cfa_temp.offset
1361 stores the value, which will be used to adjust the
1362 stack pointer. cfa_temp is also used like cfa_store,
1363 to track stores to the stack via fp or a temp reg.
1364
1365 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1366 with cfa.reg as the first operand changes the cfa.reg and its
1367 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1368 cfa_temp.offset.
1369
1370 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1371 expression yielding a constant. This sets cfa_temp.reg
1372 and cfa_temp.offset.
1373
1374 Rule 5: Create a new register cfa_store used to save items to the
1375 stack.
1376
1377 Rules 10-14: Save a register to the stack. Define offset as the
1378 difference of the original location and cfa_store's
1379 location (or cfa_temp's location if cfa_temp is used).
1380
   Rules 16-19: If AND operation happens on sp in prologue, we assume
		stack is realigned.  We will use a group of DW_OP_XXX
		expressions to represent the location of the stored
		register instead of CFA+offset.
1385
1386 The Rules
1387
1388 "{a,b}" indicates a choice of a xor b.
1389 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1390
1391 Rule 1:
1392 (set <reg1> <reg2>:cfa.reg)
1393 effects: cfa.reg = <reg1>
1394 cfa.offset unchanged
1395 cfa_temp.reg = <reg1>
1396 cfa_temp.offset = cfa.offset
1397
1398 Rule 2:
1399 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1400 {<const_int>,<reg>:cfa_temp.reg}))
1401 effects: cfa.reg = sp if fp used
1402 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1403 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1404 if cfa_store.reg==sp
1405
1406 Rule 3:
1407 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1408 effects: cfa.reg = fp
	   cfa.offset += +/- <const_int>
1410
1411 Rule 4:
1412 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1413 constraints: <reg1> != fp
1414 <reg1> != sp
1415 effects: cfa.reg = <reg1>
1416 cfa_temp.reg = <reg1>
1417 cfa_temp.offset = cfa.offset
1418
1419 Rule 5:
1420 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1421 constraints: <reg1> != fp
1422 <reg1> != sp
1423 effects: cfa_store.reg = <reg1>
1424 cfa_store.offset = cfa.offset - cfa_temp.offset
1425
1426 Rule 6:
1427 (set <reg> <const_int>)
1428 effects: cfa_temp.reg = <reg>
1429 cfa_temp.offset = <const_int>
1430
1431 Rule 7:
1432 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1433 effects: cfa_temp.reg = <reg1>
1434 cfa_temp.offset |= <const_int>
1435
1436 Rule 8:
1437 (set <reg> (high <exp>))
1438 effects: none
1439
1440 Rule 9:
1441 (set <reg> (lo_sum <exp> <const_int>))
1442 effects: cfa_temp.reg = <reg>
1443 cfa_temp.offset = <const_int>
1444
1445 Rule 10:
1446 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1447 effects: cfa_store.offset -= <const_int>
1448 cfa.offset = cfa_store.offset if cfa.reg == sp
1449 cfa.reg = sp
1450 cfa.base_offset = -cfa_store.offset
1451
1452 Rule 11:
1453 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1454 effects: cfa_store.offset += -/+ mode_size(mem)
1455 cfa.offset = cfa_store.offset if cfa.reg == sp
1456 cfa.reg = sp
1457 cfa.base_offset = -cfa_store.offset
1458
1459 Rule 12:
1460 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1461
1462 <reg2>)
1463 effects: cfa.reg = <reg1>
1464 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1465
1466 Rule 13:
1467 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1468 effects: cfa.reg = <reg1>
1469 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1470
1471 Rule 14:
1472 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1473 effects: cfa.reg = <reg1>
1474 cfa.base_offset = -cfa_temp.offset
1475 cfa_temp.offset -= mode_size(mem)
1476
1477 Rule 15:
1478 (set <reg> {unspec, unspec_volatile})
1479 effects: target-dependent
1480
1481 Rule 16:
1482 (set sp (and: sp <const_int>))
1483 constraints: cfa_store.reg == sp
1484 effects: cfun->fde.stack_realign = 1
1485 cfa_store.offset = 0
1486 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1487
1488 Rule 17:
1489 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1490 effects: cfa_store.offset += -/+ mode_size(mem)
1491
1492 Rule 18:
1493 (set (mem ({pre_inc, pre_dec} sp)) fp)
1494 constraints: fde->stack_realign == 1
1495 effects: cfa_store.offset = 0
1496 cfa.reg != HARD_FRAME_POINTER_REGNUM
1497
1498 Rule 19:
1499 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1500 constraints: fde->stack_realign == 1
1501 && cfa.offset == 0
1502 && cfa.indirect == 0
1503 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1504 effects: Use DW_CFA_def_cfa_expression to define cfa
1505 cfa.reg == fde->drap_reg */
1506
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment.  */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC had earlier been copied into another register, treat this
     as a save/restore of the original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cfa.reg == (unsigned) REGNO (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cfa.reg = REGNO (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (DWARF_FRAME_REGNUM (REGNO (src))
			      == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != REGNO (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* A register adjustment must come through cfa_temp.  */
		  gcc_assert ((unsigned) REGNO (XEXP (src, 1))
			      == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
		  cfa.reg = STACK_POINTER_REGNUM;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == STACK_POINTER_REGNUM)
		cfa.offset += offset;
	      if (cfa_store.reg == STACK_POINTER_REGNUM)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = HARD_FRAME_POINTER_REGNUM;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && REGNO (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = REGNO (dest);
		  /* Or used to save regs to the stack.  */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && REGNO (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
		  cfa_store.reg = REGNO (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = REGNO (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = REGNO (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  if ((unsigned) REGNO (dest) != cfa_temp.reg)
	    cfa_temp.reg = REGNO (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      if (cfa.reg != STACK_POINTER_REGNUM
		  && cfa.reg != HARD_FRAME_POINTER_REGNUM)
		fde->drap_reg = cfa.reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (&cfa);
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;
	  if (cfa.reg == STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  /* OFFSET becomes the CFA-relative slot address below.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = REGNO (XEXP (XEXP (dest, 0), 0));

	    /* The base must be one of the registers we track.  */
	    if (cfa.reg == (unsigned) regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == (unsigned) regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == (unsigned) regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    int regno = REGNO (XEXP (dest, 0));

	    if (cfa.reg == (unsigned) regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == (unsigned) regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == (unsigned) regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg
		      == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && (unsigned) REGNO (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cfa.offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cfa.indirect == 0
		  && cfa.reg != HARD_FRAME_POINTER_REGNUM)
		{
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (&cfa_exp);
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      def_cfa_1 (&cfa);
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = REGNO (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (&cfa);
	      break;
	    }
	}

      def_cfa_1 (&cfa);

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
1995
1996 /* Record call frame debugging information for INSN, which either
1997 sets SP or FP (adjusting how we calculate the frame address) or saves a
1998 register to the stack. If INSN is NULL_RTX, initialize our state.
1999
2000 If AFTER_P is false, we're being called before the insn is emitted,
2001 otherwise after. Call instructions get invoked twice. */
2002
2003 static void
2004 dwarf2out_frame_debug (rtx insn, bool after_p)
2005 {
2006 rtx note, n;
2007 bool handled_one = false;
2008 bool need_flush = false;
2009
2010 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2011 dwarf2out_flush_queued_reg_saves ();
2012
2013 if (!RTX_FRAME_RELATED_P (insn))
2014 {
2015 /* ??? This should be done unconditionally since stack adjustments
2016 matter if the stack pointer is not the CFA register anymore but
2017 is still used to save registers. */
2018 if (!ACCUMULATE_OUTGOING_ARGS)
2019 dwarf2out_notice_stack_adjust (insn, after_p);
2020 return;
2021 }
2022
2023 any_cfis_emitted = false;
2024
2025 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2026 switch (REG_NOTE_KIND (note))
2027 {
2028 case REG_FRAME_RELATED_EXPR:
2029 insn = XEXP (note, 0);
2030 goto do_frame_expr;
2031
2032 case REG_CFA_DEF_CFA:
2033 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2034 handled_one = true;
2035 break;
2036
2037 case REG_CFA_ADJUST_CFA:
2038 n = XEXP (note, 0);
2039 if (n == NULL)
2040 {
2041 n = PATTERN (insn);
2042 if (GET_CODE (n) == PARALLEL)
2043 n = XVECEXP (n, 0, 0);
2044 }
2045 dwarf2out_frame_debug_adjust_cfa (n);
2046 handled_one = true;
2047 break;
2048
2049 case REG_CFA_OFFSET:
2050 n = XEXP (note, 0);
2051 if (n == NULL)
2052 n = single_set (insn);
2053 dwarf2out_frame_debug_cfa_offset (n);
2054 handled_one = true;
2055 break;
2056
2057 case REG_CFA_REGISTER:
2058 n = XEXP (note, 0);
2059 if (n == NULL)
2060 {
2061 n = PATTERN (insn);
2062 if (GET_CODE (n) == PARALLEL)
2063 n = XVECEXP (n, 0, 0);
2064 }
2065 dwarf2out_frame_debug_cfa_register (n);
2066 handled_one = true;
2067 break;
2068
2069 case REG_CFA_EXPRESSION:
2070 n = XEXP (note, 0);
2071 if (n == NULL)
2072 n = single_set (insn);
2073 dwarf2out_frame_debug_cfa_expression (n);
2074 handled_one = true;
2075 break;
2076
2077 case REG_CFA_RESTORE:
2078 n = XEXP (note, 0);
2079 if (n == NULL)
2080 {
2081 n = PATTERN (insn);
2082 if (GET_CODE (n) == PARALLEL)
2083 n = XVECEXP (n, 0, 0);
2084 n = XEXP (n, 0);
2085 }
2086 dwarf2out_frame_debug_cfa_restore (n);
2087 handled_one = true;
2088 break;
2089
2090 case REG_CFA_SET_VDRAP:
2091 n = XEXP (note, 0);
2092 if (REG_P (n))
2093 {
2094 dw_fde_ref fde = cfun->fde;
2095 if (fde)
2096 {
2097 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2098 if (REG_P (n))
2099 fde->vdrap_reg = REGNO (n);
2100 }
2101 }
2102 handled_one = true;
2103 break;
2104
2105 case REG_CFA_WINDOW_SAVE:
2106 dwarf2out_frame_debug_cfa_window_save ();
2107 handled_one = true;
2108 break;
2109
2110 case REG_CFA_FLUSH_QUEUE:
2111 /* The actual flush happens below. */
2112 need_flush = true;
2113 handled_one = true;
2114 break;
2115
2116 default:
2117 break;
2118 }
2119
2120 if (handled_one)
2121 {
2122 /* Minimize the number of advances by emitting the entire queue
2123 once anything is emitted. */
2124 need_flush |= any_cfis_emitted;
2125 }
2126 else
2127 {
2128 insn = PATTERN (insn);
2129 do_frame_expr:
2130 dwarf2out_frame_debug_expr (insn);
2131
2132 /* Check again. A parallel can save and update the same register.
2133 We could probably check just once, here, but this is safer than
2134 removing the check at the start of the function. */
2135 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2136 need_flush = true;
2137 }
2138
2139 if (need_flush)
2140 dwarf2out_flush_queued_reg_saves ();
2141 }
2142
2143 /* Examine CFI and return true if a cfi label and set_loc is needed
2144 beforehand. Even when generating CFI assembler instructions, we
2145 still have to add the cfi to the list so that lookup_cfa_1 works
2146 later on. When -g2 and above we even need to force emitting of
2147 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2148 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2149 and so don't use convert_cfa_to_fb_loc_list. */
2150
2151 static bool
2152 cfi_label_required_p (dw_cfi_ref cfi)
2153 {
2154 if (!dwarf2out_do_cfi_asm ())
2155 return true;
2156
2157 if (dwarf_version == 2
2158 && debug_info_level > DINFO_LEVEL_TERSE
2159 && (write_symbols == DWARF2_DEBUG
2160 || write_symbols == VMS_AND_DWARF2_DEBUG))
2161 {
2162 switch (cfi->dw_cfi_opc)
2163 {
2164 case DW_CFA_def_cfa_offset:
2165 case DW_CFA_def_cfa_offset_sf:
2166 case DW_CFA_def_cfa_register:
2167 case DW_CFA_def_cfa:
2168 case DW_CFA_def_cfa_sf:
2169 case DW_CFA_def_cfa_expression:
2170 case DW_CFA_restore_state:
2171 return true;
2172 default:
2173 return false;
2174 }
2175 }
2176 return false;
2177 }
2178
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label.  FIRST is set when we
     cross a text-section boundary, where a DW_CFA_set_loc must be used
     instead of DW_CFA_advance_loc4; it is cleared again after CFIs are
     emitted in the new section.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Group a run of consecutive CFI notes under one label; NEXT
	     is moved past the whole run.  */
	  while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	    {
	      required |= cfi_label_required_p (NOTE_CFI (next));
	      next = NEXT_INSN (next);
	    }
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);

	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the run to the FDE.  */
	  do
	    {
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2237
/* Scan the function and create the initial set of CFI notes.  Walks
   every insn in the current function and calls dwarf2out_frame_debug
   on the ones that can affect the call frame, using the global
   CFI_INSN to control where the resulting NOTE_INSN_CFI notes land
   in the insn stream.  */

static void
create_cfi_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      rtx pat;

      /* By default, CFI notes for INSN are emitted after the insn
	 that precedes it.  */
      cfi_insn = PREV_INSN (insn);

      if (BARRIER_P (insn))
	{
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (NOTE_P (insn))
	{
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_PROLOGUE_END:
	      /* All register saves queued during the prologue must be
		 represented before the prologue's end marker.  */
	      dwarf2out_flush_queued_reg_saves ();
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
#if defined(HAVE_epilogue)
	      dwarf2out_cfi_begin_epilogue (insn);
#endif
	      break;

	    case NOTE_INSN_CFA_RESTORE_STATE:
	      /* The restore CFI must come after this note, not before
		 it, so override the default placement.  */
	      cfi_insn = insn;
	      dwarf2out_frame_debug_restore_state ();
	      break;
	    }
	  continue;
	}

      if (!NONDEBUG_INSN_P (insn))
	continue;

      pat = PATTERN (insn);
      if (asm_noperands (pat) >= 0)
	{
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (GET_CODE (pat) == SEQUENCE)
	{
	  /* Process the insns inside the SEQUENCE starting at index 1;
	     presumably index 0 is the branch, covered by the calls on
	     INSN below -- TODO(review): confirm delay-slot layout.  */
	  int i, n = XVECLEN (pat, 0);
	  for (i = 1; i < n; ++i)
	    dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
	}

      if (CALL_P (insn)
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_frame_debug (insn, false);

      /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
	 Putting the note after the VEC should be ok.  */
      if (!tablejump_p (insn, NULL, &cfi_insn))
	cfi_insn = insn;

      dwarf2out_frame_debug (insn, true);
    }

  cfi_insn = NULL;
}
2310
/* Determine if we need to save and restore CFI information around the
   epilogue beginning at INSN (a NOTE_INSN_EPILOGUE_BEG note).  If we
   do need to save/restore, then emit the save now, and insert a
   NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream.  */

static void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  int idx;
	  rtx seq = PATTERN (i);

	  /* The first element of the SEQUENCE may itself be the return
	     or sibcall that ends the block.  */
	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If no frame-related insns were seen (the port doesn't emit
     epilogue unwind info), we don't need a save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     basic block of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Arrange for a DW_CFA_remember_state to precede the next CFI
     emitted -- NOTE(review): presumably consumed by add_cfi.  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (!cfa_remember.in_use);
  cfa_remember = cfa;
  old_cfa_remember = old_cfa;
  cfa_remember.in_use = 1;
}
2390
2391 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2392 required. */
2393
2394 static void
2395 dwarf2out_frame_debug_restore_state (void)
2396 {
2397 dw_cfi_ref cfi = new_cfi ();
2398
2399 cfi->dw_cfi_opc = DW_CFA_restore_state;
2400 add_cfi (cfi);
2401
2402 gcc_assert (cfa_remember.in_use);
2403 cfa = cfa_remember;
2404 old_cfa = old_cfa_remember;
2405 cfa_remember.in_use = 0;
2406 }
2407 \f
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  Recognizes the RA living in a register,
   on the stack at a small SP offset, or at a constant offset from a
   loadable value.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = DWARF_FRAME_REGNUM (REGNO (rtl));
      break;

    case MEM:
      /* RA is on the stack.  Decompose the address into SP + OFFSET;
	 anything else is rejected by the asserts below.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* In the REG case, RTL still points at the register rtx itself;
	 record that the PC lives there so a later store of that
	 register can be redirected to the return column.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
    }
}
2469
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  Returns 0 (no TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state
     common to every function; this becomes the CIE's CFI.  */
  if (cie_cfi_vec == NULL)
    {
      dw_cfa_location loc;

      /* Route CFI output into the CIE vector while we set up.  */
      add_cfi_vec = &cie_cfi_vec;

      memset(&old_cfa, 0, sizeof (old_cfa));
      old_cfa.reg = INVALID_REGNUM;

      /* On entry, the Canonical Frame Address is at SP.  */
      memset(&loc, 0, sizeof (loc));
      loc.reg = STACK_POINTER_REGNUM;
      loc.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&loc);

      if (targetm.debug_unwind_info () == UI_DWARF2
          || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
	{
	  initial_return_save (INCOMING_RETURN_ADDR_RTX);

	  /* For a few targets, we have the return address incoming into a
	     register, but choose a different return column.  This will result
	     in a DW_CFA_register for the return, and an entry in
	     regs_saved_in_regs to match.  If the target later stores that
	     return address register to the stack, we want to be able to emit
	     the DW_CFA_offset against the return column, not the intermediate
	     save register.  Save the contents of regs_saved_in_regs so that
	     we can re-initialize it at the start of each function.  */
	  switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
	    {
	    case 0:
	      break;
	    case 1:
	      cie_return_save = ggc_alloc_reg_saved_in_data ();
	      *cie_return_save = *VEC_index (reg_saved_in_data,
					     regs_saved_in_regs, 0);
	      regs_saved_in_regs = NULL;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      add_cfi_vec = NULL;
    }

  /* Set up state for generating call frame debug info.  */
  gcc_checking_assert (queued_reg_saves == NULL);
  gcc_checking_assert (regs_saved_in_regs == NULL);

  /* The CFA starts out at SP + INCOMING_FRAME_SP_OFFSET, matching the
     CIE state computed above.  */
  memset (&cfa, 0, sizeof(cfa));
  cfa.reg = STACK_POINTER_REGNUM;
  cfa.offset = INCOMING_FRAME_SP_OFFSET;

  old_cfa = cfa;
  cfa_store = cfa;

  memset (&cfa_temp, 0, sizeof(cfa_temp));
  cfa_temp.reg = INVALID_REGNUM;

  /* Re-seed per-function tracking with the CIE's return-save entry.  */
  if (cie_return_save)
    VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);

  dwarf2out_alloc_current_fde ();

  /* Do the work.  */
  create_cfi_notes ();
  add_cfis_to_fde ();

  /* Reset all function-specific information, particularly for GC.  */
  XDELETEVEC (barrier_args_size);
  barrier_args_size = NULL;
  regs_saved_in_regs = NULL;
  queued_reg_saves = NULL;

  return 0;
}
2555 \f
2556
2557 /* Save the result of dwarf2out_do_frame across PCH.
2558 This variable is tri-state, with 0 unset, >0 true, <0 false. */
2559 static GTY(()) signed char saved_do_cfi_asm = 0;
2560
2561 /* Decide whether we want to emit frame unwind information for the current
2562 translation unit. */
2563
2564 bool
2565 dwarf2out_do_frame (void)
2566 {
2567 /* We want to emit correct CFA location expressions or lists, so we
2568 have to return true if we're going to output debug info, even if
2569 we're not going to output frame or unwind info. */
2570 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
2571 return true;
2572
2573 if (saved_do_cfi_asm > 0)
2574 return true;
2575
2576 if (targetm.debug_unwind_info () == UI_DWARF2)
2577 return true;
2578
2579 if ((flag_unwind_tables || flag_exceptions)
2580 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2581 return true;
2582
2583 return false;
2584 }
2585
/* Decide whether to emit frame unwind via assembler directives
   (.cfi_*).  The result is memoized in SAVED_DO_CFI_ASM, which
   survives PCH.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  return false;
#endif

  /* Tri-state cache: >0 yes, <0 no, 0 not yet decided.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 mask
     selects the DW_EH_PE application-modifier bits of the encoding.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
2628
2629 static bool
2630 gate_dwarf2_frame (void)
2631 {
2632 #ifndef HAVE_prologue
2633 /* Targets which still implement the prologue in assembler text
2634 cannot use the generic dwarf2 unwinding. */
2635 return false;
2636 #endif
2637
2638 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
2639 from the optimized shrink-wrapping annotations that we will compute.
2640 For now, only produce the CFI notes for dwarf2. */
2641 return dwarf2out_do_frame ();
2642 }
2643
/* The pass descriptor: annotate the insn stream with NOTE_INSN_CFI
   notes, gated on the target supporting generic dwarf2 unwinding.  */

struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",				/* name */
  gate_dwarf2_frame,			/* gate */
  execute_dwarf2_frame,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_FINAL,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
2662
2663 #include "gt-dwarf2cfi.h"